diff options
author | Don Anderson <dda@ddanderson.com> | 2012-02-01 22:48:15 -0500 |
---|---|---|
committer | Don Anderson <dda@ddanderson.com> | 2012-02-01 22:48:15 -0500 |
commit | c7121b6c680742e5b2328d74504dd9146f8a4039 (patch) | |
tree | 58d4cd437353679d1b73071677d0388751b156c2 /test/3rdparty | |
parent | eb144c4000082b8500d593987bdba5172efd2ce2 (diff) | |
download | mongo-c7121b6c680742e5b2328d74504dd9146f8a4039.tar.gz |
Added complete release directories for discover, testscenarios,
testtools to test/3rdparty, removed the old versions under suiteutils,
and modified run.py to find the new paths.
--HG--
extra : rebase_source : cb76ea4148d7fa88ac1819eb78394ed696b510b8
Diffstat (limited to 'test/3rdparty')
83 files changed, 17535 insertions, 0 deletions
diff --git a/test/3rdparty/discover-0.4.0/PKG-INFO b/test/3rdparty/discover-0.4.0/PKG-INFO new file mode 100644 index 00000000000..3bcd1661643 --- /dev/null +++ b/test/3rdparty/discover-0.4.0/PKG-INFO @@ -0,0 +1,164 @@ +Metadata-Version: 1.0 +Name: discover +Version: 0.4.0 +Summary: Test discovery for unittest. Backported from Python 2.7 for Python 2.4+ +Home-page: http://pypi.python.org/pypi/discover/ +Author: Michael Foord +Author-email: michael@voidspace.org.uk +License: UNKNOWN +Description: This is the test discovery mechanism and ``load_tests`` protocol for unittest + backported from Python 2.7 to work with Python 2.4 or more recent (including + Python 3). + + .. note:: + + Test discovery is just part of what is new in unittest in Python 2.7. All + of the new features have been backported to run on Python 2.4-2.6, including + test discovery. This is the + `unittest2 package <http://pypi.python.org/pypi/unittest2>`_. + + discover can be installed with pip or easy_install. After installing switch the + current directory to the top level directory of your project and run:: + + python -m discover + python discover.py + + (If you have setuptools or `distribute <http://pypi.python.org/pypi/distribute>`_ + installed you will also have a ``discover`` script available.) + + This will discover all tests (with certain restrictions) from the current + directory. The discover module has several options to control its behavior (full + usage options are displayed with ``python -m discover -h``):: + + Usage: discover.py [options] + + Options: + -v, --verbose Verbose output + -s directory Directory to start discovery ('.' default) + -p pattern Pattern to match test files ('test*.py' default) + -t directory Top level directory of project (default to + start directory) + + For test discovery all test modules must be importable from the top + level directory of the project. 
+ + For example to use a different pattern for matching test modules run:: + + python -m discover -p '*test.py' + + (For UNIX-like shells like Bash you need to put quotes around the test pattern + or the shell will attempt to expand the pattern instead of passing it through to + discover. On Windows you must *not* put quotes around the pattern as the + Windows shell will pass the quotes to discover as well.) + + Test discovery is implemented in ``discover.DiscoveringTestLoader.discover``. As + well as using discover as a command line script you can import + ``DiscoveringTestLoader``, which is a subclass of ``unittest.TestLoader``, and + use it in your test framework. + + This method finds and returns all test modules from the specified start + directory, recursing into subdirectories to find them. Only test files that + match *pattern* will be loaded. (Using shell style pattern matching.) + + All test modules must be importable from the top level of the project. If + the start directory is not the top level directory then the top level + directory must be specified separately. + + The ``load_tests`` protocol allows test modules and packages to customize how + they are loaded. This is implemented in + ``discover.DiscoveringTestLoader.loadTestsFromModule``. If a test module defines + a ``load_tests`` function then tests are loaded from the module by calling + ``load_tests`` with three arguments: `loader`, `standard_tests`, `None`. + + If a test package name (directory with `__init__.py`) matches the + pattern then the package will be checked for a ``load_tests`` + function. If this exists then it will be called with *loader*, *tests*, + *pattern*. + + .. note:: + + The default pattern for matching tests is ``test*.py``. The '.py' means + that it will match test files and *not* match package names. You can + change this by changing the pattern using a command line option like + ``-p 'test*'``. 
+ + If ``load_tests`` exists then discovery does *not* recurse into the package, + ``load_tests`` is responsible for loading all tests in the package. + + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. *top_level_dir* is stored so + ``load_tests`` does not need to pass this argument in to + ``loader.discover()``. + + discover.py is maintained in a google code project (where bugs and feature + requests should be posted): http://code.google.com/p/unittest-ext/ + + The latest development version of discover.py can be found at: + http://code.google.com/p/unittest-ext/source/browse/trunk/discover.py + + + CHANGELOG + ========= + + + 2010/06/11 0.4.0 + ---------------- + + * Addition of a setuptools compatible test collector. Set + "test_suite = 'discover.collector'" in setup.py. "setup.py test" will start + test discovery with default parameters from the same directory as the setup.py. + * Allow test discovery using dotted module names instead of a path. + * Addition of a setuptools compatible entrypoint for the discover script. + * A faulty load_tests function will not halt test discovery. A failing test + is created to report the error. + * If test discovery imports a module from the wrong location (usually because + the module is globally installed and the user is expecting to run tests + against a development version in a different location) then discovery halts + with an ImportError and the problem is reported. + * Matching files during test discovery is done in + ``DiscoveringTestLoader._match_path``. This method can be overriden in + subclasses to, for example, match on the full file path or use regular + expressions for matching. + * Tests for discovery ported from unittest2. (The tests require unittest2 to + run.) + + Feature parity with the ``TestLoader`` in Python 2.7 RC 1. 
+ + + 2010/02/07 0.3.2 + ---------------- + + * If ``load_tests`` exists it is passed the standard tests as a ``TestSuite`` + rather than a list of tests. + + 2009/09/13 0.3.1 + ---------------- + + * Fixed a problem when a package directory matches the discovery pattern. + + 2009/08/20 0.3.0 + ---------------- + + * Failing to import a file (e.g. due to a syntax error) no longer halts + discovery but is reported as a failure. + * Discovery will not attempt to import test files whose names are not valid Python + identifiers, even if they match the pattern. +Keywords: unittest,testing,tests +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.4 +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.0 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Testing diff --git a/test/3rdparty/discover-0.4.0/README.txt b/test/3rdparty/discover-0.4.0/README.txt new file mode 100644 index 00000000000..8e6b4e11803 --- /dev/null +++ b/test/3rdparty/discover-0.4.0/README.txt @@ -0,0 +1,137 @@ +This is the test discovery mechanism and ``load_tests`` protocol for unittest +backported from Python 2.7 to work with Python 2.4 or more recent (including +Python 3). + +.. note:: + + Test discovery is just part of what is new in unittest in Python 2.7. 
All + of the new features have been backported to run on Python 2.4-2.6, including + test discovery. This is the + `unittest2 package <http://pypi.python.org/pypi/unittest2>`_. + +discover can be installed with pip or easy_install. After installing switch the +current directory to the top level directory of your project and run:: + + python -m discover + python discover.py + +(If you have setuptools or `distribute <http://pypi.python.org/pypi/distribute>`_ +installed you will also have a ``discover`` script available.) + +This will discover all tests (with certain restrictions) from the current +directory. The discover module has several options to control its behavior (full +usage options are displayed with ``python -m discover -h``):: + + Usage: discover.py [options] + + Options: + -v, --verbose Verbose output + -s directory Directory to start discovery ('.' default) + -p pattern Pattern to match test files ('test*.py' default) + -t directory Top level directory of project (default to + start directory) + + For test discovery all test modules must be importable from the top + level directory of the project. + +For example to use a different pattern for matching test modules run:: + + python -m discover -p '*test.py' + +(For UNIX-like shells like Bash you need to put quotes around the test pattern +or the shell will attempt to expand the pattern instead of passing it through to +discover. On Windows you must *not* put quotes around the pattern as the +Windows shell will pass the quotes to discover as well.) + +Test discovery is implemented in ``discover.DiscoveringTestLoader.discover``. As +well as using discover as a command line script you can import +``DiscoveringTestLoader``, which is a subclass of ``unittest.TestLoader``, and +use it in your test framework. + +This method finds and returns all test modules from the specified start +directory, recursing into subdirectories to find them. Only test files that +match *pattern* will be loaded. 
(Using shell style pattern matching.) + +All test modules must be importable from the top level of the project. If +the start directory is not the top level directory then the top level +directory must be specified separately. + +The ``load_tests`` protocol allows test modules and packages to customize how +they are loaded. This is implemented in +``discover.DiscoveringTestLoader.loadTestsFromModule``. If a test module defines +a ``load_tests`` function then tests are loaded from the module by calling +``load_tests`` with three arguments: `loader`, `standard_tests`, `None`. + +If a test package name (directory with `__init__.py`) matches the +pattern then the package will be checked for a ``load_tests`` +function. If this exists then it will be called with *loader*, *tests*, +*pattern*. + +.. note:: + + The default pattern for matching tests is ``test*.py``. The '.py' means + that it will match test files and *not* match package names. You can + change this by changing the pattern using a command line option like + ``-p 'test*'``. + +If ``load_tests`` exists then discovery does *not* recurse into the package, +``load_tests`` is responsible for loading all tests in the package. + +The pattern is deliberately not stored as a loader attribute so that +packages can continue discovery themselves. *top_level_dir* is stored so +``load_tests`` does not need to pass this argument in to +``loader.discover()``. + +discover.py is maintained in a google code project (where bugs and feature +requests should be posted): http://code.google.com/p/unittest-ext/ + +The latest development version of discover.py can be found at: +http://code.google.com/p/unittest-ext/source/browse/trunk/discover.py + + +CHANGELOG +========= + + +2010/06/11 0.4.0 +---------------- + +* Addition of a setuptools compatible test collector. Set + "test_suite = 'discover.collector'" in setup.py. "setup.py test" will start + test discovery with default parameters from the same directory as the setup.py. 
+* Allow test discovery using dotted module names instead of a path. +* Addition of a setuptools compatible entrypoint for the discover script. +* A faulty load_tests function will not halt test discovery. A failing test + is created to report the error. +* If test discovery imports a module from the wrong location (usually because + the module is globally installed and the user is expecting to run tests + against a development version in a different location) then discovery halts + with an ImportError and the problem is reported. +* Matching files during test discovery is done in + ``DiscoveringTestLoader._match_path``. This method can be overriden in + subclasses to, for example, match on the full file path or use regular + expressions for matching. +* Tests for discovery ported from unittest2. (The tests require unittest2 to + run.) + +Feature parity with the ``TestLoader`` in Python 2.7 RC 1. + + +2010/02/07 0.3.2 +---------------- + +* If ``load_tests`` exists it is passed the standard tests as a ``TestSuite`` + rather than a list of tests. + +2009/09/13 0.3.1 +---------------- + +* Fixed a problem when a package directory matches the discovery pattern. + +2009/08/20 0.3.0 +---------------- + +* Failing to import a file (e.g. due to a syntax error) no longer halts + discovery but is reported as a failure. +* Discovery will not attempt to import test files whose names are not valid Python + identifiers, even if they match the pattern.
\ No newline at end of file diff --git a/test/3rdparty/discover-0.4.0/discover.py b/test/3rdparty/discover-0.4.0/discover.py new file mode 100755 index 00000000000..c1e20273bcb --- /dev/null +++ b/test/3rdparty/discover-0.4.0/discover.py @@ -0,0 +1,480 @@ +#! /usr/bin/env python +# Copyright Michael Foord 2009-2010 +# discover.py +# Test discovery for unittest +# Compatible with Python 2.4-2.6 and 3.0-3.1 +# Licensed under the BSD License +# See: http://pypi.python.org/pypi/discover + +import optparse +import os +import re +import sys +import traceback +import types +import unittest + +from fnmatch import fnmatch + +__version__ = '0.4.0' +__all__ = ['DiscoveringTestLoader', 'main', 'defaultTestLoader'] + + +if hasattr(types, 'ClassType'): + class_types = (types.ClassType, type) +else: + # for Python 3.0 compatibility + class_types = type + +def _CmpToKey(mycmp): + 'Convert a cmp= function into a key= function' + class K(object): + def __init__(self, obj): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) == -1 + return K + +try: + from types import UnboundMethodType +except ImportError: + # Python 3 compatibility + UnboundMethodType = types.FunctionType + +# what about .pyc or .pyo (etc) +# we would need to avoid loading the same tests multiple times +# from '.py', '.pyc' *and* '.pyo' +VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) + + +def _make_failed_import_test(name, suiteClass): + message = 'Failed to import test module: %s' % name + if hasattr(traceback, 'format_exc'): + # Python 2.3 compatibility + # format_exc returns two frames of discover.py as well + message += '\n%s' % traceback.format_exc() + return _make_failed_test('ModuleImportFailure', name, ImportError(message), + suiteClass) + +def _make_failed_load_tests(name, exception, suiteClass): + return _make_failed_test('LoadTestsFailure', name, exception, suiteClass) + +def _make_failed_test(classname, methodname, exception, suiteClass): + def 
testFailure(self): + raise exception + attrs = {methodname: testFailure} + TestClass = type(classname, (unittest.TestCase,), attrs) + return suiteClass((TestClass(methodname),)) + +try: + cmp +except NameError: + @staticmethod + def cmp(x, y): + """Return -1 if x < y, 0 if x == y and 1 if x > y""" + return (x > y) - (x < y) + + +class DiscoveringTestLoader(unittest.TestLoader): + """ + This class is responsible for loading tests according to various criteria + and returning them wrapped in a TestSuite + """ + testMethodPrefix = 'test' + sortTestMethodsUsing = cmp + suiteClass = unittest.TestSuite + _top_level_dir = None + + def loadTestsFromTestCase(self, testCaseClass): + """Return a suite of all tests cases contained in testCaseClass""" + if issubclass(testCaseClass, unittest.TestSuite): + raise TypeError("Test cases should not be derived from TestSuite." + " Maybe you meant to derive from TestCase?") + testCaseNames = self.getTestCaseNames(testCaseClass) + if not testCaseNames and hasattr(testCaseClass, 'runTest'): + testCaseNames = ['runTest'] + loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) + return loaded_suite + + def loadTestsFromModule(self, module, use_load_tests=True): + """Return a suite of all tests cases contained in the given module""" + tests = [] + for name in dir(module): + obj = getattr(module, name) + if isinstance(obj, type) and issubclass(obj, unittest.TestCase): + tests.append(self.loadTestsFromTestCase(obj)) + + load_tests = getattr(module, 'load_tests', None) + tests = self.suiteClass(tests) + if use_load_tests and load_tests is not None: + try: + return load_tests(self, tests, None) + except: + ExceptionClass, e = sys.exc_info()[:2] + if not isinstance(e, Exception): + # for BaseException exceptions + raise + return _make_failed_load_tests(module.__name__, e, + self.suiteClass) + return tests + + def loadTestsFromName(self, name, module=None): + """Return a suite of all tests cases given a string specifier. 
+ + The name may resolve either to a module, a test case class, a + test method within a test case class, or a callable object which + returns a TestCase or TestSuite instance. + + The method optionally resolves the names relative to a given module. + """ + parts = name.split('.') + if module is None: + parts_copy = parts[:] + while parts_copy: + try: + module = __import__('.'.join(parts_copy)) + break + except ImportError: + del parts_copy[-1] + if not parts_copy: + raise + parts = parts[1:] + obj = module + for part in parts: + parent, obj = obj, getattr(obj, part) + + if isinstance(obj, types.ModuleType): + return self.loadTestsFromModule(obj) + elif isinstance(obj, type) and issubclass(obj, unittest.TestCase): + return self.loadTestsFromTestCase(obj) + elif (isinstance(obj, UnboundMethodType) and + isinstance(parent, type) and + issubclass(parent, unittest.TestCase)): + name = obj.__name__ + inst = parent(name) + # static methods follow a different path + if not isinstance(getattr(inst, name), types.FunctionType): + return self.suiteClass([inst]) + elif isinstance(obj, unittest.TestSuite): + return obj + if hasattr(obj, '__call__'): + test = obj() + if isinstance(test, unittest.TestSuite): + return test + elif isinstance(test, unittest.TestCase): + return self.suiteClass([test]) + else: + raise TypeError("calling %s returned %s, not a test" % + (obj, test)) + else: + raise TypeError("don't know how to make test from: %s" % obj) + + def loadTestsFromNames(self, names, module=None): + """Return a suite of all tests cases found using the given sequence + of string specifiers. See 'loadTestsFromName()'. 
+ """ + suites = [self.loadTestsFromName(name, module) for name in names] + return self.suiteClass(suites) + + def getTestCaseNames(self, testCaseClass): + """Return a sorted sequence of method names found within testCaseClass + """ + def isTestMethod(attrname, testCaseClass=testCaseClass, + prefix=self.testMethodPrefix): + return attrname.startswith(prefix) and \ + hasattr(getattr(testCaseClass, attrname), '__call__') + testFnNames = list(filter(isTestMethod, dir(testCaseClass))) + if self.sortTestMethodsUsing: + testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing)) + return testFnNames + + def discover(self, start_dir, pattern='test*.py', top_level_dir=None): + """Find and return all test modules from the specified start + directory, recursing into subdirectories to find them. Only test files + that match the pattern will be loaded. (Using shell style pattern + matching.) + + All test modules must be importable from the top level of the project. + If the start directory is not the top level directory then the top + level directory must be specified separately. + + If a test package name (directory with '__init__.py') matches the + pattern then the package will be checked for a 'load_tests' function. If + this exists then it will be called with loader, tests, pattern. + + If load_tests exists then discovery does *not* recurse into the package, + load_tests is responsible for loading all tests in the package. + + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. top_level_dir is stored so + load_tests does not need to pass this argument in to loader.discover(). 
+ """ + set_implicit_top = False + if top_level_dir is None and self._top_level_dir is not None: + # make top_level_dir optional if called from load_tests in a package + top_level_dir = self._top_level_dir + elif top_level_dir is None: + set_implicit_top = True + top_level_dir = start_dir + + top_level_dir = os.path.abspath(top_level_dir) + + if not top_level_dir in sys.path: + # all test modules must be importable from the top level directory + # should we *unconditionally* put the start directory in first + # in sys.path to minimise likelihood of conflicts between installed + # modules and development versions? + sys.path.insert(0, top_level_dir) + self._top_level_dir = top_level_dir + + is_not_importable = False + if os.path.isdir(os.path.abspath(start_dir)): + start_dir = os.path.abspath(start_dir) + if start_dir != top_level_dir: + is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) + else: + # support for discovery from dotted module names + try: + __import__(start_dir) + except ImportError: + is_not_importable = True + else: + the_module = sys.modules[start_dir] + top_part = start_dir.split('.')[0] + start_dir = os.path.abspath(os.path.dirname((the_module.__file__))) + if set_implicit_top: + self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__))) + sys.path.remove(top_level_dir) + + if is_not_importable: + raise ImportError('Start directory is not importable: %r' % start_dir) + + tests = list(self._find_tests(start_dir, pattern)) + return self.suiteClass(tests) + + def _get_name_from_path(self, path): + path = os.path.splitext(os.path.normpath(path))[0] + + _relpath = relpath(path, self._top_level_dir) + assert not os.path.isabs(_relpath), "Path must be within the project" + assert not _relpath.startswith('..'), "Path must be within the project" + + name = _relpath.replace(os.path.sep, '.') + return name + + def _get_module_from_name(self, name): + __import__(name) + return 
sys.modules[name] + + def _match_path(self, path, full_path, pattern): + # override this method to use alternative matching strategy + return fnmatch(path, pattern) + + def _find_tests(self, start_dir, pattern): + """Used by discovery. Yields test suites it loads.""" + paths = os.listdir(start_dir) + + for path in paths: + full_path = os.path.join(start_dir, path) + if os.path.isfile(full_path): + if not VALID_MODULE_NAME.match(path): + # valid Python identifiers only + continue + if not self._match_path(path, full_path, pattern): + continue + # if the test file matches, load it + name = self._get_name_from_path(full_path) + try: + module = self._get_module_from_name(name) + except: + yield _make_failed_import_test(name, self.suiteClass) + else: + mod_file = os.path.abspath(getattr(module, '__file__', full_path)) + realpath = os.path.splitext(mod_file)[0] + fullpath_noext = os.path.splitext(full_path)[0] + if realpath.lower() != fullpath_noext.lower(): + module_dir = os.path.dirname(realpath) + mod_name = os.path.splitext(os.path.basename(full_path))[0] + expected_dir = os.path.dirname(full_path) + msg = ("%r module incorrectly imported from %r. Expected %r. 
" + "Is this module globally installed?") + raise ImportError(msg % (mod_name, module_dir, expected_dir)) + yield self.loadTestsFromModule(module) + elif os.path.isdir(full_path): + if not os.path.isfile(os.path.join(full_path, '__init__.py')): + continue + + load_tests = None + tests = None + if fnmatch(path, pattern): + # only check load_tests if the package directory itself matches the filter + name = self._get_name_from_path(full_path) + package = self._get_module_from_name(name) + load_tests = getattr(package, 'load_tests', None) + tests = self.loadTestsFromModule(package, use_load_tests=False) + + if load_tests is None: + if tests is not None: + # tests loaded from package file + yield tests + # recurse into the package + for test in self._find_tests(full_path, pattern): + yield test + else: + try: + yield load_tests(self, tests, pattern) + except: + ExceptionClass, e = sys.exc_info()[:2] + if not isinstance(e, Exception): + # for BaseException exceptions + raise + yield _make_failed_load_tests(package.__name__, e, + self.suiteClass) + + +############################################## +# relpath implementation taken from Python 2.7 + +if not hasattr(os.path, 'relpath'): + if os.path is sys.modules.get('ntpath'): + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + + if not path: + raise ValueError("no path specified") + start_list = os.path.abspath(start).split(os.path.sep) + path_list = os.path.abspath(path).split(os.path.sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = os.path.splitunc(path) + unc_start, rest = os.path.splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. 
+ for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return os.path.curdir + return os.path.join(*rel_list) + + else: + # default to posixpath definition + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + + if not path: + raise ValueError("no path specified") + + start_list = os.path.abspath(start).split(os.path.sep) + path_list = os.path.abspath(path).split(os.path.sep) + + # Work out how much of the filepath is shared by start and path. + i = len(os.path.commonprefix([start_list, path_list])) + + rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return os.path.curdir + return os.path.join(*rel_list) +else: + from os.path import relpath + +############################################# + + +USAGE = """\ +Usage: discover.py [options] + +Options: + -v, --verbose Verbose output + -s directory Directory to start discovery ('.' default) + -p pattern Pattern to match test files ('test*.py' default) + -t directory Top level directory of project (default to + start directory) + +For test discovery all test modules must be importable from the top +level directory of the project. +""" + +def _usage_exit(msg=None): + if msg: + print (msg) + print (USAGE) + sys.exit(2) + + +def _do_discovery(argv, verbosity, Loader): + # handle command line args for test discovery + parser = optparse.OptionParser() + parser.add_option('-v', '--verbose', dest='verbose', default=False, + help='Verbose output', action='store_true') + parser.add_option('-s', '--start-directory', dest='start', default='.', + help="Directory to start discovery ('.' 
default)") + parser.add_option('-p', '--pattern', dest='pattern', default='test*.py', + help="Pattern to match tests ('test*.py' default)") + parser.add_option('-t', '--top-level-directory', dest='top', default=None, + help='Top level directory of project (defaults to start directory)') + + options, args = parser.parse_args(argv) + if len(args) > 3: + _usage_exit() + + for name, value in zip(('start', 'pattern', 'top'), args): + setattr(options, name, value) + + if options.verbose: + verbosity = 2 + + start_dir = options.start + pattern = options.pattern + top_level_dir = options.top + + loader = Loader() + return loader.discover(start_dir, pattern, top_level_dir), verbosity + + +def _run_tests(tests, testRunner, verbosity, exit): + if isinstance(testRunner, class_types): + try: + testRunner = testRunner(verbosity=verbosity) + except TypeError: + # didn't accept the verbosity argument + testRunner = testRunner() + result = testRunner.run(tests) + if exit: + sys.exit(not result.wasSuccessful()) + return result + + +def main(argv=None, testRunner=None, testLoader=None, exit=True, verbosity=1): + if testLoader is None: + testLoader = DiscoveringTestLoader + if testRunner is None: + testRunner = unittest.TextTestRunner + if argv is None: + argv = sys.argv[1:] + + tests, verbosity = _do_discovery(argv, verbosity, testLoader) + return _run_tests(tests, testRunner, verbosity, exit) + +defaultTestLoader = DiscoveringTestLoader() + +def collector(): + # import __main__ triggers code re-execution + __main__ = sys.modules['__main__'] + setupDir = os.path.abspath(os.path.dirname(__main__.__file__)) + return defaultTestLoader.discover(setupDir) + +if __name__ == '__main__': + if sys.argv[0] is None: + # fix for weird behaviour when run with python -m + # from a zipped egg. 
+ sys.argv[0] = 'discover.py' + main() diff --git a/test/3rdparty/discover-0.4.0/setup.cfg b/test/3rdparty/discover-0.4.0/setup.cfg new file mode 100644 index 00000000000..51964789b3d --- /dev/null +++ b/test/3rdparty/discover-0.4.0/setup.cfg @@ -0,0 +1,2 @@ +[sdist] +force-manifest = 1
\ No newline at end of file diff --git a/test/3rdparty/discover-0.4.0/setup.py b/test/3rdparty/discover-0.4.0/setup.py new file mode 100644 index 00000000000..2a0a0e7c218 --- /dev/null +++ b/test/3rdparty/discover-0.4.0/setup.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# setup.py +# Install script for discover.py +# Copyright (C) 2009-2010 Michael Foord +# E-mail: michael AT voidspace DOT org DOT uk + +# This software is licensed under the terms of the BSD license. +# http://www.voidspace.org.uk/python/license.shtml + +import sys +from distutils.core import setup +from discover import __version__ as VERSION + + +NAME = 'discover' +MODULES = ('discover',) +DESCRIPTION = 'Test discovery for unittest. Backported from Python 2.7 for Python 2.4+' +URL = 'http://pypi.python.org/pypi/discover/' +CLASSIFIERS = [ + 'Development Status :: 4 - Beta', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2.4', + 'Programming Language :: Python :: 2.5', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.0', + 'Programming Language :: Python :: 3.1', + 'Programming Language :: Python :: 3.2', + 'Operating System :: OS Independent', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Software Development :: Testing', +] +AUTHOR = 'Michael Foord' +AUTHOR_EMAIL = 'michael@voidspace.org.uk' +KEYWORDS = "unittest, testing, tests".split(', ') +LONG_DESCRIPTION = open('README.txt').read() + + +params = dict( + name=NAME, + version=VERSION, + description=DESCRIPTION, + long_description=LONG_DESCRIPTION, + author=AUTHOR, + author_email=AUTHOR_EMAIL, + url=URL, + py_modules=MODULES, + classifiers=CLASSIFIERS, + keywords=KEYWORDS +) + + +try: + from setuptools import setup 
+except ImportError: + from distutils.core import setup +else: + params.update(dict( + entry_points = { + 'console_scripts': [ + 'discover = discover:main', + ], + }, + )) + params['test_suite'] = 'discover.collector' + +setup(**params) + + diff --git a/test/3rdparty/testscenarios-0.2/.bzrignore b/test/3rdparty/testscenarios-0.2/.bzrignore new file mode 100644 index 00000000000..336aaca369d --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/.bzrignore @@ -0,0 +1,5 @@ +TAGS +tags +lib/testtools +MANIFEST +dist diff --git a/test/3rdparty/testscenarios-0.2/Apache-2.0 b/test/3rdparty/testscenarios-0.2/Apache-2.0 new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/Apache-2.0 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/test/3rdparty/testscenarios-0.2/BSD b/test/3rdparty/testscenarios-0.2/BSD new file mode 100644 index 00000000000..0e75db647b3 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/BSD @@ -0,0 +1,26 @@ +Copyright (c) Robert Collins and Testscenarios contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of Robert Collins nor the names of Subunit contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS'' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. diff --git a/test/3rdparty/testscenarios-0.2/COPYING b/test/3rdparty/testscenarios-0.2/COPYING new file mode 100644 index 00000000000..ee16c4ecaf9 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/COPYING @@ -0,0 +1,31 @@ +Testscenarios is licensed under two licenses, the Apache License, Version 2.0 +or the 3-clause BSD License. You may use this project under either of these +licenses - choose the one that works best for you. + +We require contributions to be licensed under both licenses. The primary +difference between them is that the Apache license takes care of potential +issues with Patents and other intellectual property concerns that some users +or contributors may find important. + +Generally every source file in Testscenarios needs a license grant under both +these licenses. As the code is shipped as a single unit, a brief form is used: +---- +Copyright (c) [yyyy][,yyyy]* [name or 'Testscenarios Contributors'] + +Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +license at the users choice. A copy of both licenses are available in the +project source as Apache-2.0 and BSD. You may not use this file except in +compliance with one of these two licences. + +Unless required by applicable law or agreed to in writing, software +distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +license you chose for the specific language governing permissions and +limitations under that license. +---- + +Code that has been incorporated into Testscenarios from other projects will +naturally be under its own license, and will retain that license. + +A known list of such code is maintained here: +* No entries. diff --git a/test/3rdparty/testscenarios-0.2/GOALS b/test/3rdparty/testscenarios-0.2/GOALS new file mode 100644 index 00000000000..68be00129b2 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/GOALS @@ -0,0 +1,25 @@ + +testscenarios goals +=================== + + * nice, declarative interface for multiplying tests by scenarios. + + * plays nice with testresources - when a scenario uses a resource, the + resource ordering logic should be able to group them together. + + * (at user discretion) plays nice with $random test discovery + + * arbitrary post-load multiplication. + + * cross-productable scenarios (for X and for Y) + + * extenable scenarios (for X using Y) + + * scenarios and the tests that use them are loosely coupled + + * tests that use scenarios should be easy to debug + + * fast + + * usable in trial, bzr, Zope testrunner, nose and the default unittest + TestRunner diff --git a/test/3rdparty/testscenarios-0.2/HACKING b/test/3rdparty/testscenarios-0.2/HACKING new file mode 100644 index 00000000000..0c68ee7da90 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/HACKING @@ -0,0 +1,38 @@ +Contributing to testscenarios +============================= + +Code access ++++++++++++ + +Branch from the trunk (all patches should be for trunk unless there are +exceptional circumstances):: + + bzr branch lp:testscenarios path-to-new-local-branch + +Publish your branches whereever you like, I encourage launchpad hosting though, +as it can notify me of new testscenarios branches:: + + bzr push lp:~YOURUSERNAME/testscearios/YOURBRANCHNAME + +Copyright ++++++++++ + +Testscenarios is Copyright (C) 2009 Robert Collins. 
I'd like to be able to +offer it up for stdlib inclusion once it has proved itself, so am asking for +copyright assignment to me - or for your contributions to be under either the +BSD or Apache-2.0 licences that Testscenarios are with (which permit inclusion +in Python). + +Coding standards +++++++++++++++++ + +PEP-8 coding style please, though I'm not nitpicky. Make sure that 'make check' +passes before sending in a patch. + +Code arrangement +++++++++++++++++ + +The ``testscenarios`` module should simply import classes and functions from +more specific modules, rather than becoming large and bloated itself. For +instance, TestWithScenarios lives in testscenarios.testcase, and is imported in +the testscenarios __init__.py. diff --git a/test/3rdparty/testscenarios-0.2/MANIFEST.in b/test/3rdparty/testscenarios-0.2/MANIFEST.in new file mode 100644 index 00000000000..0edefa19588 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/MANIFEST.in @@ -0,0 +1,10 @@ +include .bzrignore +include Apache-2.0 +include BSD +include COPYING +include GOALS +include HACKING +include MANIFEST.in +include Makefile +include NEWS +include doc/*.py diff --git a/test/3rdparty/testscenarios-0.2/Makefile b/test/3rdparty/testscenarios-0.2/Makefile new file mode 100644 index 00000000000..c38edf6bfe7 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/Makefile @@ -0,0 +1,19 @@ +PYTHONPATH:=$(shell pwd)/lib:${PYTHONPATH} +PYTHON ?= python + +all: check + +check: + PYTHONPATH=$(PYTHONPATH) $(PYTHON) -m testtools.run \ + testscenarios.test_suite + +clean: + find . 
-name '*.pyc' -print0 | xargs -0 rm -f + +TAGS: lib/testscenarios/*.py lib/testscenarios/tests/*.py + ctags -e -R lib/testscenarios/ + +tags: lib/testscenarios/*.py lib/testscenarios/tests/*.py + ctags -R lib/testscenarios/ + +.PHONY: all check clean diff --git a/test/3rdparty/testscenarios-0.2/NEWS b/test/3rdparty/testscenarios-0.2/NEWS new file mode 100644 index 00000000000..311d57664b2 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/NEWS @@ -0,0 +1,37 @@ +--------------------------- +testscenarios release notes +--------------------------- + + +IN DEVELOPMENT +~~~~~~~~~~~~~~ + +0.2 +~~~ + +CHANGES: + +* Adjust the cloned tests ``shortDescription`` if one is present. (Ben Finney) + +0.1 +~~~ + +CHANGES: + +* Created project. The primary interfaces are + ``testscenarios.TestWithScenarios`` and + ``testscenarios.generate_scenarios``. Documentation is primarily in README. + (Robert Collins) + +* Make the README documentation doctest compatible, to be sure it works. + Also various presentation and language touchups. 
(Martin Pool) + (Adjusted to use doctest directly, and to not print the demo runners + output to stderror during make check - Robert Collins) + +IMPROVEMENTS: + +BUG FIXES: + +API CHANGES: + +INTERNALS: diff --git a/test/3rdparty/testscenarios-0.2/PKG-INFO b/test/3rdparty/testscenarios-0.2/PKG-INFO new file mode 100644 index 00000000000..4408c965685 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/PKG-INFO @@ -0,0 +1,274 @@ +Metadata-Version: 1.0 +Name: testscenarios +Version: 0.2 +Summary: Testscenarios, a pyunit extension for dependency injection +Home-page: https://launchpad.net/testscenarios +Author: Robert Collins +Author-email: robertc@robertcollins.net +License: UNKNOWN +Description: ***************************************************************** + testscenarios: extensions to python unittest to support scenarios + ***************************************************************** + + Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> + + Licensed under either the Apache License, Version 2.0 or the BSD 3-clause + license at the users choice. A copy of both licenses are available in the + project source as Apache-2.0 and BSD. You may not use this file except in + compliance with one of these two licences. + + Unless required by applicable law or agreed to in writing, software + distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + license you chose for the specific language governing permissions and + limitations under that license. + + + testscenarios provides clean dependency injection for python unittest style + tests. This can be used for interface testing (testing many implementations via + a single test suite) or for classic dependency injection (provide tests with + dependencies externally to the test code itself, allowing easy testing in + different situations). 
+ + Dependencies + ============ + + * Python 2.4+ + * testtools <https://launchpad.net/testtools> + + + Why TestScenarios + ================= + + Standard Python unittest.py provides on obvious method for running a single + test_foo method with two (or more) scenarios: by creating a mix-in that + provides the functions, objects or settings that make up the scenario. This is + however limited and unsatisfying. Firstly, when two projects are cooperating + on a test suite (for instance, a plugin to a larger project may want to run + the standard tests for a given interface on its implementation), then it is + easy for them to get out of sync with each other: when the list of TestCase + classes to mix-in with changes, the plugin will either fail to run some tests + or error trying to run deleted tests. Secondly, its not as easy to work with + runtime-created-subclasses (a way of dealing with the aforementioned skew) + because they require more indirection to locate the source of the test, and will + often be ignored by e.g. pyflakes pylint etc. + + It is the intent of testscenarios to make dynamically running a single test + in multiple scenarios clear, easy to debug and work with even when the list + of scenarios is dynamically generated. + + + Defining Scenarios + ================== + + A **scenario** is a tuple of a string name for the scenario, and a dict of + parameters describing the scenario. The name is appended to the test name, and + the parameters are made available to the test instance when it's run. + + Scenarios are presented in **scenario lists** which are typically Python lists + but may be any iterable. + + + Getting Scenarios applied + ========================= + + At its heart the concept is simple. For a given test object with a list of + scenarios we prepare a new test object for each scenario. This involves: + + * Clone the test to a new test with a new id uniquely distinguishing it. 
+ * Apply the scenario to the test by setting each key, value in the scenario + as attributes on the test object. + + There are some complicating factors around making this happen seamlessly. These + factors are in two areas: + + * Choosing what scenarios to use. (See Setting Scenarios For A Test). + * Getting the multiplication to happen. + + Subclasssing + ++++++++++++ + + If you can subclass TestWithScenarios, then the ``run()`` method in + TestWithScenarios will take care of test multiplication. It will at test + execution act as a generator causing multiple tests to execute. For this to + work reliably TestWithScenarios must be first in the MRO and you cannot + override run() or __call__. This is the most robust method, in the sense + that any test runner or test loader that obeys the python unittest protocol + will run all your scenarios. + + Manual generation + +++++++++++++++++ + + If you cannot subclass TestWithScenarios (e.g. because you are using + TwistedTestCase, or TestCaseWithResources, or any one of a number of other + useful test base classes, or need to override run() or __call__ yourself) then + you can cause scenario application to happen later by calling + ``testscenarios.generate_scenarios()``. For instance:: + + >>> import unittest + >>> import StringIO + >>> from testscenarios.scenarios import generate_scenarios + + This can work with loaders and runners from the standard library, or possibly other + implementations:: + + >>> loader = unittest.TestLoader() + >>> test_suite = unittest.TestSuite() + >>> runner = unittest.TextTestRunner(stream=StringIO.StringIO()) + + >>> mytests = loader.loadTestsFromNames(['doc.test_sample']) + >>> test_suite.addTests(generate_scenarios(mytests)) + >>> runner.run(test_suite) + <unittest._TextTestResult run=1 errors=0 failures=0> + + Testloaders + +++++++++++ + + Some test loaders support hooks like ``load_tests`` and ``test_suite``. 
+ Ensuring your tests have had scenario application done through these hooks can + be a good idea - it means that external test runners (which support these hooks + like ``nose``, ``trial``, ``tribunal``) will still run your scenarios. (Of + course, if you are using the subclassing approach this is already a surety). + With ``load_tests``:: + + >>> def load_tests(standard_tests, module, loader): + ... result = loader.suiteClass() + ... result.addTests(generate_scenarios(standard_tests)) + ... return result + + With ``test_suite``:: + + >>> def test_suite(): + ... loader = TestLoader() + ... tests = loader.loadTestsFromName(__name__) + ... result = loader.suiteClass() + ... result.addTests(generate_scenarios(tests)) + ... return result + + + Setting Scenarios for a test + ============================ + + A sample test using scenarios can be found in the doc/ folder. + + See `pydoc testscenarios` for details. + + On the TestCase + +++++++++++++++ + + You can set a scenarios attribute on the test case:: + + >>> class MyTest(unittest.TestCase): + ... + ... scenarios = [ + ... ('scenario1', dict(param=1)), + ... ('scenario2', dict(param=2)),] + + This provides the main interface by which scenarios are found for a given test. + Subclasses will inherit the scenarios (unless they override the attribute). + + After loading + +++++++++++++ + + Test scenarios can also be generated arbitrarily later, as long as the test has + not yet run. Simply replace (or alter, but be aware that many tests may share a + single scenarios attribute) the scenarios attribute. For instance in this + example some third party tests are extended to run with a custom scenario. :: + + >>> import testtools + >>> class TestTransport: + ... """Hypothetical test case for bzrlib transport tests""" + ... pass + ... + >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames( + ... ['doc.test_sample']) + ... + >>> for test in testtools.iterate_tests(stock_library_tests): + ... 
if isinstance(test, TestTransport): + ... test.scenarios = test.scenarios + [my_vfs_scenario] + ... + >>> suite = unittest.TestSuite() + >>> suite.addTests(generate_scenarios(stock_library_tests)) + + Generated tests don't have a ``scenarios`` list, because they don't normally + require any more expansion. However, you can add a ``scenarios`` list back on + to them, and then run them through ``generate_scenarios`` again to generate the + cross product of tests. :: + + >>> class CrossProductDemo(unittest.TestCase): + ... scenarios = [('scenario_0_0', {}), + ... ('scenario_0_1', {})] + ... def test_foo(self): + ... return + ... + >>> suite = unittest.TestSuite() + >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo"))) + >>> for test in testtools.iterate_tests(suite): + ... test.scenarios = [ + ... ('scenario_1_0', {}), + ... ('scenario_1_1', {})] + ... + >>> suite2 = unittest.TestSuite() + >>> suite2.addTests(generate_scenarios(suite)) + >>> print suite2.countTestCases() + 4 + + Dynamic Scenarios + +++++++++++++++++ + + A common use case is to have the list of scenarios be dynamic based on plugins + and available libraries. An easy way to do this is to provide a global scope + scenarios somewhere relevant to the tests that will use it, and then that can + be customised, or dynamically populate your scenarios from a registry etc. + For instance:: + + >>> hash_scenarios = [] + >>> try: + ... from hashlib import md5 + ... except ImportError: + ... pass + ... else: + ... hash_scenarios.append(("md5", dict(hash=md5))) + >>> try: + ... from hashlib import sha1 + ... except ImportError: + ... pass + ... else: + ... hash_scenarios.append(("sha1", dict(hash=sha1))) + ... + >>> class TestHashContract(unittest.TestCase): + ... + ... scenarios = hash_scenarios + ... + >>> class TestHashPerformance(unittest.TestCase): + ... + ... 
scenarios = hash_scenarios + + + Forcing Scenarios + +++++++++++++++++ + + The ``apply_scenarios`` function can be useful to apply scenarios to a test + that has none applied. ``apply_scenarios`` is the workhorse for + ``generate_scenarios``, except it takes the scenarios passed in rather than + introspecting the test object to determine the scenarios. The + ``apply_scenarios`` function does not reset the test scenarios attribute, + allowing it to be used to layer scenarios without affecting existing scenario + selection. + + + Advice on Writing Scenarios + =========================== + + If a parameterised test is because of a bug run without being parameterized, + it should fail rather than running with defaults, because this can hide bugs. + +Platform: UNKNOWN +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing diff --git a/test/3rdparty/testscenarios-0.2/README b/test/3rdparty/testscenarios-0.2/README new file mode 100644 index 00000000000..b827cb67a82 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/README @@ -0,0 +1,256 @@ +***************************************************************** +testscenarios: extensions to python unittest to support scenarios +***************************************************************** + + Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> + + Licensed under either the Apache License, Version 2.0 or the BSD 3-clause + license at the users choice. A copy of both licenses are available in the + project source as Apache-2.0 and BSD. You may not use this file except in + compliance with one of these two licences. 
+ + Unless required by applicable law or agreed to in writing, software + distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + license you chose for the specific language governing permissions and + limitations under that license. + + +testscenarios provides clean dependency injection for python unittest style +tests. This can be used for interface testing (testing many implementations via +a single test suite) or for classic dependency injection (provide tests with +dependencies externally to the test code itself, allowing easy testing in +different situations). + +Dependencies +============ + +* Python 2.4+ +* testtools <https://launchpad.net/testtools> + + +Why TestScenarios +================= + +Standard Python unittest.py provides on obvious method for running a single +test_foo method with two (or more) scenarios: by creating a mix-in that +provides the functions, objects or settings that make up the scenario. This is +however limited and unsatisfying. Firstly, when two projects are cooperating +on a test suite (for instance, a plugin to a larger project may want to run +the standard tests for a given interface on its implementation), then it is +easy for them to get out of sync with each other: when the list of TestCase +classes to mix-in with changes, the plugin will either fail to run some tests +or error trying to run deleted tests. Secondly, its not as easy to work with +runtime-created-subclasses (a way of dealing with the aforementioned skew) +because they require more indirection to locate the source of the test, and will +often be ignored by e.g. pyflakes pylint etc. + +It is the intent of testscenarios to make dynamically running a single test +in multiple scenarios clear, easy to debug and work with even when the list +of scenarios is dynamically generated. 
+ + +Defining Scenarios +================== + +A **scenario** is a tuple of a string name for the scenario, and a dict of +parameters describing the scenario. The name is appended to the test name, and +the parameters are made available to the test instance when it's run. + +Scenarios are presented in **scenario lists** which are typically Python lists +but may be any iterable. + + +Getting Scenarios applied +========================= + +At its heart the concept is simple. For a given test object with a list of +scenarios we prepare a new test object for each scenario. This involves: + +* Clone the test to a new test with a new id uniquely distinguishing it. +* Apply the scenario to the test by setting each key, value in the scenario + as attributes on the test object. + +There are some complicating factors around making this happen seamlessly. These +factors are in two areas: + +* Choosing what scenarios to use. (See Setting Scenarios For A Test). +* Getting the multiplication to happen. + +Subclasssing +++++++++++++ + +If you can subclass TestWithScenarios, then the ``run()`` method in +TestWithScenarios will take care of test multiplication. It will at test +execution act as a generator causing multiple tests to execute. For this to +work reliably TestWithScenarios must be first in the MRO and you cannot +override run() or __call__. This is the most robust method, in the sense +that any test runner or test loader that obeys the python unittest protocol +will run all your scenarios. + +Manual generation ++++++++++++++++++ + +If you cannot subclass TestWithScenarios (e.g. because you are using +TwistedTestCase, or TestCaseWithResources, or any one of a number of other +useful test base classes, or need to override run() or __call__ yourself) then +you can cause scenario application to happen later by calling +``testscenarios.generate_scenarios()``. 
For instance:: + + >>> import unittest + >>> import StringIO + >>> from testscenarios.scenarios import generate_scenarios + +This can work with loaders and runners from the standard library, or possibly other +implementations:: + + >>> loader = unittest.TestLoader() + >>> test_suite = unittest.TestSuite() + >>> runner = unittest.TextTestRunner(stream=StringIO.StringIO()) + + >>> mytests = loader.loadTestsFromNames(['doc.test_sample']) + >>> test_suite.addTests(generate_scenarios(mytests)) + >>> runner.run(test_suite) + <unittest._TextTestResult run=1 errors=0 failures=0> + +Testloaders ++++++++++++ + +Some test loaders support hooks like ``load_tests`` and ``test_suite``. +Ensuring your tests have had scenario application done through these hooks can +be a good idea - it means that external test runners (which support these hooks +like ``nose``, ``trial``, ``tribunal``) will still run your scenarios. (Of +course, if you are using the subclassing approach this is already a surety). +With ``load_tests``:: + + >>> def load_tests(standard_tests, module, loader): + ... result = loader.suiteClass() + ... result.addTests(generate_scenarios(standard_tests)) + ... return result + +With ``test_suite``:: + + >>> def test_suite(): + ... loader = TestLoader() + ... tests = loader.loadTestsFromName(__name__) + ... result = loader.suiteClass() + ... result.addTests(generate_scenarios(tests)) + ... return result + + +Setting Scenarios for a test +============================ + +A sample test using scenarios can be found in the doc/ folder. + +See `pydoc testscenarios` for details. + +On the TestCase ++++++++++++++++ + +You can set a scenarios attribute on the test case:: + + >>> class MyTest(unittest.TestCase): + ... + ... scenarios = [ + ... ('scenario1', dict(param=1)), + ... ('scenario2', dict(param=2)),] + +This provides the main interface by which scenarios are found for a given test. +Subclasses will inherit the scenarios (unless they override the attribute). 
+ +After loading ++++++++++++++ + +Test scenarios can also be generated arbitrarily later, as long as the test has +not yet run. Simply replace (or alter, but be aware that many tests may share a +single scenarios attribute) the scenarios attribute. For instance in this +example some third party tests are extended to run with a custom scenario. :: + + >>> import testtools + >>> class TestTransport: + ... """Hypothetical test case for bzrlib transport tests""" + ... pass + ... + >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames( + ... ['doc.test_sample']) + ... + >>> for test in testtools.iterate_tests(stock_library_tests): + ... if isinstance(test, TestTransport): + ... test.scenarios = test.scenarios + [my_vfs_scenario] + ... + >>> suite = unittest.TestSuite() + >>> suite.addTests(generate_scenarios(stock_library_tests)) + +Generated tests don't have a ``scenarios`` list, because they don't normally +require any more expansion. However, you can add a ``scenarios`` list back on +to them, and then run them through ``generate_scenarios`` again to generate the +cross product of tests. :: + + >>> class CrossProductDemo(unittest.TestCase): + ... scenarios = [('scenario_0_0', {}), + ... ('scenario_0_1', {})] + ... def test_foo(self): + ... return + ... + >>> suite = unittest.TestSuite() + >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo"))) + >>> for test in testtools.iterate_tests(suite): + ... test.scenarios = [ + ... ('scenario_1_0', {}), + ... ('scenario_1_1', {})] + ... + >>> suite2 = unittest.TestSuite() + >>> suite2.addTests(generate_scenarios(suite)) + >>> print suite2.countTestCases() + 4 + +Dynamic Scenarios ++++++++++++++++++ + +A common use case is to have the list of scenarios be dynamic based on plugins +and available libraries. 
An easy way to do this is to provide a global scope +scenarios somewhere relevant to the tests that will use it, and then that can +be customised, or dynamically populate your scenarios from a registry etc. +For instance:: + + >>> hash_scenarios = [] + >>> try: + ... from hashlib import md5 + ... except ImportError: + ... pass + ... else: + ... hash_scenarios.append(("md5", dict(hash=md5))) + >>> try: + ... from hashlib import sha1 + ... except ImportError: + ... pass + ... else: + ... hash_scenarios.append(("sha1", dict(hash=sha1))) + ... + >>> class TestHashContract(unittest.TestCase): + ... + ... scenarios = hash_scenarios + ... + >>> class TestHashPerformance(unittest.TestCase): + ... + ... scenarios = hash_scenarios + + +Forcing Scenarios ++++++++++++++++++ + +The ``apply_scenarios`` function can be useful to apply scenarios to a test +that has none applied. ``apply_scenarios`` is the workhorse for +``generate_scenarios``, except it takes the scenarios passed in rather than +introspecting the test object to determine the scenarios. The +``apply_scenarios`` function does not reset the test scenarios attribute, +allowing it to be used to layer scenarios without affecting existing scenario +selection. + + +Advice on Writing Scenarios +=========================== + +If a parameterised test is because of a bug run without being parameterized, +it should fail rather than running with defaults, because this can hide bugs. diff --git a/test/3rdparty/testscenarios-0.2/doc/__init__.py b/test/3rdparty/testscenarios-0.2/doc/__init__.py new file mode 100644 index 00000000000..4dbad55dcbb --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/doc/__init__.py @@ -0,0 +1,16 @@ +# testscenarios: extensions to python unittest to allow declarative +# dependency injection ('scenarios') by tests. +# +# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. 
A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. + diff --git a/test/3rdparty/testscenarios-0.2/doc/example.py b/test/3rdparty/testscenarios-0.2/doc/example.py new file mode 100644 index 00000000000..a8d195fade2 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/doc/example.py @@ -0,0 +1,30 @@ +# testscenarios: extensions to python unittest to allow declarative +# dependency injection ('scenarios') by tests. +# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
+ +"""Example TestScenario.""" + +from testscenarios import TestWithScenarios + + +scenario1 = ('basic', {'attribute': 'value'}) +scenario2 = ('advanced', {'attribute': 'value2'}) + + +class SampleWithScenarios(TestWithScenarios): + + scenarios = [scenario1, scenario2] + + def test_demo(self): + self.assertIsInstance(self.attribute, str) diff --git a/test/3rdparty/testscenarios-0.2/doc/test_sample.py b/test/3rdparty/testscenarios-0.2/doc/test_sample.py new file mode 100644 index 00000000000..a0b00a5ef54 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/doc/test_sample.py @@ -0,0 +1,22 @@ +# testscenarios: extensions to python unittest to allow declarative +# dependency injection ('scenarios') by tests. +# +# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. + +import unittest + +class TestSample(unittest.TestCase): + + def test_so_easy(self): + pass diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/__init__.py b/test/3rdparty/testscenarios-0.2/lib/testscenarios/__init__.py new file mode 100644 index 00000000000..d608f13e842 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/lib/testscenarios/__init__.py @@ -0,0 +1,64 @@ +# testscenarios: extensions to python unittest to allow declarative +# dependency injection ('scenarios') by tests. 
+# +# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. + + +"""Support for running tests with different scenarios declaratively + +Testscenarios provides clean dependency injection for python unittest style +tests. This can be used for interface testing (testing many implementations via +a single test suite) or for classic dependency injection (provide tests with +dependencies externally to the test code itself, allowing easy testing in +different situations). + +See the README for a manual, and the docstrings on individual functions and +methods for details. +""" + +# same format as sys.version_info: "A tuple containing the five components of +# the version number: major, minor, micro, releaselevel, and serial. All +# values except releaselevel are integers; the release level is 'alpha', +# 'beta', 'candidate', or 'final'. The version_info value corresponding to the +# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a +# releaselevel of 'dev' for unreleased under-development code. +# +# If the releaselevel is 'alpha' then the major/minor/micro components are not +# established at this point, and setup.py will use a version of next-$(revno). +# If the releaselevel is 'final', then the tarball will be major.minor.micro. +# Otherwise it is major.minor.micro~$(revno). 
# testscenarios: extensions to python unittest to allow declarative
# dependency injection ('scenarios') by tests.
#
# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.

# --- lib/testscenarios/__init__.py (module body; docstring precedes) ---

__version__ = (0, 2, 0, 'final', 0)

__all__ = [
    'TestWithScenarios',
    'apply_scenario',
    'apply_scenarios',
    'generate_scenarios',
    ]


import unittest

from testscenarios.scenarios import apply_scenario, generate_scenarios
from testscenarios.testcase import TestWithScenarios


def test_suite():
    """Return the complete testscenarios package test suite."""
    # Imported lazily to avoid a circular import at package load time.
    import testscenarios.tests
    return testscenarios.tests.test_suite()


def load_tests(standard_tests, module, loader):
    """load_tests protocol hook: append this package's own tests."""
    standard_tests.addTests(loader.loadTestsFromNames(["testscenarios.tests"]))
    return standard_tests


# --- lib/testscenarios/scenarios.py ---

__all__ = [
    'apply_scenario',
    'apply_scenarios',
    'generate_scenarios',
    ]

import unittest

from testtools.testcase import clone_test_with_new_id
from testtools import iterate_tests


def apply_scenario(scenario, test):
    """Apply scenario to test.

    :param scenario: A tuple (name, parameters) to apply to the test. The test
        is cloned, its id adjusted to have (name) after it, and the parameters
        dict is used to update the new test.
    :param test: The test to apply the scenario to. This test is unaltered.
    :return: A new test cloned from test, with the scenario applied.
    """
    # The original signature used Python 2 tuple parameter unpacking
    # (``def apply_scenario((name, parameters), test)``), which PEP 3113
    # removed in Python 3; unpacking in the body works on both and keeps
    # the call interface identical for positional callers.
    name, parameters = scenario
    scenario_suffix = '(' + name + ')'
    newtest = clone_test_with_new_id(
        test, test.id() + scenario_suffix)
    test_desc = test.shortDescription()
    if test_desc is not None:
        # e.g. "Does a thing. (scenario_name)" — keep the suffix visible in
        # test runner output.
        newtest_desc = "%s %s" % (test_desc, scenario_suffix)
        newtest.shortDescription = (lambda: newtest_desc)
    # dict.items() exists on Python 2 and 3; iteritems() is Python 2-only.
    for key, value in parameters.items():
        setattr(newtest, key, value)
    return newtest


def apply_scenarios(scenarios, test):
    """Apply many scenarios to a test.

    :param scenarios: An iterable of scenarios.
    :param test: A test to apply the scenarios to.
    :return: A generator of tests.
    """
    for scenario in scenarios:
        yield apply_scenario(scenario, test)


def generate_scenarios(test_or_suite):
    """Yield the tests in test_or_suite with scenario multiplication done.

    TestCase objects with no scenarios specified are yielded unaltered. Tests
    with scenarios are not yielded at all, instead the results of multiplying
    them by the scenarios they specified gets yielded.

    :param test_or_suite: A TestCase or TestSuite.
    :return: A generator of tests - objects satisfying the TestCase protocol.
    """
    for test in iterate_tests(test_or_suite):
        scenarios = getattr(test, 'scenarios', None)
        if scenarios:
            for newtest in apply_scenarios(scenarios, test):
                # Clear scenarios on the generated test so a second pass of
                # generate_scenarios does not multiply it again by accident.
                newtest.scenarios = None
                yield newtest
        else:
            yield test
+ """ + + def _get_scenarios(self): + return getattr(self, 'scenarios', None) + + def countTestCases(self): + scenarios = self._get_scenarios() + if not scenarios: + return 1 + else: + return len(scenarios) + + def debug(self): + scenarios = self._get_scenarios() + if scenarios: + for test in generate_scenarios(self): + test.debug() + else: + return super(TestWithScenarios, self).debug() + + def run(self, result=None): + scenarios = self._get_scenarios() + if scenarios: + for test in generate_scenarios(self): + test.run(result) + return + else: + return super(TestWithScenarios, self).run(result) diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/__init__.py b/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/__init__.py new file mode 100644 index 00000000000..e5e2bbeaa84 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/__init__.py @@ -0,0 +1,42 @@ +# testscenarios: extensions to python unittest to allow declarative +# dependency injection ('scenarios') by tests. +# +# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
import doctest
import sys
import unittest

import testscenarios


def test_suite():
    """Return the test suite for testscenarios.tests.

    Delegates to load_tests so the two entry points stay in sync.
    """
    # (The original also built an unused TestSuite named ``result`` here;
    # removed as dead code.)
    standard_tests = unittest.TestSuite()
    module = sys.modules['testscenarios.tests']
    loader = unittest.TestLoader()
    return load_tests(standard_tests, module, loader)


def load_tests(standard_tests, module, loader):
    """load_tests protocol hook: add the unit tests and README doctests."""
    test_modules = [
        'testcase',
        'scenarios',
        ]
    prefix = "testscenarios.tests.test_"
    test_mod_names = [prefix + test_module for test_module in test_modules]
    standard_tests.addTests(loader.loadTestsFromNames(test_mod_names))
    # Report only the first doctest failure per file to keep output readable.
    doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
    standard_tests.addTest(doctest.DocFileSuite("../../../README"))
    return standard_tests
import unittest

import testscenarios
from testscenarios.scenarios import (
    apply_scenario,
    apply_scenarios,
    generate_scenarios,
    )
import testtools
# NOTE: the original also imported LoggingResult from testtools.tests.helpers;
# it was never used in this module and has been removed.


class TestGenerateScenarios(testtools.TestCase):
    """Tests for generate_scenarios."""

    def hook_apply_scenarios(self):
        """Monkeypatch apply_scenarios to record calls; return the log."""
        # Restore the real function when this test finishes.
        self.addCleanup(setattr, testscenarios.scenarios, 'apply_scenarios',
            apply_scenarios)
        log = []
        def capture(scenarios, test):
            log.append((scenarios, test))
            return apply_scenarios(scenarios, test)
        testscenarios.scenarios.apply_scenarios = capture
        return log

    def test_generate_scenarios_preserves_normal_test(self):
        class ReferenceTest(unittest.TestCase):
            def test_pass(self):
                pass
        test = ReferenceTest("test_pass")
        log = self.hook_apply_scenarios()
        self.assertEqual([test], list(generate_scenarios(test)))
        self.assertEqual([], log)

    def test_tests_with_scenarios_calls_apply_scenarios(self):
        class ReferenceTest(unittest.TestCase):
            scenarios = [('demo', {})]
            def test_pass(self):
                pass
        test = ReferenceTest("test_pass")
        log = self.hook_apply_scenarios()
        tests = list(generate_scenarios(test))
        self.assertEqual(
            'testscenarios.tests.test_scenarios.ReferenceTest.test_pass(demo)',
            tests[0].id())
        self.assertEqual([([('demo', {})], test)], log)

    def test_all_scenarios_yielded(self):
        class ReferenceTest(unittest.TestCase):
            scenarios = [('1', {}), ('2', {})]
            def test_pass(self):
                pass
        test = ReferenceTest("test_pass")
        tests = list(generate_scenarios(test))
        self.assertEqual(
            'testscenarios.tests.test_scenarios.ReferenceTest.test_pass(1)',
            tests[0].id())
        self.assertEqual(
            'testscenarios.tests.test_scenarios.ReferenceTest.test_pass(2)',
            tests[1].id())

    def test_scenarios_attribute_cleared(self):
        class ReferenceTest(unittest.TestCase):
            scenarios = [
                ('1', {'foo': 1, 'bar': 2}),
                ('2', {'foo': 2, 'bar': 4})]
            def test_check_foo(self):
                pass
        test = ReferenceTest("test_check_foo")
        tests = list(generate_scenarios(test))
        for adapted in tests:
            self.assertEqual(None, adapted.scenarios)

    def test_multiple_tests(self):
        class Reference1(unittest.TestCase):
            scenarios = [('1', {}), ('2', {})]
            def test_something(self):
                pass
        class Reference2(unittest.TestCase):
            scenarios = [('3', {}), ('4', {})]
            def test_something(self):
                pass
        suite = unittest.TestSuite()
        suite.addTest(Reference1("test_something"))
        suite.addTest(Reference2("test_something"))
        tests = list(generate_scenarios(suite))
        self.assertEqual(4, len(tests))


class TestApplyScenario(testtools.TestCase):
    """Tests for apply_scenario."""

    def setUp(self):
        super(TestApplyScenario, self).setUp()

        self.scenario_name = 'demo'
        self.scenario_attrs = {'foo': 'bar'}
        self.scenario = (self.scenario_name, self.scenario_attrs)

        class ReferenceTest(unittest.TestCase):
            def test_pass(self):
                pass
            def test_pass_with_docstring(self):
                """ The test that always passes.

                This test case has a PEP 257 conformant docstring,
                with its first line being a brief synopsis and the
                rest of the docstring explaining that this test
                does nothing but pass unconditionally.

                """
                pass

        self.ReferenceTest = ReferenceTest

    def test_sets_specified_id(self):
        raw_test = self.ReferenceTest('test_pass')
        raw_id = "testscenarios.tests.test_scenarios.ReferenceTest.test_pass"
        scenario_name = self.scenario_name
        expect_id = "%(raw_id)s(%(scenario_name)s)" % vars()
        modified_test = apply_scenario(self.scenario, raw_test)
        self.assertEqual(expect_id, modified_test.id())

    def test_sets_specified_attributes(self):
        raw_test = self.ReferenceTest('test_pass')
        modified_test = apply_scenario(self.scenario, raw_test)
        self.assertEqual('bar', modified_test.foo)

    def test_appends_scenario_name_to_short_description(self):
        raw_test = self.ReferenceTest('test_pass_with_docstring')
        modified_test = apply_scenario(self.scenario, raw_test)
        raw_doc = self.ReferenceTest.test_pass_with_docstring.__doc__
        raw_desc = raw_doc.split("\n")[0].strip()
        scenario_name = self.scenario_name
        expect_desc = "%(raw_desc)s (%(scenario_name)s)" % vars()
        self.assertEqual(expect_desc, modified_test.shortDescription())


class TestApplyScenarios(testtools.TestCase):
    """Tests for apply_scenarios."""

    def test_calls_apply_scenario(self):
        self.addCleanup(setattr, testscenarios.scenarios, 'apply_scenario',
            apply_scenario)
        log = []
        def capture(scenario, test):
            log.append((scenario, test))
        testscenarios.scenarios.apply_scenario = capture
        scenarios = ["foo", "bar"]
        # list() drives the generator so every scenario is applied; the
        # return value itself is uninteresting here (dropped the unused
        # ``result`` binding the original had).
        list(apply_scenarios(scenarios, "test"))
        self.assertEqual([('foo', 'test'), ('bar', 'test')], log)

    def test_preserves_scenarios_attribute(self):
        class ReferenceTest(unittest.TestCase):
            scenarios = [('demo', {})]
            def test_pass(self):
                pass
        test = ReferenceTest("test_pass")
        tests = list(apply_scenarios(ReferenceTest.scenarios, test))
        self.assertEqual([('demo', {})], ReferenceTest.scenarios)
        self.assertEqual(ReferenceTest.scenarios, tests[0].scenarios)
b/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_testcase.py new file mode 100644 index 00000000000..6a9bbf997e2 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_testcase.py @@ -0,0 +1,145 @@ +# testscenarios: extensions to python unittest to allow declarative +# dependency injection ('scenarios') by tests. +# +# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net> +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
import unittest

import testscenarios
from testtools.tests.helpers import LoggingResult


class TestTestWithScenarios(unittest.TestCase):
    """Behavioural tests for the TestWithScenarios base class."""

    def _run_logged(self, case):
        """Run *case* against a LoggingResult; return (events, result)."""
        events = []
        res = LoggingResult(events)
        case.run(res)
        return events, res

    def test_no_scenarios_no_error(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            def test_pass(self):
                pass
        case = ReferenceTest("test_pass")
        res = unittest.TestResult()
        case.run(res)
        self.assertTrue(res.wasSuccessful())
        self.assertEqual(1, res.testsRun)

    def test_with_one_scenario_one_run(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [('demo', {})]
            def test_pass(self):
                pass
        events, res = self._run_logged(ReferenceTest("test_pass"))
        self.assertTrue(res.wasSuccessful())
        self.assertEqual(1, res.testsRun)
        self.assertEqual(
            'testscenarios.tests.test_testcase.ReferenceTest.test_pass(demo)',
            events[0][1].id())

    def test_with_two_scenarios_two_run(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [('1', {}), ('2', {})]
            def test_pass(self):
                pass
        events, res = self._run_logged(ReferenceTest("test_pass"))
        self.assertTrue(res.wasSuccessful())
        self.assertEqual(2, res.testsRun)
        self.assertEqual(
            'testscenarios.tests.test_testcase.ReferenceTest.test_pass(1)',
            events[0][1].id())
        self.assertEqual(
            'testscenarios.tests.test_testcase.ReferenceTest.test_pass(2)',
            events[4][1].id())

    def test_attributes_set(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [
                ('1', {'foo': 1, 'bar': 2}),
                ('2', {'foo': 2, 'bar': 4})]
            def test_check_foo(self):
                self.assertEqual(self.foo * 2, self.bar)
        events, res = self._run_logged(ReferenceTest("test_check_foo"))
        self.assertTrue(res.wasSuccessful())
        self.assertEqual(2, res.testsRun)

    def test_scenarios_attribute_cleared(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [
                ('1', {'foo': 1, 'bar': 2}),
                ('2', {'foo': 2, 'bar': 4})]
            def test_check_foo(self):
                self.assertEqual(self.foo * 2, self.bar)
        case = ReferenceTest("test_check_foo")
        events, res = self._run_logged(case)
        self.assertTrue(res.wasSuccessful())
        self.assertEqual(2, res.testsRun)
        # The source test keeps its scenarios; only the generated
        # variations get the attribute cleared.
        self.assertNotEqual(None, case.scenarios)
        self.assertEqual(None, events[0][1].scenarios)
        self.assertEqual(None, events[4][1].scenarios)

    def test_countTestCases_no_scenarios(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            def test_check_foo(self):
                pass
        self.assertEqual(1, ReferenceTest("test_check_foo").countTestCases())

    def test_countTestCases_empty_scenarios(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = []
            def test_check_foo(self):
                pass
        self.assertEqual(1, ReferenceTest("test_check_foo").countTestCases())

    def test_countTestCases_1_scenarios(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [('1', {'foo': 1, 'bar': 2})]
            def test_check_foo(self):
                pass
        self.assertEqual(1, ReferenceTest("test_check_foo").countTestCases())

    def test_countTestCases_2_scenarios(self):
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [
                ('1', {'foo': 1, 'bar': 2}),
                ('2', {'foo': 2, 'bar': 4})]
            def test_check_foo(self):
                pass
        self.assertEqual(2, ReferenceTest("test_check_foo").countTestCases())

    def test_debug_2_scenarios(self):
        seen = []
        class ReferenceTest(testscenarios.TestWithScenarios):
            scenarios = [
                ('1', {'foo': 1, 'bar': 2}),
                ('2', {'foo': 2, 'bar': 4})]
            def test_check_foo(self):
                seen.append(self)
        ReferenceTest("test_check_foo").debug()
        self.assertEqual(2, len(seen))
        self.assertEqual(None, seen[0].scenarios)
        self.assertEqual(None, seen[1].scenarios)
        self.assertNotEqual(seen[0].id(), seen[1].id())
b/test/3rdparty/testscenarios-0.2/setup.py new file mode 100755 index 00000000000..ace9f08c882 --- /dev/null +++ b/test/3rdparty/testscenarios-0.2/setup.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python + +from distutils.core import setup +import os.path + +description = file(os.path.join(os.path.dirname(__file__), 'README'), 'rb').read() + +setup(name="testscenarios", + version="0.2", + description="Testscenarios, a pyunit extension for dependency injection", + long_description=description, + maintainer="Robert Collins", + maintainer_email="robertc@robertcollins.net", + url="https://launchpad.net/testscenarios", + packages=['testscenarios', 'testscenarios.tests'], + package_dir = {'':'lib'}, + classifiers = [ + 'Development Status :: 6 - Mature', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Topic :: Software Development :: Quality Assurance', + 'Topic :: Software Development :: Testing', + ], + ) diff --git a/test/3rdparty/testtools-0.9.12/.bzrignore b/test/3rdparty/testtools-0.9.12/.bzrignore new file mode 100644 index 00000000000..d6aac0da189 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/.bzrignore @@ -0,0 +1,10 @@ +__pycache__ +./build +MANIFEST +dist +tags +TAGS +apidocs +_trial_temp +doc/_build +./.testrepository diff --git a/test/3rdparty/testtools-0.9.12/LICENSE b/test/3rdparty/testtools-0.9.12/LICENSE new file mode 100644 index 00000000000..42421b0b2d9 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/LICENSE @@ -0,0 +1,57 @@ +Copyright (c) 2008-2011 Jonathan M. Lange <jml@mumak.net> and the testtools +authors. 
+ +The testtools authors are: + * Canonical Ltd + * Twisted Matrix Labs + * Jonathan Lange + * Robert Collins + * Andrew Bennetts + * Benjamin Peterson + * Jamu Kakar + * James Westby + * Martin [gz] + * Michael Hudson-Doyle + * Aaron Bentley + * Christian Kampka + * Gavin Panella + * Martin Pool + +and are collectively referred to as "testtools developers". + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +Some code in testtools/run.py taken from Python's unittest module: +Copyright (c) 1999-2003 Steve Purcell +Copyright (c) 2003-2010 Python Software Foundation + +This module is free software, and you may redistribute it and/or modify +it under the same terms as Python itself, so long as this copyright message +and disclaimer are retained in their original form. 
+ +IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF +THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, +AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, +SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/test/3rdparty/testtools-0.9.12/MANIFEST.in b/test/3rdparty/testtools-0.9.12/MANIFEST.in new file mode 100644 index 00000000000..92a623b2a12 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/MANIFEST.in @@ -0,0 +1,12 @@ +include LICENSE +include HACKING +include Makefile +include MANIFEST.in +include MANUAL +include NEWS +include README +include .bzrignore +graft doc +graft doc/_static +graft doc/_templates +prune doc/_build diff --git a/test/3rdparty/testtools-0.9.12/Makefile b/test/3rdparty/testtools-0.9.12/Makefile new file mode 100644 index 00000000000..b3e40ecddfb --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/Makefile @@ -0,0 +1,56 @@ +# See README for copyright and licensing details. + +PYTHON=python +SOURCES=$(shell find testtools -name "*.py") + +check: + PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run testtools.tests.test_suite + +TAGS: ${SOURCES} + ctags -e -R testtools/ + +tags: ${SOURCES} + ctags -R testtools/ + +clean: clean-sphinx + rm -f TAGS tags + find testtools -name "*.pyc" -exec rm '{}' \; + +prerelease: + # An existing MANIFEST breaks distutils sometimes. Avoid that. 
+ -rm MANIFEST + +release: + ./setup.py sdist upload --sign + $(PYTHON) scripts/_lp_release.py + +snapshot: prerelease + ./setup.py sdist + +### Documentation ### + +apidocs: + # pydoctor emits deprecation warnings under Ubuntu 10.10 LTS + PYTHONWARNINGS='ignore::DeprecationWarning' \ + pydoctor --make-html --add-package testtools \ + --docformat=restructuredtext --project-name=testtools \ + --project-url=https://launchpad.net/testtools + +doc/news.rst: + ln -s ../NEWS doc/news.rst + +docs: doc/news.rst docs-sphinx + rm doc/news.rst + +docs-sphinx: html-sphinx + +# Clean out generated documentation +clean-sphinx: + cd doc && make clean + +# Build the html docs using Sphinx. +html-sphinx: + cd doc && make html + +.PHONY: apidocs docs-sphinx clean-sphinx html-sphinx docs +.PHONY: check clean prerelease release diff --git a/test/3rdparty/testtools-0.9.12/NEWS b/test/3rdparty/testtools-0.9.12/NEWS new file mode 100644 index 00000000000..9ff3c05ce22 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/NEWS @@ -0,0 +1,688 @@ +testtools NEWS +++++++++++++++ + +Changes and improvements to testtools_, grouped by release. + +0.9.12 +~~~~~~ + +This is a very big release. We've made huge improvements on three fronts: + 1. Test failures are way nicer and easier to read + 2. Matchers and ``assertThat`` are much more convenient to use + 3. Correct handling of extended unicode characters + +We've trimmed off the fat from the stack trace you get when tests fail, we've +cut out the bits of error messages that just didn't help, we've made it easier +to annotate mismatch failures, to compare complex objects and to match raised +exceptions. + +Testing code was never this fun. + +Changes +------- + +* ``AfterPreproccessing`` renamed to ``AfterPreprocessing``, which is a more + correct spelling. Old name preserved for backwards compatibility, but is + now deprecated. Please stop using it. 
+ (Jonathan Lange, #813460) + +* ``assertThat`` raises ``MismatchError`` instead of + ``TestCase.failureException``. ``MismatchError`` is a subclass of + ``AssertionError``, so in most cases this change will not matter. However, + if ``self.failureException`` has been set to a non-default value, then + mismatches will become test errors rather than test failures. + +* ``gather_details`` takes two dicts, rather than two detailed objects. + (Jonathan Lange, #801027) + +* ``MatchesRegex`` mismatch now says "<value> does not match /<regex>/" rather + than "<regex> did not match <value>". The regular expression contains fewer + backslashes too. (Jonathan Lange, #818079) + +* Tests that run with ``AsynchronousDeferredRunTest`` now have the ``reactor`` + attribute set to the running reactor. (Jonathan Lange, #720749) + +Improvements +------------ + +* All public matchers are now in ``testtools.matchers.__all__``. + (Jonathan Lange, #784859) + +* ``assertThat`` can actually display mismatches and matchers that contain + extended unicode characters. (Jonathan Lange, Martin [gz], #804127) + +* ``assertThat`` output is much less verbose, displaying only what the mismatch + tells us to display. Old-style verbose output can be had by passing + ``verbose=True`` to assertThat. (Jonathan Lange, #675323, #593190) + +* ``assertThat`` accepts a message which will be used to annotate the matcher. + This can be given as a third parameter or as a keyword parameter. + (Robert Collins) + +* Automated the Launchpad part of the release process. + (Jonathan Lange, #623486) + +* Correctly display non-ASCII unicode output on terminals that claim to have a + unicode encoding. (Martin [gz], #804122) + +* ``DocTestMatches`` correctly handles unicode output from examples, rather + than raising an error. (Martin [gz], #764170) + +* ``ErrorHolder`` and ``PlaceHolder`` added to docs. 
(Jonathan Lange, #816597) + +* ``ExpectedException`` now matches any exception of the given type by + default, and also allows specifying a ``Matcher`` rather than a mere regular + expression. (Jonathan Lange, #791889) + +* ``FixtureSuite`` added, allows test suites to run with a given fixture. + (Jonathan Lange) + +* Hide testtools's own stack frames when displaying tracebacks, making it + easier for test authors to focus on their errors. + (Jonathan Lange, Martin [gz], #788974) + +* Less boilerplate displayed in test failures and errors. + (Jonathan Lange, #660852) + +* ``MatchesException`` now allows you to match exceptions against any matcher, + rather than just regular expressions. (Jonathan Lange, #791889) + +* ``MatchesException`` now permits a tuple of types rather than a single type + (when using the type matching mode). (Robert Collins) + +* ``MatchesStructure.byEquality`` added to make the common case of matching + many attributes by equality much easier. ``MatchesStructure.byMatcher`` + added in case folk want to match by things other than equality. + (Jonathan Lange) + +* New convenience assertions, ``assertIsNone`` and ``assertIsNotNone``. + (Christian Kampka) + +* New matchers: + + * ``AllMatch`` matches many values against a single matcher. + (Jonathan Lange, #615108) + + * ``Contains``. (Robert Collins) + + * ``GreaterThan``. (Christian Kampka) + +* New helper, ``safe_hasattr`` added. (Jonathan Lange) + +* ``reraise`` added to ``testtools.compat``. (Jonathan Lange) + + +0.9.11 +~~~~~~ + +This release brings consistent use of super for better compatibility with +multiple inheritance, fixed Python3 support, improvements in fixture and mather +outputs and a compat helper for testing libraries that deal with bytestrings. + +Changes +------- + +* ``TestCase`` now uses super to call base ``unittest.TestCase`` constructor, + ``setUp`` and ``tearDown``. 
(Tim Cole, #771508) + +* If, when calling ``useFixture`` an error occurs during fixture set up, we + still attempt to gather details from the fixture. (Gavin Panella) + + +Improvements +------------ + +* Additional compat helper for ``BytesIO`` for libraries that build on + testtools and are working on Python 3 porting. (Robert Collins) + +* Corrected documentation for ``MatchesStructure`` in the test authors + document. (Jonathan Lange) + +* ``LessThan`` error message now says something that is logically correct. + (Gavin Panella, #762008) + +* Multiple details from a single fixture are now kept separate, rather than + being mooshed together. (Gavin Panella, #788182) + +* Python 3 support now back in action. (Martin [gz], #688729) + +* ``try_import`` and ``try_imports`` have a callback that is called whenever + they fail to import a module. (Martin Pool) + + +0.9.10 +~~~~~~ + +The last release of testtools could not be easy_installed. This is considered +severe enough for a re-release. + +Improvements +------------ + +* Include ``doc/`` in the source distribution, making testtools installable + from PyPI again (Tres Seaver, #757439) + + +0.9.9 +~~~~~ + +Many, many new matchers, vastly expanded documentation, stacks of bug fixes, +better unittest2 integration. If you've ever wanted to try out testtools but +been afraid to do so, this is the release to try. + + +Changes +------- + +* The timestamps generated by ``TestResult`` objects when no timing data has + been received are now datetime-with-timezone, which allows them to be + sensibly serialised and transported. (Robert Collins, #692297) + +Improvements +------------ + +* ``AnnotatedMismatch`` now correctly returns details. + (Jonathan Lange, #724691) + +* distutils integration for the testtools test runner. Can now use it for + 'python setup.py test'. (Christian Kampka, #693773) + +* ``EndsWith`` and ``KeysEqual`` now in testtools.matchers.__all__. 
+ (Jonathan Lange, #692158) + +* ``MatchesException`` extended to support a regular expression check against + the str() of a raised exception. (Jonathan Lange) + +* ``MultiTestResult`` now forwards the ``time`` API. (Robert Collins, #692294) + +* ``MultiTestResult`` now documented in the manual. (Jonathan Lange, #661116) + +* New content helpers ``content_from_file``, ``content_from_stream`` and + ``attach_file`` make it easier to attach file-like objects to a + test. (Jonathan Lange, Robert Collins, #694126) + +* New ``ExpectedException`` context manager to help write tests against things + that are expected to raise exceptions. (Aaron Bentley) + +* New matchers: + + * ``MatchesListwise`` matches an iterable of matchers against an iterable + of values. (Michael Hudson-Doyle) + + * ``MatchesRegex`` matches a string against a regular expression. + (Michael Hudson-Doyle) + + * ``MatchesStructure`` matches attributes of an object against given + matchers. (Michael Hudson-Doyle) + + * ``AfterPreproccessing`` matches values against a matcher after passing them + through a callable. (Michael Hudson-Doyle) + + * ``MatchesSetwise`` matches an iterable of matchers against an iterable of + values, without regard to order. (Michael Hudson-Doyle) + +* ``setup.py`` can now build a snapshot when Bazaar is installed but the tree + is not a Bazaar tree. (Jelmer Vernooij) + +* Support for running tests using distutils (Christian Kampka, #726539) + +* Vastly improved and extended documentation. (Jonathan Lange) + +* Use unittest2 exception classes if available. (Jelmer Vernooij) + + +0.9.8 +~~~~~ + +In this release we bring some very interesting improvements: + +* new matchers for exceptions, sets, lists, dicts and more. + +* experimental (works but the contract isn't supported) twisted reactor + support. + +* The built in runner can now list tests and filter tests (the -l and + --load-list options). 
+ +Changes +------- + +* addUnexpectedSuccess is translated to addFailure for test results that don't + know about addUnexpectedSuccess. Further, it fails the entire result for + all testtools TestResults (i.e. wasSuccessful() returns False after + addUnexpectedSuccess has been called). Note that when using a delegating + result such as ThreadsafeForwardingResult, MultiTestResult or + ExtendedToOriginalDecorator then the behaviour of addUnexpectedSuccess is + determined by the delegated to result(s). + (Jonathan Lange, Robert Collins, #654474, #683332) + +* startTestRun will reset any errors on the result. That is, wasSuccessful() + will always return True immediately after startTestRun() is called. This + only applies to delegated test results (ThreadsafeForwardingResult, + MultiTestResult and ExtendedToOriginalDecorator) if the delegated to result + is a testtools test result - we cannot reliably reset the state of unknown + test result class instances. (Jonathan Lange, Robert Collins, #683332) + +* Responsibility for running test cleanups has been moved to ``RunTest``. + This change does not affect public APIs and can be safely ignored by test + authors. (Jonathan Lange, #662647) + +Improvements +------------ + +* New matchers: + + * ``EndsWith`` which complements the existing ``StartsWith`` matcher. + (Jonathan Lange, #669165) + + * ``MatchesException`` matches an exception class and parameters. (Robert + Collins) + + * ``KeysEqual`` matches a dictionary with particular keys. (Jonathan Lange) + +* ``assertIsInstance`` supports a custom error message to be supplied, which + is necessary when using ``assertDictEqual`` on Python 2.7 with a + ``testtools.TestCase`` base class. (Jelmer Vernooij) + +* Experimental support for running tests that return Deferreds. + (Jonathan Lange, Martin [gz]) + +* Provide a per-test decorator, run_test_with, to specify which RunTest + object to use for a given test. 
(Jonathan Lange, #657780) + +* Fix the runTest parameter of TestCase to actually work, rather than raising + a TypeError. (Jonathan Lange, #657760) + +* Non-release snapshots of testtools will now work with buildout. + (Jonathan Lange, #613734) + +* Malformed SyntaxErrors no longer blow up the test suite. (Martin [gz]) + +* ``MismatchesAll.describe`` no longer appends a trailing newline. + (Michael Hudson-Doyle, #686790) + +* New helpers for conditionally importing modules, ``try_import`` and + ``try_imports``. (Jonathan Lange) + +* ``Raises`` added to the ``testtools.matchers`` module - matches if the + supplied callable raises, and delegates to an optional matcher for validation + of the exception. (Robert Collins) + +* ``raises`` added to the ``testtools.matchers`` module - matches if the + supplied callable raises and delegates to ``MatchesException`` to validate + the exception. (Jonathan Lange) + +* Tests will now pass on Python 2.6.4 : an ``Exception`` change made only in + 2.6.4 and reverted in Python 2.6.5 was causing test failures on that version. + (Martin [gz], #689858). + +* ``testtools.TestCase.useFixture`` has been added to glue with fixtures nicely. + (Robert Collins) + +* ``testtools.run`` now supports ``-l`` to list tests rather than executing + them. This is useful for integration with external test analysis/processing + tools like subunit and testrepository. (Robert Collins) + +* ``testtools.run`` now supports ``--load-list``, which takes a file containing + test ids, one per line, and intersects those ids with the tests found. This + allows fine grained control of what tests are run even when the tests cannot + be named as objects to import (e.g. due to test parameterisation via + testscenarios). (Robert Collins) + +* Update documentation to say how to use testtools.run() on Python 2.4. + (Jonathan Lange, #501174) + +* ``text_content`` conveniently converts a Python string to a Content object. 
+ (Jonathan Lange, James Westby) + + + +0.9.7 +~~~~~ + +Lots of little cleanups in this release; many small improvements to make your +testing life more pleasant. + +Improvements +------------ + +* Cleanups can raise ``testtools.MultipleExceptions`` if they have multiple + exceptions to report. For instance, a cleanup which is itself responsible for + running several different internal cleanup routines might use this. + +* Code duplication between assertEqual and the matcher Equals has been removed. + +* In normal circumstances, a TestCase will no longer share details with clones + of itself. (Andrew Bennetts, bug #637725) + +* Less exception object cycles are generated (reduces peak memory use between + garbage collection). (Martin [gz]) + +* New matchers 'DoesNotStartWith' and 'StartsWith' contributed by Canonical + from the Launchpad project. Written by James Westby. + +* Timestamps as produced by subunit protocol clients are now forwarded in the + ThreadsafeForwardingResult so correct test durations can be reported. + (Martin [gz], Robert Collins, #625594) + +* With unittest from Python 2.7 skipped tests will now show only the reason + rather than a serialisation of all details. (Martin [gz], #625583) + +* The testtools release process is now a little better documented and a little + smoother. (Jonathan Lange, #623483, #623487) + + +0.9.6 +~~~~~ + +Nothing major in this release, just enough small bits and pieces to make it +useful enough to upgrade to. + +In particular, a serious bug in assertThat() has been fixed, it's easier to +write Matchers, there's a TestCase.patch() method for those inevitable monkey +patches and TestCase.assertEqual gives slightly nicer errors. + +Improvements +------------ + +* 'TestCase.assertEqual' now formats errors a little more nicely, in the + style of bzrlib. + +* Added `PlaceHolder` and `ErrorHolder`, TestCase-like objects that can be + used to add results to a `TestResult`. 
+ +* 'Mismatch' now takes optional description and details parameters, so + custom Matchers aren't compelled to make their own subclass. + +* jml added a built-in UTF8_TEXT ContentType to make it slightly easier to + add details to test results. See bug #520044. + +* Fix a bug in our built-in matchers where assertThat would blow up if any + of them failed. All built-in mismatch objects now provide get_details(). + +* New 'Is' matcher, which lets you assert that a thing is identical to + another thing. + +* New 'LessThan' matcher which lets you assert that a thing is less than + another thing. + +* TestCase now has a 'patch()' method to make it easier to monkey-patching + objects in tests. See the manual for more information. Fixes bug #310770. + +* MultiTestResult methods now pass back return values from the results it + forwards to. + +0.9.5 +~~~~~ + +This release fixes some obscure traceback formatting issues that probably +weren't affecting you but were certainly breaking our own test suite. + +Changes +------- + +* Jamu Kakar has updated classes in testtools.matchers and testtools.runtest + to be new-style classes, fixing bug #611273. + +Improvements +------------ + +* Martin[gz] fixed traceback handling to handle cases where extract_tb returns + a source line of None. Fixes bug #611307. + +* Martin[gz] fixed an unicode issue that was causing the tests to fail, + closing bug #604187. + +* testtools now handles string exceptions (although why would you want to use + them?) and formats their tracebacks correctly. Thanks to Martin[gz] for + fixing bug #592262. + +0.9.4 +~~~~~ + +This release overhauls the traceback formatting layer to deal with Python 2 +line numbers and traceback objects often being local user encoded strings +rather than unicode objects. Test discovery has also been added and Python 3.1 +is also supported. Finally, the Mismatch protocol has been extended to let +Matchers collaborate with tests in supplying detailed data about failures. 
+ +Changes +------- + +* testtools.utils has been renamed to testtools.compat. Importing + testtools.utils will now generate a deprecation warning. + +Improvements +------------ + +* Add machinery for Python 2 to create unicode tracebacks like those used by + Python 3. This means testtools no longer throws on encountering non-ascii + filenames, source lines, or exception strings when displaying test results. + Largely contributed by Martin[gz] with some tweaks from Robert Collins. + +* James Westby has supplied test discovery support using the Python 2.7 + TestRunner in testtools.run. This requires the 'discover' module. This + closes bug #250764. + +* Python 3.1 is now supported, thanks to Martin[gz] for a partial patch. + This fixes bug #592375. + +* TestCase.addCleanup has had its docstring corrected about when cleanups run. + +* TestCase.skip is now deprecated in favour of TestCase.skipTest, which is the + Python2.7 spelling for skip. This closes bug #560436. + +* Tests work on IronPython patch from Martin[gz] applied. + +* Thanks to a patch from James Westby testtools.matchers.Mismatch can now + supply a get_details method, which assertThat will query to provide + additional attachments. This can be used to provide additional detail + about the mismatch that doesn't suite being included in describe(). For + instance, if the match process was complex, a log of the process could be + included, permitting debugging. + +* testtools.testresults.real._StringException will now answer __str__ if its + value is unicode by encoding with UTF8, and vice versa to answer __unicode__. + This permits subunit decoded exceptions to contain unicode and still format + correctly. + +0.9.3 +~~~~~ + +More matchers, Python 2.4 support, faster test cloning by switching to copy +rather than deepcopy and better output when exceptions occur in cleanups are +the defining characteristics of this release. 
+ +Improvements +------------ + +* New matcher "Annotate" that adds a simple string message to another matcher, + much like the option 'message' parameter to standard library assertFoo + methods. + +* New matchers "Not" and "MatchesAll". "Not" will invert another matcher, and + "MatchesAll" that needs a successful match for all of its arguments. + +* On Python 2.4, where types.FunctionType cannot be deepcopied, testtools will + now monkeypatch copy._deepcopy_dispatch using the same trivial patch that + added such support to Python 2.5. The monkey patch is triggered by the + absence of FunctionType from the dispatch dict rather than a version check. + Bug #498030. + +* On windows the test 'test_now_datetime_now' should now work reliably. + +* TestCase.getUniqueInteger and TestCase.getUniqueString now have docstrings. + +* TestCase.getUniqueString now takes an optional prefix parameter, so you can + now use it in circumstances that forbid strings with '.'s, and such like. + +* testtools.testcase.clone_test_with_new_id now uses copy.copy, rather than + copy.deepcopy. Tests that need a deeper copy should use the copy protocol to + control how they are copied. Bug #498869. + +* The backtrace test result output tests should now pass on windows and other + systems where os.sep is not '/'. + +* When a cleanUp or tearDown exception occurs, it is now accumulated as a new + traceback in the test details, rather than as a separate call to addError / + addException. This makes testtools work better with most TestResult objects + and fixes bug #335816. + + +0.9.2 +~~~~~ + +Python 3 support, more matchers and better consistency with Python 2.7 -- +you'd think that would be enough for a point release. Well, we here on the +testtools project think that you deserve more. 
+ +We've added a hook so that user code can be called just-in-time whenever there +is an exception, and we've also factored out the "run" logic of test cases so +that new outcomes can be added without fiddling with the actual flow of logic. + +It might sound like small potatoes, but it's changes like these that will +bring about the end of test frameworks. + + +Improvements +------------ + +* A failure in setUp and tearDown now report as failures not as errors. + +* Cleanups now run after tearDown to be consistent with Python 2.7's cleanup + feature. + +* ExtendedToOriginalDecorator now passes unrecognised attributes through + to the decorated result object, permitting other extensions to the + TestCase -> TestResult protocol to work. + +* It is now possible to trigger code just-in-time after an exception causes + a test outcome such as failure or skip. See the testtools MANUAL or + ``pydoc testtools.TestCase.addOnException``. (bug #469092) + +* New matcher Equals which performs a simple equality test. + +* New matcher MatchesAny which looks for a match of any of its arguments. + +* TestCase no longer breaks if a TestSkipped exception is raised with no + parameters. + +* TestCase.run now clones test cases before they are run and runs the clone. + This reduces memory footprint in large test runs - state accumulated on + test objects during their setup and execution gets freed when test case + has finished running unless the TestResult object keeps a reference. + NOTE: As test cloning uses deepcopy, this can potentially interfere if + a test suite has shared state (such as the testscenarios or testresources + projects use). Use the __deepcopy__ hook to control the copying of such + objects so that the shared references stay shared. + +* Testtools now accepts contributions without copyright assignment under some + circumstances. See HACKING for details. 
+ +* Testtools now provides a convenient way to run a test suite using the + testtools result object: python -m testtools.run testspec [testspec...]. + +* Testtools now works on Python 3, thanks to Benjamin Peterson. + +* Test execution now uses a separate class, testtools.RunTest to run single + tests. This can be customised and extended in a more consistent fashion than + the previous run method idiom. See pydoc for more information. + +* The test doubles that testtools itself uses are now available as part of + the testtools API in testtols.testresult.doubles. + +* TracebackContent now sets utf8 as the charset encoding, rather than not + setting one and encoding with the default encoder. + +* With python2.7 testtools.TestSkipped will be the unittest.case.SkipTest + exception class making skips compatible with code that manually raises the + standard library exception. (bug #490109) + +Changes +------- + +* TestCase.getUniqueInteger is now implemented using itertools.count. Thanks + to Benjamin Peterson for the patch. (bug #490111) + + +0.9.1 +~~~~~ + +The new matcher API introduced in 0.9.0 had a small flaw where the matchee +would be evaluated twice to get a description of the mismatch. This could lead +to bugs if the act of matching caused side effects to occur in the matchee. +Since having such side effects isn't desirable, we have changed the API now +before it has become widespread. + +Changes +------- + +* Matcher API changed to avoid evaluating matchee twice. Please consult + the API documentation. + +* TestCase.getUniqueString now uses the test id, not the test method name, + which works nicer with parameterised tests. + +Improvements +------------ + +* Python2.4 is now supported again. + + +0.9.0 +~~~~~ + +This release of testtools is perhaps the most interesting and exciting one +it's ever had. 
We've continued in bringing together the best practices of unit +testing from across a raft of different Python projects, but we've also +extended our mission to incorporating unit testing concepts from other +languages and from our own research, led by Robert Collins. + +We now support skipping and expected failures. We'll make sure that you +up-call setUp and tearDown, avoiding unexpected testing weirdnesses. We're +now compatible with Python 2.5, 2.6 and 2.7 unittest library. + +All in all, if you are serious about unit testing and want to get the best +thinking from the whole Python community, you should get this release. + +Improvements +------------ + +* A new TestResult API has been added for attaching details to test outcomes. + This API is currently experimental, but is being prepared with the intent + of becoming an upstream Python API. For more details see pydoc + testtools.TestResult and the TestCase addDetail / getDetails methods. + +* assertThat has been added to TestCase. This new assertion supports + a hamcrest-inspired matching protocol. See pydoc testtools.Matcher for + details about writing matchers, and testtools.matchers for the included + matchers. See http://code.google.com/p/hamcrest/. + +* Compatible with Python 2.6 and Python 2.7 + +* Failing to upcall in setUp or tearDown will now cause a test failure. + While the base methods do nothing, failing to upcall is usually a problem + in deeper hierarchies, and checking that the root method is called is a + simple way to catch this common bug. + +* New TestResult decorator ExtendedToOriginalDecorator which handles + downgrading extended API calls like addSkip to older result objects that + do not support them. This is used internally to make testtools simpler but + can also be used to simplify other code built on or for use with testtools. + +* New TextTestResult supporting the extended APIs that testtools provides. 
+ +* Nose will no longer find 'runTest' tests in classes derived from + testtools.testcase.TestCase (bug #312257). + +* Supports the Python 2.7/3.1 addUnexpectedSuccess and addExpectedFailure + TestResult methods, with a support function 'knownFailure' to let tests + trigger these outcomes. + +* When using the skip feature with TestResult objects that do not support it + a test success will now be reported. Previously an error was reported but + production experience has shown that this is too disruptive for projects that + are using skips: they cannot get a clean run on down-level result objects. + + +.. _testtools: http://pypi.python.org/pypi/testtools diff --git a/test/3rdparty/testtools-0.9.12/PKG-INFO b/test/3rdparty/testtools-0.9.12/PKG-INFO new file mode 100644 index 00000000000..7dbace779b0 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/PKG-INFO @@ -0,0 +1,107 @@ +Metadata-Version: 1.0 +Name: testtools +Version: 0.9.12 +Summary: Extensions to the Python standard library unit testing framework +Home-page: https://launchpad.net/testtools +Author: Jonathan M. Lange +Author-email: jml+testtools@mumak.net +License: UNKNOWN +Description: ====================================== + testtools: tasteful testing for Python + ====================================== + + testtools is a set of extensions to the Python standard library's unit testing + framework. These extensions have been derived from many years of experience + with unit testing in Python and come from many different sources. testtools + also ports recent unittest changes all the way back to Python 2.4. 
+ + What better way to start than with a contrived code snippet?:: + + from testtools import TestCase + from testtools.content import Content + from testtools.content_type import UTF8_TEXT + from testtools.matchers import Equals + + from myproject import SillySquareServer + + class TestSillySquareServer(TestCase): + + def setUp(self): + super(TestSillySquare, self).setUp() + self.server = self.useFixture(SillySquareServer()) + self.addCleanup(self.attach_log_file) + + def attach_log_file(self): + self.addDetail( + 'log-file', + Content(UTF8_TEXT + lambda: open(self.server.logfile, 'r').readlines())) + + def test_server_is_cool(self): + self.assertThat(self.server.temperature, Equals("cool")) + + def test_square(self): + self.assertThat(self.server.silly_square_of(7), Equals(49)) + + + Why use testtools? + ================== + + Better assertion methods + ------------------------ + + The standard assertion methods that come with unittest aren't as helpful as + they could be, and there aren't quite enough of them. testtools adds + ``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives. + + + Matchers: better than assertion methods + --------------------------------------- + + Of course, in any serious project you want to be able to have assertions that + are specific to that project and the particular problem that it is addressing. + Rather than forcing you to define your own assertion methods and maintain your + own inheritance hierarchy of ``TestCase`` classes, testtools lets you write + your own "matchers", custom predicates that can be plugged into a unit test:: + + def test_response_has_bold(self): + # The response has bold text. + response = self.server.getResponse() + self.assertThat(response, HTMLContains(Tag('bold', 'b'))) + + + More debugging info, when you need it + -------------------------------------- + + testtools makes it easy to add arbitrary data to your test result. 
If you + want to know what's in a log file when a test fails, or what the load was on + the computer when a test started, or what files were open, you can add that + information with ``TestCase.addDetail``, and it will appear in the test + results if that test fails. + + + Extend unittest, but stay compatible and re-usable + -------------------------------------------------- + + testtools goes to great lengths to allow serious test authors and test + *framework* authors to do whatever they like with their tests and their + extensions while staying compatible with the standard library's unittest. + + testtools has completely parametrized how exceptions raised in tests are + mapped to ``TestResult`` methods and how tests are actually executed (ever + wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?) + + It also provides many simple but handy utilities, like the ability to clone a + test, a ``MultiTestResult`` object that lets many result objects get the + results from one test suite, adapters to bring legacy ``TestResult`` objects + into our new golden age. + + + Cross-Python compatibility + -------------------------- + + testtools gives you the very latest in unit testing technology in a way that + will work with Python 2.4, 2.5, 2.6, 2.7 and 3.1. + +Platform: UNKNOWN +Classifier: License :: OSI Approved :: MIT License diff --git a/test/3rdparty/testtools-0.9.12/README b/test/3rdparty/testtools-0.9.12/README new file mode 100644 index 00000000000..78397de85b6 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/README @@ -0,0 +1,86 @@ +========= +testtools +========= + +testtools is a set of extensions to the Python standard library's unit testing +framework. + +These extensions have been derived from years of experience with unit testing +in Python and come from many different sources. + + +Documentation +------------- + +If you would like to learn more about testtools, consult our documentation in +the 'doc/' directory. 
You might like to start at 'doc/overview.rst' or +'doc/for-test-authors.rst'. + + +Licensing +--------- + +This project is distributed under the MIT license and copyright is owned by +Jonathan M. Lange and the testtools authors. See LICENSE for details. + +Some code in 'testtools/run.py' is taken from Python's unittest module, and is +copyright Steve Purcell and the Python Software Foundation, it is distributed +under the same license as Python, see LICENSE for details. + + +Required Dependencies +--------------------- + + * Python 2.4+ or 3.0+ + + +Optional Dependencies +--------------------- + +If you would like to use our undocumented, unsupported Twisted support, then +you will need Twisted. + +If you want to use ``fixtures`` then you can either install fixtures (e.g. from +https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures) +or alternatively just make sure your fixture objects obey the same protocol. + + +Bug reports and patches +----------------------- + +Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>. +Patches can also be submitted via Launchpad, or mailed to the author. You can +mail the author directly at jml@mumak.net. + +There's no mailing list for this project yet, however the testing-in-python +mailing list may be a useful resource: + + * Address: testing-in-python@lists.idyll.org + * Subscription link: http://lists.idyll.org/listinfo/testing-in-python + + +History +------- + +testtools used to be called 'pyunit3k'. The name was changed to avoid +conflating the library with the Python 3.0 release (commonly referred to as +'py3k'). 
+ + +Thanks +------ + + * Canonical Ltd + * Bazaar + * Twisted Matrix Labs + * Robert Collins + * Andrew Bennetts + * Benjamin Peterson + * Jamu Kakar + * James Westby + * Martin [gz] + * Michael Hudson-Doyle + * Aaron Bentley + * Christian Kampka + * Gavin Panella + * Martin Pool diff --git a/test/3rdparty/testtools-0.9.12/doc/Makefile b/test/3rdparty/testtools-0.9.12/doc/Makefile new file mode 100644 index 00000000000..b5d07af57f2 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/Makefile @@ -0,0 +1,89 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
+ +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/testtools.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/testtools.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/test/3rdparty/testtools-0.9.12/doc/_static/placeholder.txt b/test/3rdparty/testtools-0.9.12/doc/_static/placeholder.txt new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/_static/placeholder.txt diff --git a/test/3rdparty/testtools-0.9.12/doc/_templates/placeholder.txt b/test/3rdparty/testtools-0.9.12/doc/_templates/placeholder.txt new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/_templates/placeholder.txt diff --git a/test/3rdparty/testtools-0.9.12/doc/conf.py b/test/3rdparty/testtools-0.9.12/doc/conf.py new file mode 100644 index 00000000000..de5fdd4224e --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/conf.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# +# testtools documentation build configuration file, created by +# sphinx-quickstart on Sun Nov 28 13:45:40 2010. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. 
+#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'testtools' +copyright = u'2010, The testtools authors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = 'VERSION' +# The full version, including alpha/beta/rc tags. +release = 'VERSION' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. 
+html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. 
+#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'testtoolsdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'testtools.tex', u'testtools Documentation', + u'The testtools authors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True diff --git a/test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst b/test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst new file mode 100644 index 00000000000..a4b20f64cac --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst @@ -0,0 +1,219 @@ +============================ +testtools for framework folk +============================ + +Introduction +============ + +In addition to having many features :doc:`for test authors +<for-test-authors>`, testtools also has many bits and pieces that are useful +for folk who write testing frameworks. 
+ +If you are the author of a test runner, are working on a very large +unit-tested project, are trying to get one testing framework to play nicely +with another or are hacking away at getting your test suite to run in parallel +over a heterogenous cluster of machines, this guide is for you. + +This manual is a summary. You can get details by consulting the `testtools +API docs`_. + + +Extensions to TestCase +====================== + +Custom exception handling +------------------------- + +testtools provides a way to control how test exceptions are handled. To do +this, add a new exception to ``self.exception_handlers`` on a +``testtools.TestCase``. For example:: + + >>> self.exception_handlers.insert(-1, (ExceptionClass, handler)). + +Having done this, if any of ``setUp``, ``tearDown``, or the test method raise +``ExceptionClass``, ``handler`` will be called with the test case, test result +and the raised exception. + +Use this if you want to add a new kind of test result, that is, if you think +that ``addError``, ``addFailure`` and so forth are not enough for your needs. + + +Controlling test execution +-------------------------- + +If you want to control more than just how exceptions are raised, you can +provide a custom ``RunTest`` to a ``TestCase``. The ``RunTest`` object can +change everything about how the test executes. + +To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that +takes a test and an optional list of exception handlers. Instances returned +by the factory must have a ``run()`` method that takes an optional ``TestResult`` +object. + +The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test +method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla +way that Python's standard unittest_ does. 
+ +To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something +like this:: + + class SomeTests(TestCase): + run_tests_with = CustomRunTestFactory + +To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do:: + + class SomeTests(TestCase): + @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever') + def test_something(self): + pass + +In addition, either of these can be overridden by passing a factory in to the +``TestCase`` constructor with the optional ``runTest`` argument. + + +Test renaming +------------- + +``testtools.clone_test_with_new_id`` is a function to copy a test case +instance to one with a new name. This is helpful for implementing test +parameterization. + + +Test placeholders +================= + +Sometimes, it's useful to be able to add things to a test suite that are not +actually tests. For example, you might wish to represents import failures +that occur during test discovery as tests, so that your test result object +doesn't have to do special work to handle them nicely. + +testtools provides two such objects, called "placeholders": ``PlaceHolder`` +and ``ErrorHolder``. ``PlaceHolder`` takes a test id and an optional +description. When it's run, it succeeds. ``ErrorHolder`` takes a test id, +and error and an optional short description. When it's run, it reports that +error. + +These placeholders are best used to log events that occur outside the test +suite proper, but are still very relevant to its results. + +e.g.:: + + >>> suite = TestSuite() + >>> suite.add(PlaceHolder('I record an event')) + >>> suite.run(TextTestResult(verbose=True)) + I record an event [OK] + + +Extensions to TestResult +======================== + +TestResult.addSkip +------------------ + +This method is called on result objects when a test skips. The +``testtools.TestResult`` class records skips in its ``skip_reasons`` instance +dict. The can be reported on in much the same way as succesful tests. 
+ + +TestResult.time +--------------- + +This method controls the time used by a ``TestResult``, permitting accurate +timing of test results gathered on different machines or in different threads. +See pydoc testtools.TestResult.time for more details. + + +ThreadsafeForwardingResult +-------------------------- + +A ``TestResult`` which forwards activity to another test result, but synchronises +on a semaphore to ensure that all the activity for a single test arrives in a +batch. This allows simple TestResults which do not expect concurrent test +reporting to be fed the activity from multiple test threads, or processes. + +Note that when you provide multiple errors for a single test, the target sees +each error as a distinct complete test. + + +MultiTestResult +--------------- + +A test result that dispatches its events to many test results. Use this +to combine multiple different test result objects into one test result object +that can be passed to ``TestCase.run()`` or similar. For example:: + + a = TestResult() + b = TestResult() + combined = MultiTestResult(a, b) + combined.startTestRun() # Calls a.startTestRun() and b.startTestRun() + +Each of the methods on ``MultiTestResult`` will return a tuple of whatever the +component test results return. + + +TextTestResult +-------------- + +A ``TestResult`` that provides a text UI very similar to the Python standard +library UI. Key differences are that its supports the extended outcomes and +details API, and is completely encapsulated into the result object, permitting +it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes +are displayed (yet). It is also a 'quiet' result with no dots or verbose mode. +These limitations will be corrected soon. + + +ExtendedToOriginalDecorator +--------------------------- + +Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to +meet the testtools ``TestResult`` API. 
+ + +Test Doubles +------------ + +In testtools.testresult.doubles there are three test doubles that testtools +uses for its own testing: ``Python26TestResult``, ``Python27TestResult``, +``ExtendedTestResult``. These TestResult objects implement a single variation of +the TestResult API each, and log activity to a list ``self._events``. These are +made available for the convenience of people writing their own extensions. + + +startTestRun and stopTestRun +---------------------------- + +Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called +before and after the entire test run. 'stopTestRun' is particularly useful for +test results that wish to produce summary output. + +``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun`` +methods, and he default testtools runner will call these methods +appropriately. + +The ``startTestRun`` method will reset any errors, failures and so forth on +the result, making the result object look as if no tests have been run. + + +Extensions to TestSuite +======================= + +ConcurrentTestSuite +------------------- + +A TestSuite for parallel testing. This is used in conjuction with a helper that +runs a single suite in some parallel fashion (for instance, forking, handing +off to a subprocess, to a compute cloud, or simple threads). +ConcurrentTestSuite uses the helper to get a number of separate runnable +objects with a run(result), runs them all in threads using the +ThreadsafeForwardingResult to coalesce their activity. + +FixtureSuite +------------ + +A test suite that sets up a fixture_ before running any tests, and then tears +it down after all of the tests are run. The fixture is *not* made available to +any of the tests. + +.. _`testtools API docs`: http://mumak.net/testtools/apidocs/ +.. _unittest: http://docs.python.org/library/unittest.html +.. 
_fixture: http://pypi.python.org/pypi/fixtures diff --git a/test/3rdparty/testtools-0.9.12/doc/for-test-authors.rst b/test/3rdparty/testtools-0.9.12/doc/for-test-authors.rst new file mode 100644 index 00000000000..04c4be6b0db --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/for-test-authors.rst @@ -0,0 +1,1204 @@ +========================== +testtools for test authors +========================== + +If you are writing tests for a Python project and you (rather wisely) want to +use testtools to do so, this is the manual for you. + +We assume that you already know Python and that you know something about +automated testing already. + +If you are a test author of an unusually large or unusually unusual test +suite, you might be interested in :doc:`for-framework-folk`. + +You might also be interested in the `testtools API docs`_. + + +Introduction +============ + +testtools is a set of extensions to Python's standard unittest module. +Writing tests with testtools is very much like writing tests with standard +Python, or with Twisted's "trial_", or nose_, except a little bit easier and +more enjoyable. + +Below, we'll try to give some examples of how to use testtools in its most +basic way, as well as a sort of feature-by-feature breakdown of the cool bits +that you could easily miss. + + +The basics +========== + +Here's what a basic testtools unit tests look like:: + + from testtools import TestCase + from myproject import silly + + class TestSillySquare(TestCase): + """Tests for silly square function.""" + + def test_square(self): + # 'square' takes a number and multiplies it by itself. + result = silly.square(7) + self.assertEqual(result, 49) + + def test_square_bad_input(self): + # 'square' raises a TypeError if it's given bad input, say a + # string. + self.assertRaises(TypeError, silly.square, "orange") + + +Here you have a class that inherits from ``testtools.TestCase`` and bundles +together a bunch of related tests. 
The tests themselves are methods on that +class that begin with ``test_``. + +Running your tests +------------------ + +You can run these tests in many ways. testtools provides a very basic +mechanism for doing so:: + + $ python -m testtools.run exampletest + Tests running... + Ran 2 tests in 0.000s + + OK + +where 'exampletest' is a module that contains unit tests. By default, +``testtools.run`` will *not* recursively search the module or package for unit +tests. To do this, you will need to either have the discover_ module +installed or have Python 2.7 or later, and then run:: + + $ python -m testtools.run discover packagecontainingtests + +For more information see the Python 2.7 unittest documentation, or:: + + python -m testtools.run --help + +As your testing needs grow and evolve, you will probably want to use a more +sophisticated test runner. There are many of these for Python, and almost all +of them will happily run testtools tests. In particular: + +* testrepository_ +* Trial_ +* nose_ +* unittest2_ +* `zope.testrunner`_ (aka zope.testing) + +From now on, we'll assume that you know how to run your tests. + +Running test with Distutils +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you are using Distutils_ to build your Python project, you can use the testtools +Distutils_ command to integrate testtools into your Distutils_ workflow:: + + from distutils.core import setup + from testtools import TestCommand + setup(name='foo', + version='1.0', + py_modules=['foo'], + cmdclass={'test': TestCommand} + ) + +You can then run:: + + $ python setup.py test -m exampletest + Tests running... + Ran 2 tests in 0.000s + + OK + +For more information about the capabilities of the `TestCommand` command see:: + + $ python setup.py test --help + +You can use the `setup configuration`_ to specify the default behavior of the +`TestCommand` command. 
+ +Assertions +========== + +The core of automated testing is making assertions about the way things are, +and getting a nice, helpful, informative error message when things are not as +they ought to be. + +All of the assertions that you can find in Python standard unittest_ can be +found in testtools (remember, testtools extends unittest). testtools changes +the behaviour of some of those assertions slightly and adds some new +assertions that you will almost certainly find useful. + + +Improved assertRaises +--------------------- + +``TestCase.assertRaises`` returns the caught exception. This is useful for +asserting more things about the exception than just the type:: + + def test_square_bad_input(self): + # 'square' raises a TypeError if it's given bad input, say a + # string. + e = self.assertRaises(TypeError, silly.square, "orange") + self.assertEqual("orange", e.bad_value) + self.assertEqual("Cannot square 'orange', not a number.", str(e)) + +Note that this is incompatible with the ``assertRaises`` in unittest2 and +Python2.7. + + +ExpectedException +----------------- + +If you are using a version of Python that supports the ``with`` context +manager syntax, you might prefer to use that syntax to ensure that code raises +particular errors. ``ExpectedException`` does just that. For example:: + + def test_square_root_bad_input_2(self): + # 'square' raises a TypeError if it's given bad input. + with ExpectedException(TypeError, "Cannot square.*"): + silly.square('orange') + +The first argument to ``ExpectedException`` is the type of exception you +expect to see raised. The second argument is optional, and can be either a +regular expression or a matcher. If it is a regular expression, the ``str()`` +of the raised exception must match the regular expression. If it is a matcher, +then the raised exception object must match it. 
+ + +assertIn, assertNotIn +--------------------- + +These two assertions check whether a value is in a sequence and whether a +value is not in a sequence. They are "assert" versions of the ``in`` and +``not in`` operators. For example:: + + def test_assert_in_example(self): + self.assertIn('a', 'cat') + self.assertNotIn('o', 'cat') + self.assertIn(5, list_of_primes_under_ten) + self.assertNotIn(12, list_of_primes_under_ten) + + +assertIs, assertIsNot +--------------------- + +These two assertions check whether values are identical to one another. This +is sometimes useful when you want to test something more strict than mere +equality. For example:: + + def test_assert_is_example(self): + foo = [None] + foo_alias = foo + bar = [None] + self.assertIs(foo, foo_alias) + self.assertIsNot(foo, bar) + self.assertEqual(foo, bar) # They are equal, but not identical + + +assertIsInstance +---------------- + +As much as we love duck-typing and polymorphism, sometimes you need to check +whether or not a value is of a given type. This method does that. For +example:: + + def test_assert_is_instance_example(self): + now = datetime.now() + self.assertIsInstance(now, datetime) + +Note that there is no ``assertIsNotInstance`` in testtools currently. + + +expectFailure +------------- + +Sometimes it's useful to write tests that fail. For example, you might want +to turn a bug report into a unit test, but you don't know how to fix the bug +yet. Or perhaps you want to document a known, temporary deficiency in a +dependency. + +testtools gives you the ``TestCase.expectFailure`` to help with this. You use +it to say that you expect this assertion to fail. When the test runs and the +assertion fails, testtools will report it as an "expected failure". 
+ +Here's an example:: + + def test_expect_failure_example(self): + self.expectFailure( + "cats should be dogs", self.assertEqual, 'cats', 'dogs') + +As long as 'cats' is not equal to 'dogs', the test will be reported as an +expected failure. + +If ever by some miracle 'cats' becomes 'dogs', then testtools will report an +"unexpected success". Unlike standard unittest, testtools treats this as +something that fails the test suite, like an error or a failure. + + +Matchers +======== + +The built-in assertion methods are very useful, they are the bread and butter +of writing tests. However, soon enough you will probably want to write your +own assertions. Perhaps there are domain specific things that you want to +check (e.g. assert that two widgets are aligned parallel to the flux grid), or +perhaps you want to check something that could almost but not quite be found +in some other standard library (e.g. assert that two paths point to the same +file). + +When you are in such situations, you could either make a base class for your +project that inherits from ``testtools.TestCase`` and make sure that all of +your tests derive from that, *or* you could use the testtools ``Matcher`` +system. + + +Using Matchers +-------------- + +Here's a really basic example using stock matchers found in testtools:: + + import testtools + from testtools.matchers import Equals + + class TestSquare(TestCase): + def test_square(self): + result = square(7) + self.assertThat(result, Equals(49)) + +The line ``self.assertThat(result, Equals(49))`` is equivalent to +``self.assertEqual(result, 49)`` and means "assert that ``result`` equals 49". +The difference is that ``assertThat`` is a more general method that takes some +kind of observed value (in this case, ``result``) and any matcher object +(here, ``Equals(49)``). + +The matcher object could be absolutely anything that implements the Matcher +protocol. 
This means that you can make more complex matchers by combining +existing ones:: + + def test_square_silly(self): + result = square(7) + self.assertThat(result, Not(Equals(50))) + +Which is roughly equivalent to:: + + def test_square_silly(self): + result = square(7) + self.assertNotEqual(result, 50) + + +Stock matchers +-------------- + +testtools comes with many matchers built in. They can all be found in and +imported from the ``testtools.matchers`` module. + +Equals +~~~~~~ + +Matches if two items are equal. For example:: + + def test_equals_example(self): + self.assertThat([42], Equals([42])) + + +Is +~~~ + +Matches if two items are identical. For example:: + + def test_is_example(self): + foo = object() + self.assertThat(foo, Is(foo)) + + +IsInstance +~~~~~~~~~~ + +Adapts isinstance() to use as a matcher. For example:: + + def test_isinstance_example(self): + class MyClass:pass + self.assertThat(MyClass(), IsInstance(MyClass)) + self.assertThat(MyClass(), IsInstance(MyClass, str)) + + +The raises helper +~~~~~~~~~~~~~~~~~ + +Matches if a callable raises a particular type of exception. For example:: + + def test_raises_example(self): + self.assertThat(lambda: 1/0, raises(ZeroDivisionError)) + +This is actually a convenience function that combines two other matchers: +Raises_ and MatchesException_. + + +DocTestMatches +~~~~~~~~~~~~~~ + +Matches a string as if it were the output of a doctest_ example. Very useful +for making assertions about large chunks of text. For example:: + + import doctest + + def test_doctest_example(self): + output = "Colorless green ideas" + self.assertThat( + output, + DocTestMatches("Colorless ... ideas", doctest.ELLIPSIS)) + +We highly recommend using the following flags:: + + doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF + + +GreaterThan +~~~~~~~~~~~ + +Matches if the given thing is greater than the thing in the matcher. 
For
+example::
+
+    def test_greater_than_example(self):
+        self.assertThat(3, GreaterThan(2))
+
+
+LessThan
+~~~~~~~~
+
+Matches if the given thing is less than the thing in the matcher. For
+example::
+
+    def test_less_than_example(self):
+        self.assertThat(2, LessThan(3))
+
+
+StartsWith, EndsWith
+~~~~~~~~~~~~~~~~~~~~
+
+These matchers check to see if a string starts with or ends with a particular
+substring. For example::
+
+    def test_starts_and_ends_with_example(self):
+        self.assertThat('underground', StartsWith('und'))
+        self.assertThat('underground', EndsWith('und'))
+
+
+Contains
+~~~~~~~~
+
+This matcher checks to see if the given thing contains the thing in the
+matcher. For example::
+
+    def test_contains_example(self):
+        self.assertThat('abc', Contains('b'))
+
+
+MatchesException
+~~~~~~~~~~~~~~~~
+
+Matches an exc_info tuple if the exception is of the correct type. For
+example::
+
+    def test_matches_exception_example(self):
+        try:
+            raise RuntimeError('foo')
+        except RuntimeError:
+            exc_info = sys.exc_info()
+        self.assertThat(exc_info, MatchesException(RuntimeError))
+        self.assertThat(exc_info, MatchesException(RuntimeError('bar')))
+
+Most of the time, you will want to use `The raises helper`_ instead.
+
+
+NotEquals
+~~~~~~~~~
+
+Matches if something is not equal to something else. Note that this is subtly
+different to ``Not(Equals(x))``. ``NotEquals(x)`` will match if ``y != x``,
+``Not(Equals(x))`` will match if ``not y == x``.
+
+You only need to worry about this distinction if you are testing code that
+relies on badly written overloaded equality operators.
+
+
+KeysEqual
+~~~~~~~~~
+
+Matches if the keys of one dict are equal to the keys of another dict. 
For
+example::
+
+    def test_keys_equal(self):
+        x = {'a': 1, 'b': 2}
+        y = {'a': 2, 'b': 3}
+        self.assertThat(x, KeysEqual(y))
+
+
+MatchesRegex
+~~~~~~~~~~~~
+
+Matches a string against a regular expression, which is a wonderful thing to
+be able to do, if you think about it::
+
+    def test_matches_regex_example(self):
+        self.assertThat('foo', MatchesRegex('fo+'))
+
+
+Combining matchers
+------------------
+
+One great thing about matchers is that you can readily combine existing
+matchers to get variations on their behaviour or to quickly build more complex
+assertions.
+
+Below are a few of the combining matchers that come with testtools.
+
+
+Not
+~~~
+
+Negates another matcher. For example::
+
+    def test_not_example(self):
+        self.assertThat([42], Not(Equals("potato")))
+        self.assertThat([42], Not(Is([42])))
+
+If you find yourself using ``Not`` frequently, you may wish to create a custom
+matcher for it. For example::
+
+    IsNot = lambda x: Not(Is(x))
+
+    def test_not_example_2(self):
+        self.assertThat([42], IsNot([42]))
+
+
+Annotate
+~~~~~~~~
+
+Used to add custom notes to a matcher. For example::
+
+    def test_annotate_example(self):
+        result = 43
+        self.assertThat(
+            result, Annotate("Not the answer to the Question!", Equals(42)))
+
+Since the annotation is only ever displayed when there is a mismatch
+(e.g. when ``result`` does not equal 42), it's a good idea to phrase the note
+negatively, so that it describes what a mismatch actually means.
+
+As with Not_, you may wish to create a custom matcher that describes a
+common operation. 
For
+example::
+
+    PoliticallyEquals = lambda x: Annotate("Death to the aristos!", Equals(x))
+
+    def test_annotate_example_2(self):
+        self.assertThat("orange", PoliticallyEquals("yellow"))
+
+You can have assertThat perform the annotation for you as a convenience::
+
+    def test_annotate_example_3(self):
+        self.assertThat("orange", Equals("yellow"), "Death to the aristos!")
+
+
+AfterPreprocessing
+~~~~~~~~~~~~~~~~~~
+
+Used to make a matcher that applies a function to the matched object before
+matching. This can be used to aid in creating trivial matchers as functions, for
+example::
+
+    def test_after_preprocessing_example(self):
+        def HasFileContent(content):
+            def _read(path):
+                return open(path).read()
+            return AfterPreprocessing(_read, Equals(content))
+        self.assertThat('/tmp/foo.txt', HasFileContent("Hello world!"))
+
+
+MatchesAll
+~~~~~~~~~~
+
+Combines many matchers to make a new matcher. The new matcher will only match
+things that match every single one of the component matchers.
+
+It's much easier to understand in Python than in English::
+
+    def test_matches_all_example(self):
+        has_und_at_both_ends = MatchesAll(StartsWith("und"), EndsWith("und"))
+        # This will succeed.
+        self.assertThat("underground", has_und_at_both_ends)
+        # This will fail.
+        self.assertThat("found", has_und_at_both_ends)
+        # So will this.
+        self.assertThat("undead", has_und_at_both_ends)
+
+At this point some people ask themselves, "why bother doing this at all? why
+not just have two separate assertions?". It's a good question.
+
+The first reason is that when a ``MatchesAll`` gets a mismatch, the error will
+include information about all of the bits that mismatched. When you have two
+separate assertions, as below::
+
+    def test_two_separate_assertions(self):
+        self.assertThat("foo", StartsWith("und"))
+        self.assertThat("foo", EndsWith("und"))
+
+Then you get absolutely no information from the second assertion if the first
+assertion fails. 
Tests are largely there to help you debug code, so having +more information in error messages is a big help. + +The second reason is that it is sometimes useful to give a name to a set of +matchers. ``has_und_at_both_ends`` is a bit contrived, of course, but it is +clear. + + +MatchesAny +~~~~~~~~~~ + +Like MatchesAll_, ``MatchesAny`` combines many matchers to make a new +matcher. The difference is that the new matchers will match a thing if it +matches *any* of the component matchers. + +For example:: + + def test_matches_any_example(self): + self.assertThat(42, MatchesAny(Equals(5), Not(Equals(6)))) + + +AllMatch +~~~~~~~~ + +Matches many values against a single matcher. Can be used to make sure that +many things all meet the same condition:: + + def test_all_match_example(self): + self.assertThat([2, 3, 5, 7], AllMatch(LessThan(10))) + +If the match fails, then all of the values that fail to match will be included +in the error message. + +In some ways, this is the converse of MatchesAll_. + + +MatchesListwise +~~~~~~~~~~~~~~~ + +Where ``MatchesAny`` and ``MatchesAll`` combine many matchers to match a +single value, ``MatchesListwise`` combines many matches to match many values. + +For example:: + + def test_matches_listwise_example(self): + self.assertThat( + [1, 2, 3], MatchesListwise(map(Equals, [1, 2, 3]))) + +This is useful for writing custom, domain-specific matchers. + + +MatchesSetwise +~~~~~~~~~~~~~~ + +Combines many matchers to match many values, without regard to their order. + +Here's an example:: + + def test_matches_setwise_example(self): + self.assertThat( + [1, 2, 3], MatchesSetwise(Equals(2), Equals(3), Equals(1))) + +Much like ``MatchesListwise``, best used for writing custom, domain-specific +matchers. + + +MatchesStructure +~~~~~~~~~~~~~~~~ + +Creates a matcher that matches certain attributes of an object against a +pre-defined set of matchers. 
+ +It's much easier to understand in Python than in English:: + + def test_matches_structure_example(self): + foo = Foo() + foo.a = 1 + foo.b = 2 + matcher = MatchesStructure(a=Equals(1), b=Equals(2)) + self.assertThat(foo, matcher) + +Since all of the matchers used were ``Equals``, we could also write this using +the ``byEquality`` helper:: + + def test_matches_structure_example(self): + foo = Foo() + foo.a = 1 + foo.b = 2 + matcher = MatchesStructure.byEquality(a=1, b=2) + self.assertThat(foo, matcher) + +``MatchesStructure.fromExample`` takes an object and a list of attributes and +creates a ``MatchesStructure`` matcher where each attribute of the matched +object must equal each attribute of the example object. For example:: + + matcher = MatchesStructure.fromExample(foo, 'a', 'b') + +is exactly equivalent to ``matcher`` in the previous example. + + +Raises +~~~~~~ + +Takes whatever the callable raises as an exc_info tuple and matches it against +whatever matcher it was given. For example, if you want to assert that a +callable raises an exception of a given type:: + + def test_raises_example(self): + self.assertThat( + lambda: 1/0, Raises(MatchesException(ZeroDivisionError))) + +Although note that this could also be written as:: + + def test_raises_example_convenient(self): + self.assertThat(lambda: 1/0, raises(ZeroDivisionError)) + +See also MatchesException_ and `the raises helper`_ + + +Writing your own matchers +------------------------- + +Combining matchers is fun and can get you a very long way indeed, but +sometimes you will have to write your own. Here's how. + +You need to make two closely-linked objects: a ``Matcher`` and a +``Mismatch``. The ``Matcher`` knows how to actually make the comparison, and +the ``Mismatch`` knows how to describe a failure to match. 
+
+Here's an example matcher::
+
+    class IsDivisibleBy(object):
+        """Match if a number is divisible by another number."""
+        def __init__(self, divider):
+            self.divider = divider
+        def __str__(self):
+            return 'IsDivisibleBy(%s)' % (self.divider,)
+        def match(self, actual):
+            remainder = actual % self.divider
+            if remainder != 0:
+                return IsDivisibleByMismatch(actual, self.divider, remainder)
+            else:
+                return None
+
+The matcher has a constructor that takes parameters that describe what you
+actually *expect*, in this case a number that other numbers ought to be
+divisible by. It has a ``__str__`` method, the result of which is displayed
+on failure by ``assertThat`` and a ``match`` method that does the actual
+matching.
+
+``match`` takes something to match against, here ``actual``, and decides
+whether or not it matches. If it does match, then ``match`` must return
+``None``. If it does *not* match, then ``match`` must return a ``Mismatch``
+object. ``assertThat`` will call ``match`` and then fail the test if it
+returns a non-None value. For example::
+
+    def test_is_divisible_by_example(self):
+        # This succeeds, since IsDivisibleBy(5).match(10) returns None.
+        self.assertThat(10, IsDivisibleBy(5))
+        # This fails, since IsDivisibleBy(7).match(10) returns a mismatch.
+        self.assertThat(10, IsDivisibleBy(7))
+
+The mismatch is responsible for what sort of error message the failing test
+generates. Here's an example mismatch::
+
+    class IsDivisibleByMismatch(object):
+        def __init__(self, number, divider, remainder):
+            self.number = number
+            self.divider = divider
+            self.remainder = remainder
+
+        def describe(self):
+            return "%r is not divisible by %r, %r remains" % (
+                self.number, self.divider, self.remainder)
+
+        def get_details(self):
+            return {}
+
+The mismatch takes information about the mismatch, and provides a ``describe``
+method that assembles all of that into a nice error message for end users. 
+
You can use the ``get_details`` method to provide extra, arbitrary data with
+the mismatch (e.g. the contents of a log file). Most of the time it's fine to
+just return an empty dict. You can read more about Details_ elsewhere in this
+document.
+
+Sometimes you don't need to create a custom mismatch class. In particular, if
+you don't care *when* the description is calculated, then you can just do that
+in the Matcher itself like this::
+
+    def match(self, actual):
+        remainder = actual % self.divider
+        if remainder != 0:
+            return Mismatch(
+                "%r is not divisible by %r, %r remains" % (
+                    actual, self.divider, remainder))
+        else:
+            return None
+
+When writing a ``describe`` method or constructing a ``Mismatch`` object the
+code should ensure it only emits printable unicode. As this output must be
+combined with other text and forwarded for presentation, letting through
+non-ascii bytes of ambiguous encoding or control characters could throw an
+exception or mangle the display. In most cases simply avoiding the ``%s``
+format specifier and using ``%r`` instead will be enough. For examples of
+more complex formatting see the ``testtools.matchers`` implementations.
+
+
+Details
+=======
+
+As we may have mentioned once or twice already, one of the great benefits of
+automated tests is that they help find, isolate and debug errors in your
+system.
+
+Frequently however, the information provided by a mere assertion failure is
+not enough. It's often useful to have other information: the contents of log
+files; what queries were run; benchmark timing information; what state certain
+subsystem components are in and so forth.
+
+testtools calls all of these things "details" and provides a single, powerful
+mechanism for including this information in your test run. 
+
+Here's an example of how to add them::
+
+    from testtools import TestCase
+    from testtools.content import text_content
+
+    class TestSomething(TestCase):
+
+        def test_thingy(self):
+            self.addDetail('arbitrary-color-name', text_content("blue"))
+            1 / 0 # Gratuitous error!
+
+A detail is an arbitrary piece of content given a name that's unique within the
+test. Here the name is ``arbitrary-color-name`` and the content is
+``text_content("blue")``. The name can be any text string, and the content
+can be any ``testtools.content.Content`` object.
+
+When the test runs, testtools will show you something like this::
+
+    ======================================================================
+    ERROR: exampletest.TestSomething.test_thingy
+    ----------------------------------------------------------------------
+    arbitrary-color-name: {{{blue}}}
+
+    Traceback (most recent call last):
+      File "exampletest.py", line 8, in test_thingy
+        1 / 0 # Gratuitous error!
+    ZeroDivisionError: integer division or modulo by zero
+    ------------
+    Ran 1 test in 0.030s
+
+As you can see, the detail is included as an attachment, here saying
+that our arbitrary-color-name is "blue".
+
+
+Content
+-------
+
+For the actual content of details, testtools uses its own MIME-based Content
+object. This allows you to attach any information that you could possibly
+conceive of to a test, and allows testtools to use or serialize that
+information.
+
+The basic ``testtools.content.Content`` object is constructed from a
+``testtools.content.ContentType`` and a nullary callable that must return an
+iterator of chunks of bytes that the content is made from. 
+ +So, to make a Content object that is just a simple string of text, you can +do:: + + from testtools.content import Content + from testtools.content_type import ContentType + + text = Content(ContentType('text', 'plain'), lambda: ["some text"]) + +Because adding small bits of text content is very common, there's also a +convenience method:: + + text = text_content("some text") + +To make content out of an image stored on disk, you could do something like:: + + image = Content(ContentType('image', 'png'), lambda: open('foo.png').read()) + +Or you could use the convenience function:: + + image = content_from_file('foo.png', ContentType('image', 'png')) + +The ``lambda`` helps make sure that the file is opened and the actual bytes +read only when they are needed – by default, when the test is finished. This +means that tests can construct and add Content objects freely without worrying +too much about how they affect run time. + + +A realistic example +------------------- + +A very common use of details is to add a log file to failing tests. Say your +project has a server represented by a class ``SomeServer`` that you can start +up and shut down in tests, but runs in another process. You want to test +interaction with that server, and whenever the interaction fails, you want to +see the client-side error *and* the logs from the server-side. 
Here's how you +might do it:: + + from testtools import TestCase + from testtools.content import attach_file, Content + from testtools.content_type import UTF8_TEXT + + from myproject import SomeServer + + class SomeTestCase(TestCase): + + def setUp(self): + super(SomeTestCase, self).setUp() + self.server = SomeServer() + self.server.start_up() + self.addCleanup(self.server.shut_down) + self.addCleanup(attach_file, self.server.logfile, self) + + def attach_log_file(self): + self.addDetail( + 'log-file', + Content(UTF8_TEXT, + lambda: open(self.server.logfile, 'r').readlines())) + + def test_a_thing(self): + self.assertEqual("cool", self.server.temperature) + +This test will attach the log file of ``SomeServer`` to each test that is +run. testtools will only display the log file for failing tests, so it's not +such a big deal. + +If the act of adding at detail is expensive, you might want to use +addOnException_ so that you only do it when a test actually raises an +exception. + + +Controlling test execution +========================== + +.. _addCleanup: + +addCleanup +---------- + +``TestCase.addCleanup`` is a robust way to arrange for a clean up function to +be called before ``tearDown``. This is a powerful and simple alternative to +putting clean up logic in a try/finally block or ``tearDown`` method. For +example:: + + def test_foo(self): + foo.lock() + self.addCleanup(foo.unlock) + ... + +This is particularly useful if you have some sort of factory in your test:: + + def make_locked_foo(self): + foo = Foo() + foo.lock() + self.addCleanup(foo.unlock) + return foo + + def test_frotz_a_foo(self): + foo = self.make_locked_foo() + foo.frotz() + self.assertEqual(foo.frotz_count, 1) + +Any extra arguments or keyword arguments passed to ``addCleanup`` are passed +to the callable at cleanup time. 
+ +Cleanups can also report multiple errors, if appropriate by wrapping them in +a ``testtools.MultipleExceptions`` object:: + + raise MultipleExceptions(exc_info1, exc_info2) + + +Fixtures +-------- + +Tests often depend on a system being set up in a certain way, or having +certain resources available to them. Perhaps a test needs a connection to the +database or access to a running external server. + +One common way of doing this is to do:: + + class SomeTest(TestCase): + def setUp(self): + super(SomeTest, self).setUp() + self.server = Server() + self.server.setUp() + self.addCleanup(self.server.tearDown) + +testtools provides a more convenient, declarative way to do the same thing:: + + class SomeTest(TestCase): + def setUp(self): + super(SomeTest, self).setUp() + self.server = self.useFixture(Server()) + +``useFixture(fixture)`` calls ``setUp`` on the fixture, schedules a clean up +to clean it up, and schedules a clean up to attach all details_ held by the +fixture to the test case. The fixture object must meet the +``fixtures.Fixture`` protocol (version 0.3.4 or newer, see fixtures_). + +If you have anything beyond the most simple test set up, we recommend that +you put this set up into a ``Fixture`` class. Once there, the fixture can be +easily re-used by other tests and can be combined with other fixtures to make +more complex resources. + + +Skipping tests +-------------- + +Many reasons exist to skip a test: a dependency might be missing; a test might +be too expensive and thus should not berun while on battery power; or perhaps +the test is testing an incomplete feature. + +``TestCase.skipTest`` is a simple way to have a test stop running and be +reported as a skipped test, rather than a success, error or failure. 
For +example:: + + def test_make_symlink(self): + symlink = getattr(os, 'symlink', None) + if symlink is None: + self.skipTest("No symlink support") + symlink(whatever, something_else) + +Using ``skipTest`` means that you can make decisions about what tests to run +as late as possible, and close to the actual tests. Without it, you might be +forced to use convoluted logic during test loading, which is a bit of a mess. + + +Legacy skip support +~~~~~~~~~~~~~~~~~~~ + +If you are using this feature when running your test suite with a legacy +``TestResult`` object that is missing the ``addSkip`` method, then the +``addError`` method will be invoked instead. If you are using a test result +from testtools, you do not have to worry about this. + +In older versions of testtools, ``skipTest`` was known as ``skip``. Since +Python 2.7 added ``skipTest`` support, the ``skip`` name is now deprecated. +No warning is emitted yet – some time in the future we may do so. + + +addOnException +-------------- + +Sometimes, you might wish to do something only when a test fails. Perhaps you +need to run expensive diagnostic routines or some such. +``TestCase.addOnException`` allows you to easily do just this. For example:: + + class SomeTest(TestCase): + def setUp(self): + super(SomeTest, self).setUp() + self.server = self.useFixture(SomeServer()) + self.addOnException(self.attach_server_diagnostics) + + def attach_server_diagnostics(self, exc_info): + self.server.prep_for_diagnostics() # Expensive! + self.addDetail('server-diagnostics', self.server.get_diagnostics) + + def test_a_thing(self): + self.assertEqual('cheese', 'chalk') + +In this example, ``attach_server_diagnostics`` will only be called when a test +fails. It is given the exc_info tuple of the error raised by the test, just +in case it is needed. + + +Twisted support +--------------- + +testtools provides *highly experimental* support for running Twisted tests – +tests that return a Deferred_ and rely on the Twisted reactor. 
You should not +use this feature right now. We reserve the right to change the API and +behaviour without telling you first. + +However, if you are going to, here's how you do it:: + + from testtools import TestCase + from testtools.deferredruntest import AsynchronousDeferredRunTest + + class MyTwistedTests(TestCase): + + run_tests_with = AsynchronousDeferredRunTest + + def test_foo(self): + # ... + return d + +In particular, note that you do *not* have to use a special base ``TestCase`` +in order to run Twisted tests. + +You can also run individual tests within a test case class using the Twisted +test runner:: + + class MyTestsSomeOfWhichAreTwisted(TestCase): + + def test_normal(self): + pass + + @run_test_with(AsynchronousDeferredRunTest) + def test_twisted(self): + # ... + return d + +Here are some tips for converting your Trial tests into testtools tests. + +* Use the ``AsynchronousDeferredRunTest`` runner +* Make sure to upcall to ``setUp`` and ``tearDown`` +* Don't use ``setUpClass`` or ``tearDownClass`` +* Don't expect setting .todo, .timeout or .skip attributes to do anything +* ``flushLoggedErrors`` is ``testtools.deferredruntest.flush_logged_errors`` +* ``assertFailure`` is ``testtools.deferredruntest.assert_fails_with`` +* Trial spins the reactor a couple of times before cleaning it up, + ``AsynchronousDeferredRunTest`` does not. If you rely on this behavior, use + ``AsynchronousDeferredRunTestForBrokenTwisted``. + + +Test helpers +============ + +testtools comes with a few little things that make it a little bit easier to +write tests. + + +TestCase.patch +-------------- + +``patch`` is a convenient way to monkey-patch a Python object for the duration +of your test. It's especially useful for testing legacy code. 
e.g.:: + + def test_foo(self): + my_stream = StringIO() + self.patch(sys, 'stderr', my_stream) + run_some_code_that_prints_to_stderr() + self.assertEqual('', my_stream.getvalue()) + +The call to ``patch`` above masks ``sys.stderr`` with ``my_stream`` so that +anything printed to stderr will be captured in a StringIO variable that can be +actually tested. Once the test is done, the real ``sys.stderr`` is restored to +its rightful place. + + +Creation methods +---------------- + +Often when writing unit tests, you want to create an object that is a +completely normal instance of its type. You don't want there to be anything +special about its properties, because you are testing generic behaviour rather +than specific conditions. + +A lot of the time, test authors do this by making up silly strings and numbers +and passing them to constructors (e.g. 42, 'foo', "bar" etc), and that's +fine. However, sometimes it's useful to be able to create arbitrary objects +at will, without having to make up silly sample data. + +To help with this, ``testtools.TestCase`` implements creation methods called +``getUniqueString`` and ``getUniqueInteger``. They return strings and +integers that are unique within the context of the test that can be used to +assemble more complex objects. 
Here's a basic example where +``getUniqueString`` is used instead of saying "foo" or "bar" or whatever:: + + class SomeTest(TestCase): + + def test_full_name(self): + first_name = self.getUniqueString() + last_name = self.getUniqueString() + p = Person(first_name, last_name) + self.assertEqual(p.full_name, "%s %s" % (first_name, last_name)) + + +And here's how it could be used to make a complicated test:: + + class TestCoupleLogic(TestCase): + + def make_arbitrary_person(self): + return Person(self.getUniqueString(), self.getUniqueString()) + + def test_get_invitation(self): + a = self.make_arbitrary_person() + b = self.make_arbitrary_person() + couple = Couple(a, b) + event_name = self.getUniqueString() + invitation = couple.get_invitation(event_name) + self.assertEqual( + invitation, + "We invite %s and %s to %s" % ( + a.full_name, b.full_name, event_name)) + +Essentially, creation methods like these are a way of reducing the number of +assumptions in your tests and communicating to test readers that the exact +details of certain variables don't actually matter. + +See pages 419-423 of `xUnit Test Patterns`_ by Gerard Meszaros for a detailed +discussion of creation methods. + + +General helpers +=============== + +Conditional imports +------------------- + +Lots of the time we would like to conditionally import modules. testtools +needs to do this itself, and graciously extends the ability to its users. + +Instead of:: + + try: + from twisted.internet import defer + except ImportError: + defer = None + +You can do:: + + defer = try_import('twisted.internet.defer') + + +Instead of:: + + try: + from StringIO import StringIO + except ImportError: + from io import StringIO + +You can do:: + + StringIO = try_imports(['StringIO.StringIO', 'io.StringIO']) + + +Safe attribute testing +---------------------- + +``hasattr`` is broken_ on many versions of Python. 
testtools provides +``safe_hasattr``, which can be used to safely test whether an object has a +particular attribute. + + +.. _testrepository: https://launchpad.net/testrepository +.. _Trial: http://twistedmatrix.com/documents/current/core/howto/testing.html +.. _nose: http://somethingaboutorange.com/mrl/projects/nose/ +.. _unittest2: http://pypi.python.org/pypi/unittest2 +.. _zope.testrunner: http://pypi.python.org/pypi/zope.testrunner/ +.. _xUnit test patterns: http://xunitpatterns.com/ +.. _fixtures: http://pypi.python.org/pypi/fixtures +.. _unittest: http://docs.python.org/library/unittest.html +.. _doctest: http://docs.python.org/library/doctest.html +.. _Deferred: http://twistedmatrix.com/documents/current/core/howto/defer.html +.. _discover: http://pypi.python.org/pypi/discover +.. _`testtools API docs`: http://mumak.net/testtools/apidocs/ +.. _Distutils: http://docs.python.org/library/distutils.html +.. _`setup configuration`: http://docs.python.org/distutils/configfile.html +.. _broken: http://chipaca.com/post/3210673069/hasattr-17-less-harmful diff --git a/test/3rdparty/testtools-0.9.12/doc/hacking.rst b/test/3rdparty/testtools-0.9.12/doc/hacking.rst new file mode 100644 index 00000000000..b9f5ff22c6c --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/hacking.rst @@ -0,0 +1,154 @@ +========================= +Contributing to testtools +========================= + +Coding style +------------ + +In general, follow `PEP 8`_ except where consistency with the standard +library's unittest_ module would suggest otherwise. + +testtools supports Python 2.4 and later, including Python 3, so avoid any +2.5-only features like the ``with`` statement. + + +Copyright assignment +-------------------- + +Part of testtools raison d'etre is to provide Python with improvements to the +testing code it ships. For that reason we require all contributions (that are +non-trivial) to meet one of the following rules: + +* be inapplicable for inclusion in Python. 
+* be able to be included in Python without further contact with the contributor. +* be copyright assigned to Jonathan M. Lange. + +Please pick one of these and specify it when contributing code to testtools. + + +Licensing +--------- + +All code that is not copyright assigned to Jonathan M. Lange (see Copyright +Assignment above) needs to be licensed under the `MIT license`_ that testtools +uses, so that testtools can ship it. + + +Testing +------- + +Please write tests for every feature. This project ought to be a model +example of well-tested Python code! + +Take particular care to make sure the *intent* of each test is clear. + +You can run tests with ``make check``. + +By default, testtools hides many levels of its own stack when running tests. +This is for the convenience of users, who do not care about how, say, assert +methods are implemented. However, when writing tests for testtools itself, it +is often useful to see all levels of the stack. To do this, add +``run_tests_with = FullStackRunTest`` to the top of a test's class definition. + + +Documentation +------------- + +Documents are written using the Sphinx_ variant of reStructuredText_. All +public methods, functions, classes and modules must have API documentation. +When changing code, be sure to check the API documentation to see if it could +be improved. Before submitting changes to trunk, look over them and see if +the manuals ought to be updated. + + +Source layout +------------- + +The top-level directory contains the ``testtools/`` package directory, and +miscellaneous files like ``README`` and ``setup.py``. + +The ``testtools/`` directory is the Python package itself. It is separated +into submodules for internal clarity, but all public APIs should be “promoted” +into the top-level package by importing them in ``testtools/__init__.py``. +Users of testtools should never import a submodule in order to use a stable +API. 
Unstable APIs like ``testtools.matchers`` and +``testtools.deferredruntest`` should be exported as submodules. + +Tests belong in ``testtools/tests/``. + + +Committing to trunk +------------------- + +Testtools is maintained using bzr, with its trunk at lp:testtools. This gives +every contributor the ability to commit their work to their own branches. +However permission must be granted to allow contributors to commit to the trunk +branch. + +Commit access to trunk is obtained by joining the testtools-committers +Launchpad team. Membership in this team is contingent on obeying the testtools +contribution policy, see `Copyright Assignment`_ above. + + +Code Review +----------- + +All code must be reviewed before landing on trunk. The process is to create a +branch in launchpad, and submit it for merging to lp:testtools. It will then +be reviewed before it can be merged to trunk. It will be reviewed by someone: + +* not the author +* a committer (member of the `~testtools-committers`_ team) + +As a special exception, while the testtools committers team is small and prone +to blocking, a merge request from a committer that has not been reviewed after +24 hours may be merged by that committer. When the team is larger this policy +will be revisited. + +Code reviewers should look for the quality of what is being submitted, +including conformance with this HACKING file. + +Changes which all users should be made aware of should be documented in NEWS. + + +NEWS management +--------------- + +The file NEWS is structured as a sorted list of releases. Each release can have +a free form description and more or more sections with bullet point items. +Sections in use today are 'Improvements' and 'Changes'. To ease merging between +branches, the bullet points are kept alphabetically sorted. The release NEXT is +permanently present at the top of the list. + + +Release tasks +------------- + +#. Choose a version number, say X.Y.Z +#. Branch from trunk to testtools-X.Y.Z +#. 
In testtools-X.Y.Z, ensure __init__ has version ``(X, Y, Z, 'final', 0)`` +#. Replace NEXT in NEWS with the version number X.Y.Z, adjusting the reST. +#. Possibly write a blurb into NEWS. +#. Replace any additional references to NEXT with the version being + released. (There should be none other than the ones in these release tasks + which should not be replaced). +#. Commit the changes. +#. Tag the release, bzr tag testtools-X.Y.Z +#. Run 'make release', this: + #. Creates a source distribution and uploads to PyPI + #. Ensures all Fix Committed bugs are in the release milestone + #. Makes a release on Launchpad and uploads the tarball + #. Marks all the Fix Committed bugs as Fix Released + #. Creates a new milestone +#. Merge the release branch testtools-X.Y.Z into trunk. Before the commit, + add a NEXT heading to the top of NEWS and bump the version in __init__.py. + Push trunk to Launchpad +#. If a new series has been created (e.g. 0.10.0), make the series on Launchpad. + +.. _PEP 8: http://www.python.org/dev/peps/pep-0008/ +.. _unittest: http://docs.python.org/library/unittest.html +.. _~testtools-dev: https://launchpad.net/~testtools-dev +.. _MIT license: http://www.opensource.org/licenses/mit-license.php +.. _Sphinx: http://sphinx.pocoo.org/ +.. _restructuredtext: http://docutils.sourceforge.net/rst.html + diff --git a/test/3rdparty/testtools-0.9.12/doc/index.rst b/test/3rdparty/testtools-0.9.12/doc/index.rst new file mode 100644 index 00000000000..4687cebb62b --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/index.rst @@ -0,0 +1,33 @@ +.. testtools documentation master file, created by + sphinx-quickstart on Sun Nov 28 13:45:40 2010. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +testtools: tasteful testing for Python +====================================== + +testtools is a set of extensions to the Python standard library's unit testing +framework. 
These extensions have been derived from many years of experience +with unit testing in Python and come from many different sources. testtools +also ports recent unittest changes all the way back to Python 2.4. + + +Contents: + +.. toctree:: + :maxdepth: 1 + + overview + for-test-authors + for-framework-folk + hacking + Changes to testtools <news> + API reference documentation <http://mumak.net/testtools/apidocs/> + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/test/3rdparty/testtools-0.9.12/doc/make.bat b/test/3rdparty/testtools-0.9.12/doc/make.bat new file mode 100644 index 00000000000..f8c1fd520ab --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/make.bat @@ -0,0 +1,113 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +set SPHINXBUILD=sphinx-build +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^<target^>` where ^<target^> is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. 
+ goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\testtools.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\testtools.ghc + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. 
+ goto end +) + +:end diff --git a/test/3rdparty/testtools-0.9.12/doc/overview.rst b/test/3rdparty/testtools-0.9.12/doc/overview.rst new file mode 100644 index 00000000000..e43265fd1e0 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/doc/overview.rst @@ -0,0 +1,96 @@ +====================================== +testtools: tasteful testing for Python +====================================== + +testtools is a set of extensions to the Python standard library's unit testing +framework. These extensions have been derived from many years of experience +with unit testing in Python and come from many different sources. testtools +also ports recent unittest changes all the way back to Python 2.4. + +What better way to start than with a contrived code snippet?:: + + from testtools import TestCase + from testtools.content import Content + from testtools.content_type import UTF8_TEXT + from testtools.matchers import Equals + + from myproject import SillySquareServer + + class TestSillySquareServer(TestCase): + + def setUp(self): + super(TestSillySquare, self).setUp() + self.server = self.useFixture(SillySquareServer()) + self.addCleanup(self.attach_log_file) + + def attach_log_file(self): + self.addDetail( + 'log-file', + Content(UTF8_TEXT + lambda: open(self.server.logfile, 'r').readlines())) + + def test_server_is_cool(self): + self.assertThat(self.server.temperature, Equals("cool")) + + def test_square(self): + self.assertThat(self.server.silly_square_of(7), Equals(49)) + + +Why use testtools? +================== + +Better assertion methods +------------------------ + +The standard assertion methods that come with unittest aren't as helpful as +they could be, and there aren't quite enough of them. testtools adds +``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives. 
+ + +Matchers: better than assertion methods +--------------------------------------- + +Of course, in any serious project you want to be able to have assertions that +are specific to that project and the particular problem that it is addressing. +Rather than forcing you to define your own assertion methods and maintain your +own inheritance hierarchy of ``TestCase`` classes, testtools lets you write +your own "matchers", custom predicates that can be plugged into a unit test:: + + def test_response_has_bold(self): + # The response has bold text. + response = self.server.getResponse() + self.assertThat(response, HTMLContains(Tag('bold', 'b'))) + + +More debugging info, when you need it +-------------------------------------- + +testtools makes it easy to add arbitrary data to your test result. If you +want to know what's in a log file when a test fails, or what the load was on +the computer when a test started, or what files were open, you can add that +information with ``TestCase.addDetail``, and it will appear in the test +results if that test fails. + + +Extend unittest, but stay compatible and re-usable +-------------------------------------------------- + +testtools goes to great lengths to allow serious test authors and test +*framework* authors to do whatever they like with their tests and their +extensions while staying compatible with the standard library's unittest. + +testtools has completely parametrized how exceptions raised in tests are +mapped to ``TestResult`` methods and how tests are actually executed (ever +wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?) + +It also provides many simple but handy utilities, like the ability to clone a +test, a ``MultiTestResult`` object that lets many result objects get the +results from one test suite, adapters to bring legacy ``TestResult`` objects +into our new golden age. 
+ + +Cross-Python compatibility +-------------------------- + +testtools gives you the very latest in unit testing technology in a way that +will work with Python 2.4, 2.5, 2.6, 2.7 and 3.1. diff --git a/test/3rdparty/testtools-0.9.12/setup.cfg b/test/3rdparty/testtools-0.9.12/setup.cfg new file mode 100644 index 00000000000..9f95adde2b4 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/setup.cfg @@ -0,0 +1,4 @@ +[test] +test_module = testtools.tests +buffer=1 +catch=1 diff --git a/test/3rdparty/testtools-0.9.12/setup.py b/test/3rdparty/testtools-0.9.12/setup.py new file mode 100755 index 00000000000..d07c8f29359 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/setup.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +"""Distutils installer for testtools.""" + +from distutils.core import setup +import email +import os + +import testtools + + +def get_revno(): + import bzrlib.errors + import bzrlib.workingtree + try: + t = bzrlib.workingtree.WorkingTree.open_containing(__file__)[0] + except (bzrlib.errors.NotBranchError, bzrlib.errors.NoWorkingTree): + return None + else: + return t.branch.revno() + + +def get_version_from_pkg_info(): + """Get the version from PKG-INFO file if we can.""" + pkg_info_path = os.path.join(os.path.dirname(__file__), 'PKG-INFO') + try: + pkg_info_file = open(pkg_info_path, 'r') + except (IOError, OSError): + return None + try: + pkg_info = email.message_from_file(pkg_info_file) + except email.MessageError: + return None + return pkg_info.get('Version', None) + + +def get_version(): + """Return the version of testtools that we are building.""" + version = '.'.join( + str(component) for component in testtools.__version__[0:3]) + phase = testtools.__version__[3] + if phase == 'final': + return version + pkg_info_version = get_version_from_pkg_info() + if pkg_info_version: + return pkg_info_version + revno = get_revno() + if revno is None: + return "snapshot" + if phase == 'alpha': + # No idea what the next version will be + return 'next-r%s' % 
revno + else: + # Preserve the version number but give it a revno prefix + return version + '-r%s' % revno + + +def get_long_description(): + manual_path = os.path.join( + os.path.dirname(__file__), 'doc/overview.rst') + return open(manual_path).read() + + +setup(name='testtools', + author='Jonathan M. Lange', + author_email='jml+testtools@mumak.net', + url='https://launchpad.net/testtools', + description=('Extensions to the Python standard library unit testing ' + 'framework'), + long_description=get_long_description(), + version=get_version(), + classifiers=["License :: OSI Approved :: MIT License"], + packages=['testtools', 'testtools.testresult', 'testtools.tests'], + cmdclass={'test': testtools.TestCommand}) diff --git a/test/3rdparty/testtools-0.9.12/testtools/__init__.py b/test/3rdparty/testtools-0.9.12/testtools/__init__.py new file mode 100644 index 00000000000..11b5662b146 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/__init__.py @@ -0,0 +1,83 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. + +"""Extensions to the standard Python unittest library.""" + +__all__ = [ + 'clone_test_with_new_id', + 'ConcurrentTestSuite', + 'ErrorHolder', + 'ExpectedException', + 'ExtendedToOriginalDecorator', + 'FixtureSuite', + 'iterate_tests', + 'MultipleExceptions', + 'MultiTestResult', + 'PlaceHolder', + 'run_test_with', + 'TestCase', + 'TestCommand', + 'TestResult', + 'TextTestResult', + 'RunTest', + 'skip', + 'skipIf', + 'skipUnless', + 'ThreadsafeForwardingResult', + 'try_import', + 'try_imports', + ] + +from testtools.helpers import ( + try_import, + try_imports, + ) +from testtools.matchers import ( + Matcher, + ) +# Shut up, pyflakes. We are importing for documentation, not for namespacing. 
+Matcher + +from testtools.runtest import ( + MultipleExceptions, + RunTest, + ) +from testtools.testcase import ( + ErrorHolder, + ExpectedException, + PlaceHolder, + TestCase, + clone_test_with_new_id, + run_test_with, + skip, + skipIf, + skipUnless, + ) +from testtools.testresult import ( + ExtendedToOriginalDecorator, + MultiTestResult, + TestResult, + TextTestResult, + ThreadsafeForwardingResult, + ) +from testtools.testsuite import ( + ConcurrentTestSuite, + FixtureSuite, + iterate_tests, + ) +from testtools.distutilscmd import ( + TestCommand, +) + +# same format as sys.version_info: "A tuple containing the five components of +# the version number: major, minor, micro, releaselevel, and serial. All +# values except releaselevel are integers; the release level is 'alpha', +# 'beta', 'candidate', or 'final'. The version_info value corresponding to the +# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a +# releaselevel of 'dev' for unreleased under-development code. +# +# If the releaselevel is 'alpha' then the major/minor/micro components are not +# established at this point, and setup.py will use a version of next-$(revno). +# If the releaselevel is 'final', then the tarball will be major.minor.micro. +# Otherwise it is major.minor.micro~$(revno). + +__version__ = (0, 9, 12, 'final', 0) diff --git a/test/3rdparty/testtools-0.9.12/testtools/_compat2x.py b/test/3rdparty/testtools-0.9.12/testtools/_compat2x.py new file mode 100644 index 00000000000..2b25c13e081 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/_compat2x.py @@ -0,0 +1,17 @@ +# Copyright (c) 2011 testtools developers. See LICENSE for details. + +"""Compatibility helpers that are valid syntax in Python 2.x. + +Only add things here if they *only* work in Python 2.x or are Python 2 +alternatives to things that *only* work in Python 3.x. 
+""" + +__all__ = [ + 'reraise', + ] + + +def reraise(exc_class, exc_obj, exc_tb, _marker=object()): + """Re-raise an exception received from sys.exc_info() or similar.""" + raise exc_class, exc_obj, exc_tb + diff --git a/test/3rdparty/testtools-0.9.12/testtools/_compat3x.py b/test/3rdparty/testtools-0.9.12/testtools/_compat3x.py new file mode 100644 index 00000000000..f3d569662da --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/_compat3x.py @@ -0,0 +1,17 @@ +# Copyright (c) 2011 testtools developers. See LICENSE for details. + +"""Compatibility helpers that are valid syntax in Python 3.x. + +Only add things here if they *only* work in Python 3.x or are Python 3 +alternatives to things that *only* work in Python 2.x. +""" + +__all__ = [ + 'reraise', + ] + + +def reraise(exc_class, exc_obj, exc_tb, _marker=object()): + """Re-raise an exception received from sys.exc_info() or similar.""" + raise exc_class(*exc_obj.args).with_traceback(exc_tb) + diff --git a/test/3rdparty/testtools-0.9.12/testtools/_spinner.py b/test/3rdparty/testtools-0.9.12/testtools/_spinner.py new file mode 100644 index 00000000000..baf455a5f94 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/_spinner.py @@ -0,0 +1,316 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Evil reactor-spinning logic for running Twisted tests. + +This code is highly experimental, liable to change and not to be trusted. If +you couldn't write this yourself, you should not be using it. 
+""" + +__all__ = [ + 'DeferredNotFired', + 'extract_result', + 'NoResultError', + 'not_reentrant', + 'ReentryError', + 'Spinner', + 'StaleJunkError', + 'TimeoutError', + 'trap_unhandled_errors', + ] + +import signal + +from testtools.monkey import MonkeyPatcher + +from twisted.internet import defer +from twisted.internet.base import DelayedCall +from twisted.internet.interfaces import IReactorThreads +from twisted.python.failure import Failure +from twisted.python.util import mergeFunctionMetadata + + +class ReentryError(Exception): + """Raised when we try to re-enter a function that forbids it.""" + + def __init__(self, function): + Exception.__init__(self, + "%r in not re-entrant but was called within a call to itself." + % (function,)) + + +def not_reentrant(function, _calls={}): + """Decorates a function as not being re-entrant. + + The decorated function will raise an error if called from within itself. + """ + def decorated(*args, **kwargs): + if _calls.get(function, False): + raise ReentryError(function) + _calls[function] = True + try: + return function(*args, **kwargs) + finally: + _calls[function] = False + return mergeFunctionMetadata(function, decorated) + + +class DeferredNotFired(Exception): + """Raised when we extract a result from a Deferred that's not fired yet.""" + + +def extract_result(deferred): + """Extract the result from a fired deferred. + + It can happen that you have an API that returns Deferreds for + compatibility with Twisted code, but is in fact synchronous, i.e. the + Deferreds it returns have always fired by the time it returns. In this + case, you can use this function to convert the result back into the usual + form for a synchronous API, i.e. the result itself or a raised exception. + + It would be very bad form to use this as some way of checking if a + Deferred has fired. 
+ """ + failures = [] + successes = [] + deferred.addCallbacks(successes.append, failures.append) + if len(failures) == 1: + failures[0].raiseException() + elif len(successes) == 1: + return successes[0] + else: + raise DeferredNotFired("%r has not fired yet." % (deferred,)) + + +def trap_unhandled_errors(function, *args, **kwargs): + """Run a function, trapping any unhandled errors in Deferreds. + + Assumes that 'function' will have handled any errors in Deferreds by the + time it is complete. This is almost never true of any Twisted code, since + you can never tell when someone has added an errback to a Deferred. + + If 'function' raises, then don't bother doing any unhandled error + jiggery-pokery, since something horrible has probably happened anyway. + + :return: A tuple of '(result, error)', where 'result' is the value + returned by 'function' and 'error' is a list of 'defer.DebugInfo' + objects that have unhandled errors in Deferreds. + """ + real_DebugInfo = defer.DebugInfo + debug_infos = [] + def DebugInfo(): + info = real_DebugInfo() + debug_infos.append(info) + return info + defer.DebugInfo = DebugInfo + try: + result = function(*args, **kwargs) + finally: + defer.DebugInfo = real_DebugInfo + errors = [] + for info in debug_infos: + if info.failResult is not None: + errors.append(info) + # Disable the destructor that logs to error. We are already + # catching the error here. + info.__del__ = lambda: None + return result, errors + + +class TimeoutError(Exception): + """Raised when run_in_reactor takes too long to run a function.""" + + def __init__(self, function, timeout): + Exception.__init__(self, + "%r took longer than %s seconds" % (function, timeout)) + + +class NoResultError(Exception): + """Raised when the reactor has stopped but we don't have any result.""" + + def __init__(self): + Exception.__init__(self, + "Tried to get test's result from Deferred when no result is " + "available. 
Probably means we received SIGINT or similar.") + + +class StaleJunkError(Exception): + """Raised when there's junk in the spinner from a previous run.""" + + def __init__(self, junk): + Exception.__init__(self, + "There was junk in the spinner from a previous run. " + "Use clear_junk() to clear it out: %r" % (junk,)) + + +class Spinner(object): + """Spin the reactor until a function is done. + + This class emulates the behaviour of twisted.trial in that it grotesquely + and horribly spins the Twisted reactor while a function is running, and + then kills the reactor when that function is complete and all the + callbacks in its chains are done. + """ + + _UNSET = object() + + # Signals that we save and restore for each spin. + _PRESERVED_SIGNALS = [ + 'SIGINT', + 'SIGTERM', + 'SIGCHLD', + ] + + # There are many APIs within Twisted itself where a Deferred fires but + # leaves cleanup work scheduled for the reactor to do. Arguably, many of + # these are bugs. As such, we provide a facility to iterate the reactor + # event loop a number of times after every call, in order to shake out + # these buggy-but-commonplace events. The default is 0, because that is + # the ideal, and it actually works for many cases. + _OBLIGATORY_REACTOR_ITERATIONS = 0 + + def __init__(self, reactor, debug=False): + """Construct a Spinner. + + :param reactor: A Twisted reactor. + :param debug: Whether or not to enable Twisted's debugging. Defaults + to False. 
+ """ + self._reactor = reactor + self._timeout_call = None + self._success = self._UNSET + self._failure = self._UNSET + self._saved_signals = [] + self._junk = [] + self._debug = debug + + def _cancel_timeout(self): + if self._timeout_call: + self._timeout_call.cancel() + + def _get_result(self): + if self._failure is not self._UNSET: + self._failure.raiseException() + if self._success is not self._UNSET: + return self._success + raise NoResultError() + + def _got_failure(self, result): + self._cancel_timeout() + self._failure = result + + def _got_success(self, result): + self._cancel_timeout() + self._success = result + + def _stop_reactor(self, ignored=None): + """Stop the reactor!""" + self._reactor.crash() + + def _timed_out(self, function, timeout): + e = TimeoutError(function, timeout) + self._failure = Failure(e) + self._stop_reactor() + + def _clean(self): + """Clean up any junk in the reactor. + + Will always iterate the reactor a number of times equal to + ``Spinner._OBLIGATORY_REACTOR_ITERATIONS``. This is to work around + bugs in various Twisted APIs where a Deferred fires but still leaves + work (e.g. cancelling a call, actually closing a connection) for the + reactor to do. + """ + for i in range(self._OBLIGATORY_REACTOR_ITERATIONS): + self._reactor.iterate(0) + junk = [] + for delayed_call in self._reactor.getDelayedCalls(): + delayed_call.cancel() + junk.append(delayed_call) + for selectable in self._reactor.removeAll(): + # Twisted sends a 'KILL' signal to selectables that provide + # IProcessTransport. Since only _dumbwin32proc processes do this, + # we aren't going to bother. + junk.append(selectable) + if IReactorThreads.providedBy(self._reactor): + if self._reactor.threadpool is not None: + self._reactor._stopThreadPool() + self._junk.extend(junk) + return junk + + def clear_junk(self): + """Clear out our recorded junk. + + :return: Whatever junk was there before. 
+ """ + junk = self._junk + self._junk = [] + return junk + + def get_junk(self): + """Return any junk that has been found on the reactor.""" + return self._junk + + def _save_signals(self): + available_signals = [ + getattr(signal, name, None) for name in self._PRESERVED_SIGNALS] + self._saved_signals = [ + (sig, signal.getsignal(sig)) for sig in available_signals if sig] + + def _restore_signals(self): + for sig, hdlr in self._saved_signals: + signal.signal(sig, hdlr) + self._saved_signals = [] + + @not_reentrant + def run(self, timeout, function, *args, **kwargs): + """Run 'function' in a reactor. + + If 'function' returns a Deferred, the reactor will keep spinning until + the Deferred fires and its chain completes or until the timeout is + reached -- whichever comes first. + + :raise TimeoutError: If 'timeout' is reached before the Deferred + returned by 'function' has completed its callback chain. + :raise NoResultError: If the reactor is somehow interrupted before + the Deferred returned by 'function' has completed its callback + chain. + :raise StaleJunkError: If there's junk in the spinner from a previous + run. + :return: Whatever is at the end of the function's callback chain. If + it's an error, then raise that. + """ + debug = MonkeyPatcher() + if self._debug: + debug.add_patch(defer.Deferred, 'debug', True) + debug.add_patch(DelayedCall, 'debug', True) + debug.patch() + try: + junk = self.get_junk() + if junk: + raise StaleJunkError(junk) + self._save_signals() + self._timeout_call = self._reactor.callLater( + timeout, self._timed_out, function, timeout) + # Calling 'stop' on the reactor will make it impossible to + # re-start the reactor. Since the default signal handlers for + # TERM, BREAK and INT all call reactor.stop(), we'll patch it over + # with crash. XXX: It might be a better idea to either install + # custom signal handlers or to override the methods that are + # Twisted's signal handlers. 
+ stop, self._reactor.stop = self._reactor.stop, self._reactor.crash + def run_function(): + d = defer.maybeDeferred(function, *args, **kwargs) + d.addCallbacks(self._got_success, self._got_failure) + d.addBoth(self._stop_reactor) + try: + self._reactor.callWhenRunning(run_function) + self._reactor.run() + finally: + self._reactor.stop = stop + self._restore_signals() + try: + return self._get_result() + finally: + self._clean() + finally: + debug.restore() diff --git a/test/3rdparty/testtools-0.9.12/testtools/compat.py b/test/3rdparty/testtools-0.9.12/testtools/compat.py new file mode 100644 index 00000000000..b7e23c8fec6 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/compat.py @@ -0,0 +1,393 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. + +"""Compatibility support for python 2 and 3.""" + +__metaclass__ = type +__all__ = [ + '_b', + '_u', + 'advance_iterator', + 'all', + 'BytesIO', + 'classtypes', + 'isbaseexception', + 'istext', + 'str_is_unicode', + 'StringIO', + 'reraise', + 'unicode_output_stream', + ] + +import codecs +import linecache +import locale +import os +import re +import sys +import traceback +import unicodedata + +from testtools.helpers import try_imports + +BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO']) +StringIO = try_imports(['StringIO.StringIO', 'io.StringIO']) + +try: + from testtools import _compat2x as _compat + _compat +except SyntaxError: + from testtools import _compat3x as _compat + +reraise = _compat.reraise + + +__u_doc = """A function version of the 'u' prefix. + +This is needed becayse the u prefix is not usable in Python 3 but is required +in Python 2 to get a unicode object. + +To migrate code that was written as u'\u1234' in Python 2 to 2+3 change +it to be _u('\u1234'). The Python 3 interpreter will decode it +appropriately and the no-op _u for Python 3 lets it through, in Python +2 we then call unicode-escape in the _u function. 
+""" + +if sys.version_info > (3, 0): + import builtins + def _u(s): + return s + _r = ascii + def _b(s): + """A byte literal.""" + return s.encode("latin-1") + advance_iterator = next + # GZ 2011-08-24: Seems istext() is easy to misuse and makes for bad code. + def istext(x): + return isinstance(x, str) + def classtypes(): + return (type,) + str_is_unicode = True +else: + import __builtin__ as builtins + def _u(s): + # The double replace mangling going on prepares the string for + # unicode-escape - \foo is preserved, \u and \U are decoded. + return (s.replace("\\", "\\\\").replace("\\\\u", "\\u") + .replace("\\\\U", "\\U").decode("unicode-escape")) + _r = repr + def _b(s): + return s + advance_iterator = lambda it: it.next() + def istext(x): + return isinstance(x, basestring) + def classtypes(): + import types + return (type, types.ClassType) + str_is_unicode = sys.platform == "cli" + +_u.__doc__ = __u_doc + + +if sys.version_info > (2, 5): + all = all + _error_repr = BaseException.__repr__ + def isbaseexception(exception): + """Return whether exception inherits from BaseException only""" + return (isinstance(exception, BaseException) + and not isinstance(exception, Exception)) +else: + def all(iterable): + """If contents of iterable all evaluate as boolean True""" + for obj in iterable: + if not obj: + return False + return True + def _error_repr(exception): + """Format an exception instance as Python 2.5 and later do""" + return exception.__class__.__name__ + repr(exception.args) + def isbaseexception(exception): + """Return whether exception would inherit from BaseException only + + This approximates the hierarchy in Python 2.5 and later, compare the + difference between the diagrams at the bottom of the pages: + <http://docs.python.org/release/2.4.4/lib/module-exceptions.html> + <http://docs.python.org/release/2.5.4/lib/module-exceptions.html> + """ + return isinstance(exception, (KeyboardInterrupt, SystemExit)) + + +# GZ 2011-08-24: Using isinstance checks 
like this encourages bad interfaces, +# there should be better ways to write code needing this. +if not issubclass(getattr(builtins, "bytes", str), str): + def _isbytes(x): + return isinstance(x, bytes) +else: + # Never return True on Pythons that provide the name but not the real type + def _isbytes(x): + return False + + +def _slow_escape(text): + """Escape unicode `text` leaving printable characters unmodified + + The behaviour emulates the Python 3 implementation of repr, see + unicode_repr in unicodeobject.c and isprintable definition. + + Because this iterates over the input a codepoint at a time, it's slow, and + does not handle astral characters correctly on Python builds with 16 bit + rather than 32 bit unicode type. + """ + output = [] + for c in text: + o = ord(c) + if o < 256: + if o < 32 or 126 < o < 161: + output.append(c.encode("unicode-escape")) + elif o == 92: + # Separate due to bug in unicode-escape codec in Python 2.4 + output.append("\\\\") + else: + output.append(c) + else: + # To get correct behaviour would need to pair up surrogates here + if unicodedata.category(c)[0] in "CZ": + output.append(c.encode("unicode-escape")) + else: + output.append(c) + return "".join(output) + + +def text_repr(text, multiline=None): + """Rich repr for `text` returning unicode, triple quoted if `multiline`""" + is_py3k = sys.version_info > (3, 0) + nl = _isbytes(text) and bytes((0xA,)) or "\n" + if multiline is None: + multiline = nl in text + if not multiline and (is_py3k or not str_is_unicode and type(text) is str): + # Use normal repr for single line of unicode on Python 3 or bytes + return repr(text) + prefix = repr(text[:0])[:-2] + if multiline: + # To escape multiline strings, split and process each line in turn, + # making sure that quotes are not escaped. 
+ if is_py3k: + offset = len(prefix) + 1 + lines = [] + for l in text.split(nl): + r = repr(l) + q = r[-1] + lines.append(r[offset:-1].replace("\\" + q, q)) + elif not str_is_unicode and isinstance(text, str): + lines = [l.encode("string-escape").replace("\\'", "'") + for l in text.split("\n")] + else: + lines = [_slow_escape(l) for l in text.split("\n")] + # Combine the escaped lines and append two of the closing quotes, + # then iterate over the result to escape triple quotes correctly. + _semi_done = "\n".join(lines) + "''" + p = 0 + while True: + p = _semi_done.find("'''", p) + if p == -1: + break + _semi_done = "\\".join([_semi_done[:p], _semi_done[p:]]) + p += 2 + return "".join([prefix, "'''\\\n", _semi_done, "'"]) + escaped_text = _slow_escape(text) + # Determine which quote character to use and if one gets prefixed with a + # backslash following the same logic Python uses for repr() on strings + quote = "'" + if "'" in text: + if '"' in text: + escaped_text = escaped_text.replace("'", "\\'") + else: + quote = '"' + return "".join([prefix, quote, escaped_text, quote]) + + +def unicode_output_stream(stream): + """Get wrapper for given stream that writes any unicode without exception + + Characters that can't be coerced to the encoding of the stream, or 'ascii' + if valid encoding is not found, will be replaced. The original stream may + be returned in situations where a wrapper is determined unneeded. + + The wrapper only allows unicode to be written, not non-ascii bytestrings, + which is a good thing to ensure sanity and sanitation. 
+ """ + if sys.platform == "cli": + # Best to never encode before writing in IronPython + return stream + try: + writer = codecs.getwriter(stream.encoding or "") + except (AttributeError, LookupError): + # GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs + # different handling as it doesn't want bytestrings + return codecs.getwriter("ascii")(stream, "replace") + if writer.__module__.rsplit(".", 1)[1].startswith("utf"): + # The current stream has a unicode encoding so no error handler is needed + if sys.version_info > (3, 0): + return stream + return writer(stream) + if sys.version_info > (3, 0): + # Python 3 doesn't seem to make this easy, handle a common case + try: + return stream.__class__(stream.buffer, stream.encoding, "replace", + stream.newlines, stream.line_buffering) + except AttributeError: + pass + return writer(stream, "replace") + + +# The default source encoding is actually "iso-8859-1" until Python 2.5 but +# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to +# treat all versions the same way +_default_source_encoding = "ascii" + +# Pattern specified in <http://www.python.org/dev/peps/pep-0263/> +_cookie_search=re.compile("coding[:=]\s*([-\w.]+)").search + +def _detect_encoding(lines): + """Get the encoding of a Python source file from a list of lines as bytes + + This function does less than tokenize.detect_encoding added in Python 3 as + it does not attempt to raise a SyntaxError when the interpreter would, it + just wants the encoding of a source file Python has already compiled and + determined is valid. 
+ """ + if not lines: + return _default_source_encoding + if lines[0].startswith("\xef\xbb\xbf"): + # Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError + return "utf-8" + # Only the first two lines of the source file are examined + magic = _cookie_search("".join(lines[:2])) + if magic is None: + return _default_source_encoding + encoding = magic.group(1) + try: + codecs.lookup(encoding) + except LookupError: + # Some codecs raise something other than LookupError if they don't + # support the given error handler, but not the text ones that could + # actually be used for Python source code + return _default_source_encoding + return encoding + + +class _EncodingTuple(tuple): + """A tuple type that can have an encoding attribute smuggled on""" + + +def _get_source_encoding(filename): + """Detect, cache and return the encoding of Python source at filename""" + try: + return linecache.cache[filename].encoding + except (AttributeError, KeyError): + encoding = _detect_encoding(linecache.getlines(filename)) + if filename in linecache.cache: + newtuple = _EncodingTuple(linecache.cache[filename]) + newtuple.encoding = encoding + linecache.cache[filename] = newtuple + return encoding + + +def _get_exception_encoding(): + """Return the encoding we expect messages from the OS to be encoded in""" + if os.name == "nt": + # GZ 2010-05-24: Really want the codepage number instead, the error + # handling of standard codecs is more deterministic + return "mbcs" + # GZ 2010-05-23: We need this call to be after initialisation, but there's + # no benefit in asking more than once as it's a global + # setting that can change after the message is formatted. + return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii" + + +def _exception_to_text(evalue): + """Try hard to get a sensible text value out of an exception instance""" + try: + return unicode(evalue) + except KeyboardInterrupt: + raise + except: + # Apparently this is what traceback._some_str does. 
Sigh - RBC 20100623 + pass + try: + return str(evalue).decode(_get_exception_encoding(), "replace") + except KeyboardInterrupt: + raise + except: + # Apparently this is what traceback._some_str does. Sigh - RBC 20100623 + pass + # Okay, out of ideas, let higher level handle it + return None + + +# GZ 2010-05-23: This function is huge and horrible and I welcome suggestions +# on the best way to break it up +_TB_HEADER = _u('Traceback (most recent call last):\n') +def _format_exc_info(eclass, evalue, tb, limit=None): + """Format a stack trace and the exception information as unicode + + Compatibility function for Python 2 which ensures each component of a + traceback is correctly decoded according to its origins. + + Based on traceback.format_exception and related functions. + """ + fs_enc = sys.getfilesystemencoding() + if tb: + list = [_TB_HEADER] + extracted_list = [] + for filename, lineno, name, line in traceback.extract_tb(tb, limit): + extracted_list.append(( + filename.decode(fs_enc, "replace"), + lineno, + name.decode("ascii", "replace"), + line and line.decode( + _get_source_encoding(filename), "replace"))) + list.extend(traceback.format_list(extracted_list)) + else: + list = [] + if evalue is None: + # Is a (deprecated) string exception + list.append((eclass + "\n").decode("ascii", "replace")) + return list + if isinstance(evalue, SyntaxError): + # Avoid duplicating the special formatting for SyntaxError here, + # instead create a new instance with unicode filename and line + # Potentially gives duff spacing, but that's a pre-existing issue + try: + msg, (filename, lineno, offset, line) = evalue + except (TypeError, ValueError): + pass # Strange exception instance, fall through to generic code + else: + # Errors during parsing give the line from buffer encoded as + # latin-1 or utf-8 or the encoding of the file depending on the + # coding and whether the patch for issue #1031213 is applied, so + # give up on trying to decode it and just read the file again 
+ if line: + bytestr = linecache.getline(filename, lineno) + if bytestr: + if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"): + bytestr = bytestr[3:] + line = bytestr.decode( + _get_source_encoding(filename), "replace") + del linecache.cache[filename] + else: + line = line.decode("ascii", "replace") + if filename: + filename = filename.decode(fs_enc, "replace") + evalue = eclass(msg, (filename, lineno, offset, line)) + list.extend(traceback.format_exception_only(eclass, evalue)) + return list + sclass = eclass.__name__ + svalue = _exception_to_text(evalue) + if svalue: + list.append("%s: %s\n" % (sclass, svalue)) + elif svalue is None: + # GZ 2010-05-24: Not a great fallback message, but keep for the moment + list.append("%s: <unprintable %s object>\n" % (sclass, sclass)) + else: + list.append("%s\n" % sclass) + return list diff --git a/test/3rdparty/testtools-0.9.12/testtools/content.py b/test/3rdparty/testtools-0.9.12/testtools/content.py new file mode 100644 index 00000000000..2c6ed9f5860 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/content.py @@ -0,0 +1,238 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. + +"""Content - a MIME-like Content object.""" + +__all__ = [ + 'attach_file', + 'Content', + 'content_from_file', + 'content_from_stream', + 'text_content', + 'TracebackContent', + ] + +import codecs +import os + +from testtools import try_import +from testtools.compat import _b +from testtools.content_type import ContentType, UTF8_TEXT +from testtools.testresult import TestResult + +functools = try_import('functools') + +_join_b = _b("").join + + +DEFAULT_CHUNK_SIZE = 4096 + + +def _iter_chunks(stream, chunk_size): + """Read 'stream' in chunks of 'chunk_size'. + + :param stream: A file-like object to read from. + :param chunk_size: The size of each read from 'stream'. 
+ """ + chunk = stream.read(chunk_size) + while chunk: + yield chunk + chunk = stream.read(chunk_size) + + +class Content(object): + """A MIME-like Content object. + + Content objects can be serialised to bytes using the iter_bytes method. + If the Content-Type is recognised by other code, they are welcome to + look for richer contents that mere byte serialisation - for example in + memory object graphs etc. However, such code MUST be prepared to receive + a generic Content object that has been reconstructed from a byte stream. + + :ivar content_type: The content type of this Content. + """ + + def __init__(self, content_type, get_bytes): + """Create a ContentType.""" + if None in (content_type, get_bytes): + raise ValueError("None not permitted in %r, %r" % ( + content_type, get_bytes)) + self.content_type = content_type + self._get_bytes = get_bytes + + def __eq__(self, other): + return (self.content_type == other.content_type and + _join_b(self.iter_bytes()) == _join_b(other.iter_bytes())) + + def iter_bytes(self): + """Iterate over bytestrings of the serialised content.""" + return self._get_bytes() + + def iter_text(self): + """Iterate over the text of the serialised content. + + This is only valid for text MIME types, and will use ISO-8859-1 if + no charset parameter is present in the MIME type. (This is somewhat + arbitrary, but consistent with RFC2617 3.7.1). + + :raises ValueError: If the content type is not text/\*. 
+ """ + if self.content_type.type != "text": + raise ValueError("Not a text type %r" % self.content_type) + return self._iter_text() + + def _iter_text(self): + """Worker for iter_text - does the decoding.""" + encoding = self.content_type.parameters.get('charset', 'ISO-8859-1') + try: + # 2.5+ + decoder = codecs.getincrementaldecoder(encoding)() + for bytes in self.iter_bytes(): + yield decoder.decode(bytes) + final = decoder.decode(_b(''), True) + if final: + yield final + except AttributeError: + # < 2.5 + bytes = ''.join(self.iter_bytes()) + yield bytes.decode(encoding) + + def __repr__(self): + return "<Content type=%r, value=%r>" % ( + self.content_type, _join_b(self.iter_bytes())) + + +class TracebackContent(Content): + """Content object for tracebacks. + + This adapts an exc_info tuple to the Content interface. + text/x-traceback;language=python is used for the mime type, in order to + provide room for other languages to format their tracebacks differently. + """ + + def __init__(self, err, test): + """Create a TracebackContent for err.""" + if err is None: + raise ValueError("err may not be None") + content_type = ContentType('text', 'x-traceback', + {"language": "python", "charset": "utf8"}) + self._result = TestResult() + value = self._result._exc_info_to_unicode(err, test) + super(TracebackContent, self).__init__( + content_type, lambda: [value.encode("utf8")]) + + +def text_content(text): + """Create a `Content` object from some text. + + This is useful for adding details which are short strings. + """ + return Content(UTF8_TEXT, lambda: [text.encode('utf8')]) + + + +def maybe_wrap(wrapper, func): + """Merge metadata for func into wrapper if functools is present.""" + if functools is not None: + wrapper = functools.update_wrapper(wrapper, func) + return wrapper + + +def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE, + buffer_now=False): + """Create a `Content` object from a file on disk. 
+ + Note that unless 'read_now' is explicitly passed in as True, the file + will only be read from when ``iter_bytes`` is called. + + :param path: The path to the file to be used as content. + :param content_type: The type of content. If not specified, defaults + to UTF8-encoded text/plain. + :param chunk_size: The size of chunks to read from the file. + Defaults to `DEFAULT_CHUNK_SIZE`. + :param buffer_now: If True, read the file from disk now and keep it in + memory. Otherwise, only read when the content is serialized. + """ + if content_type is None: + content_type = UTF8_TEXT + def reader(): + # This should be try:finally:, but python2.4 makes that hard. When + # We drop older python support we can make this use a context manager + # for maximum simplicity. + stream = open(path, 'rb') + for chunk in _iter_chunks(stream, chunk_size): + yield chunk + stream.close() + return content_from_reader(reader, content_type, buffer_now) + + +def content_from_stream(stream, content_type=None, + chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False): + """Create a `Content` object from a file-like stream. + + Note that the stream will only be read from when ``iter_bytes`` is + called. + + :param stream: A file-like object to read the content from. The stream + is not closed by this function or the content object it returns. + :param content_type: The type of content. If not specified, defaults + to UTF8-encoded text/plain. + :param chunk_size: The size of chunks to read from the file. + Defaults to `DEFAULT_CHUNK_SIZE`. + :param buffer_now: If True, reads from the stream right now. Otherwise, + only reads when the content is serialized. Defaults to False. + """ + if content_type is None: + content_type = UTF8_TEXT + reader = lambda: _iter_chunks(stream, chunk_size) + return content_from_reader(reader, content_type, buffer_now) + + +def content_from_reader(reader, content_type, buffer_now): + """Create a Content object that will obtain the content from reader. 
+ + :param reader: A callback to read the content. Should return an iterable of + bytestrings. + :param content_type: The content type to create. + :param buffer_now: If True the reader is evaluated immediately and + buffered. + """ + if content_type is None: + content_type = UTF8_TEXT + if buffer_now: + contents = list(reader()) + reader = lambda: contents + return Content(content_type, reader) + + +def attach_file(detailed, path, name=None, content_type=None, + chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True): + """Attach a file to this test as a detail. + + This is a convenience method wrapping around `addDetail`. + + Note that unless 'read_now' is explicitly passed in as True, the file + *must* exist when the test result is called with the results of this + test, after the test has been torn down. + + :param detailed: An object with details + :param path: The path to the file to attach. + :param name: The name to give to the detail for the attached file. + :param content_type: The content type of the file. If not provided, + defaults to UTF8-encoded text/plain. + :param chunk_size: The size of chunks to read from the file. Defaults + to something sensible. + :param buffer_now: If False the file content is read when the content + object is evaluated rather than when attach_file is called. + Note that this may be after any cleanups that obj_with_details has, so + if the file is a temporary file disabling buffer_now may cause the file + to be read after it is deleted. 
To handle those cases, using + attach_file as a cleanup is recommended because it guarantees a + sequence for when the attach_file call is made:: + + detailed.addCleanup(attach_file, 'foo.txt', detailed) + """ + if name is None: + name = os.path.basename(path) + content_object = content_from_file( + path, content_type, chunk_size, buffer_now) + detailed.addDetail(name, content_object) diff --git a/test/3rdparty/testtools-0.9.12/testtools/content_type.py b/test/3rdparty/testtools-0.9.12/testtools/content_type.py new file mode 100644 index 00000000000..82c301b38df --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/content_type.py @@ -0,0 +1,39 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. + +"""ContentType - a MIME Content Type.""" + + +class ContentType(object): + """A content type from http://www.iana.org/assignments/media-types/ + + :ivar type: The primary type, e.g. "text" or "application" + :ivar subtype: The subtype, e.g. "plain" or "octet-stream" + :ivar parameters: A dict of additional parameters specific to the + content type. 
+ """ + + def __init__(self, primary_type, sub_type, parameters=None): + """Create a ContentType.""" + if None in (primary_type, sub_type): + raise ValueError("None not permitted in %r, %r" % ( + primary_type, sub_type)) + self.type = primary_type + self.subtype = sub_type + self.parameters = parameters or {} + + def __eq__(self, other): + if type(other) != ContentType: + return False + return self.__dict__ == other.__dict__ + + def __repr__(self): + if self.parameters: + params = '; ' + params += ', '.join( + '%s="%s"' % (k, v) for k, v in self.parameters.items()) + else: + params = '' + return "%s/%s%s" % (self.type, self.subtype, params) + + +UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'}) diff --git a/test/3rdparty/testtools-0.9.12/testtools/deferredruntest.py b/test/3rdparty/testtools-0.9.12/testtools/deferredruntest.py new file mode 100644 index 00000000000..b8bfaaaa39f --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/deferredruntest.py @@ -0,0 +1,336 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Individual test case execution for tests that return Deferreds. + +This module is highly experimental and is liable to change in ways that cause +subtle failures in tests. Use at your own peril. 
+""" + +__all__ = [ + 'assert_fails_with', + 'AsynchronousDeferredRunTest', + 'AsynchronousDeferredRunTestForBrokenTwisted', + 'SynchronousDeferredRunTest', + ] + +import sys + +from testtools.compat import StringIO +from testtools.content import ( + Content, + text_content, + ) +from testtools.content_type import UTF8_TEXT +from testtools.runtest import RunTest +from testtools._spinner import ( + extract_result, + NoResultError, + Spinner, + TimeoutError, + trap_unhandled_errors, + ) + +from twisted.internet import defer +from twisted.python import log +from twisted.trial.unittest import _LogObserver + + +class _DeferredRunTest(RunTest): + """Base for tests that return Deferreds.""" + + def _got_user_failure(self, failure, tb_label='traceback'): + """We got a failure from user code.""" + return self._got_user_exception( + (failure.type, failure.value, failure.getTracebackObject()), + tb_label=tb_label) + + +class SynchronousDeferredRunTest(_DeferredRunTest): + """Runner for tests that return synchronous Deferreds.""" + + def _run_user(self, function, *args): + d = defer.maybeDeferred(function, *args) + d.addErrback(self._got_user_failure) + result = extract_result(d) + return result + + +def run_with_log_observers(observers, function, *args, **kwargs): + """Run 'function' with the given Twisted log observers.""" + real_observers = log.theLogPublisher.observers + for observer in real_observers: + log.theLogPublisher.removeObserver(observer) + for observer in observers: + log.theLogPublisher.addObserver(observer) + try: + return function(*args, **kwargs) + finally: + for observer in observers: + log.theLogPublisher.removeObserver(observer) + for observer in real_observers: + log.theLogPublisher.addObserver(observer) + + +# Observer of the Twisted log that we install during tests. +_log_observer = _LogObserver() + + + +class AsynchronousDeferredRunTest(_DeferredRunTest): + """Runner for tests that return Deferreds that fire asynchronously. 
+ + That is, this test runner assumes that the Deferreds will only fire if the + reactor is left to spin for a while. + + Do not rely too heavily on the nuances of the behaviour of this class. + What it does to the reactor is black magic, and if we can find nicer ways + of doing it we will gladly break backwards compatibility. + + This is highly experimental code. Use at your own risk. + """ + + def __init__(self, case, handlers=None, reactor=None, timeout=0.005, + debug=False): + """Construct an `AsynchronousDeferredRunTest`. + + :param case: The `TestCase` to run. + :param handlers: A list of exception handlers (ExceptionType, handler) + where 'handler' is a callable that takes a `TestCase`, a + ``testtools.TestResult`` and the exception raised. + :param reactor: The Twisted reactor to use. If not given, we use the + default reactor. + :param timeout: The maximum time allowed for running a test. The + default is 0.005s. + :param debug: Whether or not to enable Twisted's debugging. Use this + to get information about unhandled Deferreds and left-over + DelayedCalls. Defaults to False. + """ + super(AsynchronousDeferredRunTest, self).__init__(case, handlers) + if reactor is None: + from twisted.internet import reactor + self._reactor = reactor + self._timeout = timeout + self._debug = debug + + @classmethod + def make_factory(cls, reactor=None, timeout=0.005, debug=False): + """Make a factory that conforms to the RunTest factory interface.""" + # This is horrible, but it means that the return value of the method + # will be able to be assigned to a class variable *and* also be + # invoked directly. + class AsynchronousDeferredRunTestFactory: + def __call__(self, case, handlers=None): + return cls(case, handlers, reactor, timeout, debug) + return AsynchronousDeferredRunTestFactory() + + @defer.deferredGenerator + def _run_cleanups(self): + """Run the cleanups on the test case. + + We expect that the cleanups on the test case can also return + asynchronous Deferreds. 
As such, we take the responsibility for + running the cleanups, rather than letting TestCase do it. + """ + while self.case._cleanups: + f, args, kwargs = self.case._cleanups.pop() + d = defer.maybeDeferred(f, *args, **kwargs) + thing = defer.waitForDeferred(d) + yield thing + try: + thing.getResult() + except Exception: + exc_info = sys.exc_info() + self.case._report_traceback(exc_info) + last_exception = exc_info[1] + yield last_exception + + def _make_spinner(self): + """Make the `Spinner` to be used to run the tests.""" + return Spinner(self._reactor, debug=self._debug) + + def _run_deferred(self): + """Run the test, assuming everything in it is Deferred-returning. + + This should return a Deferred that fires with True if the test was + successful and False if the test was not successful. It should *not* + call addSuccess on the result, because there's reactor clean up that + we needs to be done afterwards. + """ + fails = [] + + def fail_if_exception_caught(exception_caught): + if self.exception_caught == exception_caught: + fails.append(None) + + def clean_up(ignored=None): + """Run the cleanups.""" + d = self._run_cleanups() + def clean_up_done(result): + if result is not None: + self._exceptions.append(result) + fails.append(None) + return d.addCallback(clean_up_done) + + def set_up_done(exception_caught): + """Set up is done, either clean up or run the test.""" + if self.exception_caught == exception_caught: + fails.append(None) + return clean_up() + else: + d = self._run_user(self.case._run_test_method, self.result) + d.addCallback(fail_if_exception_caught) + d.addBoth(tear_down) + return d + + def tear_down(ignored): + d = self._run_user(self.case._run_teardown, self.result) + d.addCallback(fail_if_exception_caught) + d.addBoth(clean_up) + return d + + d = self._run_user(self.case._run_setup, self.result) + d.addCallback(set_up_done) + d.addBoth(lambda ignored: len(fails) == 0) + return d + + def _log_user_exception(self, e): + """Raise 'e' and report it 
as a user exception.""" + try: + raise e + except e.__class__: + self._got_user_exception(sys.exc_info()) + + def _blocking_run_deferred(self, spinner): + try: + return trap_unhandled_errors( + spinner.run, self._timeout, self._run_deferred) + except NoResultError: + # We didn't get a result at all! This could be for any number of + # reasons, but most likely someone hit Ctrl-C during the test. + raise KeyboardInterrupt + except TimeoutError: + # The function took too long to run. + self._log_user_exception(TimeoutError(self.case, self._timeout)) + return False, [] + + def _run_core(self): + # Add an observer to trap all logged errors. + self.case.reactor = self._reactor + error_observer = _log_observer + full_log = StringIO() + full_observer = log.FileLogObserver(full_log) + spinner = self._make_spinner() + successful, unhandled = run_with_log_observers( + [error_observer.gotEvent, full_observer.emit], + self._blocking_run_deferred, spinner) + + self.case.addDetail( + 'twisted-log', Content(UTF8_TEXT, full_log.readlines)) + + logged_errors = error_observer.flushErrors() + for logged_error in logged_errors: + successful = False + self._got_user_failure(logged_error, tb_label='logged-error') + + if unhandled: + successful = False + for debug_info in unhandled: + f = debug_info.failResult + info = debug_info._getDebugTracebacks() + if info: + self.case.addDetail( + 'unhandled-error-in-deferred-debug', + text_content(info)) + self._got_user_failure(f, 'unhandled-error-in-deferred') + + junk = spinner.clear_junk() + if junk: + successful = False + self._log_user_exception(UncleanReactorError(junk)) + + if successful: + self.result.addSuccess(self.case, details=self.case.getDetails()) + + def _run_user(self, function, *args): + """Run a user-supplied function. + + This just makes sure that it returns a Deferred, regardless of how the + user wrote it. 
+ """ + d = defer.maybeDeferred(function, *args) + return d.addErrback(self._got_user_failure) + + +class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest): + """Test runner that works around Twisted brokenness re reactor junk. + + There are many APIs within Twisted itself where a Deferred fires but + leaves cleanup work scheduled for the reactor to do. Arguably, many of + these are bugs. This runner iterates the reactor event loop a number of + times after every test, in order to shake out these buggy-but-commonplace + events. + """ + + def _make_spinner(self): + spinner = super( + AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner() + spinner._OBLIGATORY_REACTOR_ITERATIONS = 2 + return spinner + + +def assert_fails_with(d, *exc_types, **kwargs): + """Assert that 'd' will fail with one of 'exc_types'. + + The normal way to use this is to return the result of 'assert_fails_with' + from your unit test. + + Note that this function is experimental and unstable. Use at your own + peril; expect the API to change. + + :param d: A Deferred that is expected to fail. + :param exc_types: The exception types that the Deferred is expected to + fail with. + :param failureException: An optional keyword argument. If provided, will + raise that exception instead of + ``testtools.TestCase.failureException``. + :return: A Deferred that will fail with an ``AssertionError`` if 'd' does + not fail with one of the exception types. + """ + failureException = kwargs.pop('failureException', None) + if failureException is None: + # Avoid circular imports. 
+ from testtools import TestCase + failureException = TestCase.failureException + expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types) + def got_success(result): + raise failureException( + "%s not raised (%r returned)" % (expected_names, result)) + def got_failure(failure): + if failure.check(*exc_types): + return failure.value + raise failureException("%s raised instead of %s:\n %s" % ( + failure.type.__name__, expected_names, failure.getTraceback())) + return d.addCallbacks(got_success, got_failure) + + +def flush_logged_errors(*error_types): + return _log_observer.flushErrors(*error_types) + + +class UncleanReactorError(Exception): + """Raised when the reactor has junk in it.""" + + def __init__(self, junk): + Exception.__init__(self, + "The reactor still thinks it needs to do things. Close all " + "connections, kill all processes and make sure all delayed " + "calls have either fired or been cancelled:\n%s" + % ''.join(map(self._get_junk_info, junk))) + + def _get_junk_info(self, junk): + from twisted.internet.base import DelayedCall + if isinstance(junk, DelayedCall): + ret = str(junk) + else: + ret = repr(junk) + return ' %s\n' % (ret,) diff --git a/test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py b/test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py new file mode 100644 index 00000000000..91e14ca504f --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py @@ -0,0 +1,62 @@ +# Copyright (c) 2010-2011 testtools developers . See LICENSE for details. 
+ +"""Extensions to the standard Python unittest library.""" + +import sys + +from distutils.core import Command +from distutils.errors import DistutilsOptionError + +from testtools.run import TestProgram, TestToolsTestRunner + + +class TestCommand(Command): + """Command to run unit tests with testtools""" + + description = "run unit tests with testtools" + + user_options = [ + ('catch', 'c', "Catch ctrl-C and display results so far"), + ('buffer', 'b', "Buffer stdout and stderr during tests"), + ('failfast', 'f', "Stop on first fail or error"), + ('test-module=','m', "Run 'test_suite' in specified module"), + ('test-suite=','s', + "Test suite to run (e.g. 'some_module.test_suite')") + ] + + def __init__(self, dist): + Command.__init__(self, dist) + self.runner = TestToolsTestRunner(sys.stdout) + + + def initialize_options(self): + self.test_suite = None + self.test_module = None + self.catch = None + self.buffer = None + self.failfast = None + + def finalize_options(self): + if self.test_suite is None: + if self.test_module is None: + raise DistutilsOptionError( + "You must specify a module or a suite to run tests from") + else: + self.test_suite = self.test_module+".test_suite" + elif self.test_module: + raise DistutilsOptionError( + "You may specify a module or a suite, but not both") + self.test_args = [self.test_suite] + if self.verbose: + self.test_args.insert(0, '--verbose') + if self.buffer: + self.test_args.insert(0, '--buffer') + if self.catch: + self.test_args.insert(0, '--catch') + if self.failfast: + self.test_args.insert(0, '--failfast') + + def run(self): + self.program = TestProgram( + argv=self.test_args, testRunner=self.runner, stdout=sys.stdout, + exit=False) diff --git a/test/3rdparty/testtools-0.9.12/testtools/helpers.py b/test/3rdparty/testtools-0.9.12/testtools/helpers.py new file mode 100644 index 00000000000..dbf66719edf --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/helpers.py @@ -0,0 +1,87 @@ +# Copyright (c) 2010-2011 
__all__ = [
    'safe_hasattr',
    'try_import',
    'try_imports',
    ]

import sys


def try_import(name, alternative=None, error_callback=None):
    """Attempt to import ``name``.  If it fails, return ``alternative``.

    When supporting multiple versions of Python or optional dependencies, it
    is useful to be able to try to import a module.

    :param name: The name of the object to import, e.g. ``os.path`` or
        ``os.path.join``.
    :param alternative: The value to return if no module can be imported.
        Defaults to None.
    :param error_callback: If non-None, a callable that is passed the
        ImportError when the module cannot be loaded.
    """
    # __import__('a.b') returns the top-level package 'a', so import the
    # longest importable prefix first, then walk attributes for the rest.
    module_segments = name.split('.')
    last_error = None
    while module_segments:
        module_name = '.'.join(module_segments)
        try:
            module = __import__(module_name)
        except ImportError:
            last_error = sys.exc_info()[1]
            module_segments.pop()
            continue
        else:
            break
    else:
        # Nothing importable at all.
        if last_error is not None and error_callback is not None:
            error_callback(last_error)
        return alternative
    nonexistent = object()
    for segment in name.split('.')[1:]:
        module = getattr(module, segment, nonexistent)
        if module is nonexistent:
            if last_error is not None and error_callback is not None:
                error_callback(last_error)
            return alternative
    return module


_RAISE_EXCEPTION = object()
def try_imports(module_names, alternative=_RAISE_EXCEPTION, error_callback=None):
    """Attempt to import modules.

    Tries to import the first module in ``module_names``. If it can be
    imported, we return it. If not, we go on to the second module and try
    that. The process continues until we run out of modules to try. If none
    of the modules can be imported, either raise an exception or return the
    provided ``alternative`` value.

    :param module_names: A sequence of module names to try to import.
    :param alternative: The value to return if no module can be imported.
        If unspecified, we raise an ImportError.
    :param error_callback: If non-None, called with the ImportError for *each*
        module that fails to load.
    :raises ImportError: If none of the modules can be imported and no
        alternative value was specified.
    """
    module_names = list(module_names)
    for module_name in module_names:
        module = try_import(module_name, error_callback=error_callback)
        if module:
            return module
    if alternative is _RAISE_EXCEPTION:
        raise ImportError(
            "Could not import any of: %s" % ', '.join(module_names))
    return alternative


def safe_hasattr(obj, attr, _marker=object()):
    """Does 'obj' have an attribute 'attr'?

    Use this rather than built-in hasattr, as the built-in swallows exceptions
    in some versions of Python and behaves unpredictably with respect to
    properties.
    """
    return getattr(obj, attr, _marker) is not _marker


# ---------------------------------------------------------------------------
# testtools/matchers.py
# ---------------------------------------------------------------------------
# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.

"""Matchers, a way to express complex assertions outside the testcase.

Inspired by 'hamcrest'.

Matcher provides the abstract API that all matchers need to implement.

Bundled matchers are listed in __all__: a list can be obtained by running
$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
"""
__metaclass__ = type
__all__ = [
    'AfterPreprocessing',
    'AllMatch',
    'Annotate',
    'Contains',
    'DocTestMatches',
    'EndsWith',
    'Equals',
    'GreaterThan',
    'Is',
    'IsInstance',
    'KeysEqual',
    'LessThan',
    'MatchesAll',
    'MatchesAny',
    'MatchesException',
    'MatchesListwise',
    'MatchesRegex',
    'MatchesSetwise',
    'MatchesStructure',
    'NotEquals',
    'Not',
    'Raises',
    'raises',
    'StartsWith',
    ]

import doctest
import operator
from pprint import pformat
import re
import sys
import types

from testtools.compat import (
    classtypes,
    _error_repr,
    isbaseexception,
    _isbytes,
    istext,
    str_is_unicode,
    text_repr
    )


class Matcher(object):
    """A pattern matcher.

    A Matcher must implement match and __str__ to be used by
    testtools.TestCase.assertThat. Matcher.match(thing) returns None when
    thing is completely matched, and a Mismatch object otherwise.

    Matchers can be useful outside of test cases, as they are simply a
    pattern matching language expressed as objects.

    testtools.matchers is inspired by hamcrest, but is pythonic rather than
    a Java transcription.
    """

    def match(self, something):
        """Return None if this matcher matches something, a Mismatch otherwise.
        """
        raise NotImplementedError(self.match)

    def __str__(self):
        """Get a sensible human representation of the matcher.

        This should include the parameters given to the matcher and any
        state that would affect the matches operation.
        """
        raise NotImplementedError(self.__str__)


class Mismatch(object):
    """An object describing a mismatch detected by a Matcher."""

    def __init__(self, description=None, details=None):
        """Construct a `Mismatch`.

        :param description: A description to use. If not provided,
            `Mismatch.describe` must be implemented.
        :param details: Extra details about the mismatch. Defaults
            to the empty dict.
        """
        # Only store a description when one was supplied: describe() relies
        # on the *absence* of the attribute to detect "subclass must
        # override".
        if description:
            self._description = description
        self._details = {} if details is None else details

    def describe(self):
        """Describe the mismatch.

        This should be either a human-readable string or castable to a string.
        In particular, is should either be plain ascii or unicode on Python 2,
        and care should be taken to escape control characters.
        """
        try:
            return self._description
        except AttributeError:
            raise NotImplementedError(self.describe)

    def get_details(self):
        """Get extra details about the mismatch.

        This allows the mismatch to provide extra information beyond the
        basic description, including large text or binary files, or debugging
        internals, without forcing them into the output of 'describe'.

        The testtools assertion assertThat will query get_details and attach
        all its values to the test, permitting them to be reported in whatever
        manner the test environment chooses.

        :return: a dict mapping names to Content objects. name is a string to
            name the detail, and the Content object is the detail to add
            to the result. For more information see the API to which items
            from this dict are passed testtools.TestCase.addDetail.
        """
        return getattr(self, '_details', {})

    def __repr__(self):
        return "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
            id(self), self.__dict__)


class MismatchError(AssertionError):
    """Raised when a mismatch occurs."""

    # This class exists to work around
    # <https://bugs.launchpad.net/testtools/+bug/804127>. It provides a
    # guaranteed way of getting a readable exception, no matter what crazy
    # characters are in the matchee, matcher or mismatch.

    def __init__(self, matchee, matcher, mismatch, verbose=False):
        # Old-style upcall keeps Python 2.4 and 2.5 compatibility.
        AssertionError.__init__(self)
        self.matchee = matchee
        self.matcher = matcher
        self.mismatch = mismatch
        self.verbose = verbose

    def __str__(self):
        difference = self.mismatch.describe()
        if not self.verbose:
            return difference
        # GZ 2011-08-24: Smelly API? Better to take any object and special
        # case text inside?
        if istext(self.matchee) or _isbytes(self.matchee):
            matchee = text_repr(self.matchee, multiline=False)
        else:
            matchee = repr(self.matchee)
        return (
            'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
            % (matchee, self.matcher, difference))

    if not str_is_unicode:

        __unicode__ = __str__

        def __str__(self):
            return self.__unicode__().encode("ascii", "backslashreplace")
+ AssertionError.__init__(self) + self.matchee = matchee + self.matcher = matcher + self.mismatch = mismatch + self.verbose = verbose + + def __str__(self): + difference = self.mismatch.describe() + if self.verbose: + # GZ 2011-08-24: Smelly API? Better to take any object and special + # case text inside? + if istext(self.matchee) or _isbytes(self.matchee): + matchee = text_repr(self.matchee, multiline=False) + else: + matchee = repr(self.matchee) + return ( + 'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n' + % (matchee, self.matcher, difference)) + else: + return difference + + if not str_is_unicode: + + __unicode__ = __str__ + + def __str__(self): + return self.__unicode__().encode("ascii", "backslashreplace") + + +class MismatchDecorator(object): + """Decorate a ``Mismatch``. + + Forwards all messages to the original mismatch object. Probably the best + way to use this is inherit from this class and then provide your own + custom decoration logic. + """ + + def __init__(self, original): + """Construct a `MismatchDecorator`. + + :param original: A `Mismatch` object to decorate. + """ + self.original = original + + def __repr__(self): + return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,) + + def describe(self): + return self.original.describe() + + def get_details(self): + return self.original.get_details() + + +class _NonManglingOutputChecker(doctest.OutputChecker): + """Doctest checker that works with unicode rather than mangling strings + + This is needed because current Python versions have tried to fix string + encoding related problems, but regressed the default behaviour with unicode + inputs in the process. + + In Python 2.6 and 2.7 `OutputChecker.output_difference` is was changed to + return a bytestring encoded as per `sys.stdout.encoding`, or utf-8 if that + can't be determined. Worse, that encoding process happens in the innocent + looking `_indent` global function. 
class DocTestMatches(object):
    """See if a string matches a doctest example."""

    def __init__(self, example, flags=0):
        """Create a DocTestMatches to match example.

        :param example: The example to match e.g. 'foo bar baz'
        :param flags: doctest comparison flags to match on. e.g.
            doctest.ELLIPSIS.
        """
        # doctest's checker API requires the attribute to be named 'want'.
        if example.endswith('\n'):
            self.want = example
        else:
            self.want = example + '\n'
        self.flags = flags
        self._checker = _NonManglingOutputChecker()

    def __str__(self):
        flagstr = ", flags=%d" % self.flags if self.flags else ""
        return 'DocTestMatches(%r%s)' % (self.want, flagstr)

    def _with_nl(self, actual):
        # Coerce to the same text type as the expected output and ensure a
        # trailing newline, mirroring __init__'s normalisation of 'want'.
        terminated = self.want.__class__(actual)
        if not terminated.endswith('\n'):
            terminated += '\n'
        return terminated

    def match(self, actual):
        with_nl = self._with_nl(actual)
        if self._checker.check_output(self.want, with_nl, self.flags):
            return None
        return DocTestMismatch(self, with_nl)

    def _describe_difference(self, with_nl):
        return self._checker.output_difference(self, with_nl, self.flags)


class DocTestMismatch(Mismatch):
    """Mismatch object for DocTestMatches."""

    def __init__(self, matcher, with_nl):
        self.matcher = matcher
        self.with_nl = with_nl

    def describe(self):
        difference = self.matcher._describe_difference(self.with_nl)
        if str_is_unicode or isinstance(difference, unicode):
            return difference
        # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
        # be escaped, in addition to non-ascii bytes.
        return difference.decode("latin1").encode("ascii", "backslashreplace")


class DoesNotContain(Mismatch):
    """Mismatch: the needle was missing from the matchee."""

    def __init__(self, matchee, needle):
        """Create a DoesNotContain Mismatch.

        :param matchee: the object that did not contain needle.
        :param needle: the needle that 'matchee' was expected to contain.
        """
        self.matchee = matchee
        self.needle = needle

    def describe(self):
        return "%r not in %r" % (self.needle, self.matchee)


class DoesNotStartWith(Mismatch):
    """Mismatch: the string lacked the expected prefix."""

    def __init__(self, matchee, expected):
        """Create a DoesNotStartWith Mismatch.

        :param matchee: the string that did not match.
        :param expected: the string that 'matchee' was expected to start with.
        """
        self.matchee = matchee
        self.expected = expected

    def describe(self):
        return "%s does not start with %s." % (
            text_repr(self.matchee), text_repr(self.expected))
class DoesNotEndWith(Mismatch):
    """Mismatch: the string lacked the expected suffix."""

    def __init__(self, matchee, expected):
        """Create a DoesNotEndWith Mismatch.

        :param matchee: the string that did not match.
        :param expected: the string that 'matchee' was expected to end with.
        """
        self.matchee = matchee
        self.expected = expected

    def describe(self):
        return "%s does not end with %s." % (
            text_repr(self.matchee), text_repr(self.expected))


class _BinaryComparison(object):
    """Base for matchers that compare a candidate against a reference object.

    Subclasses supply ``comparator`` (a two-argument predicate) and
    ``mismatch_string`` (the operator text used in failure messages).
    """

    def __init__(self, expected):
        self.expected = expected

    def __str__(self):
        return "%s(%r)" % (self.__class__.__name__, self.expected)

    def match(self, other):
        if self.comparator(other, self.expected):
            return None
        return _BinaryMismatch(self.expected, self.mismatch_string, other)

    def comparator(self, expected, other):
        raise NotImplementedError(self.comparator)


class _BinaryMismatch(Mismatch):
    """Two things did not match."""

    def __init__(self, expected, mismatch_string, other):
        self.expected = expected
        self._mismatch_string = mismatch_string
        self.other = other

    def _format(self, thing):
        # Blocks of text with newlines render as triple-quoted strings;
        # everything else is pretty-printed.
        if istext(thing) or _isbytes(thing):
            return text_repr(thing)
        return pformat(thing)

    def describe(self):
        left, right = repr(self.expected), repr(self.other)
        if len(left) + len(right) <= 70:
            return "%s %s %s" % (left, self._mismatch_string, right)
        # Too wide for one line: switch to the multi-line layout.
        return "%s:\nreference = %s\nactual = %s\n" % (
            self._mismatch_string, self._format(self.expected),
            self._format(self.other))


class Equals(_BinaryComparison):
    """Matches if the items are equal."""

    comparator = operator.eq
    mismatch_string = '!='


class NotEquals(_BinaryComparison):
    """Matches if the items are not equal.

    In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
    only matters when testing ``__ne__`` implementations.
    """

    comparator = operator.ne
    mismatch_string = '=='
class Is(_BinaryComparison):
    """Matches if the items are identical."""

    comparator = operator.is_
    mismatch_string = 'is not'


class IsInstance(object):
    """Matcher that wraps isinstance."""

    def __init__(self, *types):
        self.types = tuple(types)

    def __str__(self):
        names = ', '.join(klass.__name__ for klass in self.types)
        return "%s(%s)" % (self.__class__.__name__, names)

    def match(self, other):
        if isinstance(other, self.types):
            return None
        return NotAnInstance(other, self.types)


class NotAnInstance(Mismatch):
    """Mismatch: the thing was not an instance of any expected type."""

    def __init__(self, matchee, types):
        """Create a NotAnInstance Mismatch.

        :param matchee: the thing which is not an instance of any of types.
        :param types: A tuple of the types which were expected.
        """
        self.matchee = matchee
        self.types = types

    def describe(self):
        if len(self.types) == 1:
            typestr = self.types[0].__name__
        else:
            names = ', '.join(klass.__name__ for klass in self.types)
            typestr = 'any of (%s)' % names
        return "'%s' is not an instance of %s" % (self.matchee, typestr)


class LessThan(_BinaryComparison):
    """Matches if the item is less than the matchers reference object."""

    comparator = operator.__lt__
    mismatch_string = 'is not >'


class GreaterThan(_BinaryComparison):
    """Matches if the item is greater than the matchers reference object."""

    comparator = operator.__gt__
    mismatch_string = 'is not <'


class MatchesAny(object):
    """Matches if any of the matchers it is created with match."""

    def __init__(self, *matchers):
        self.matchers = matchers

    def match(self, matchee):
        mismatches = []
        for candidate in self.matchers:
            result = candidate.match(matchee)
            if result is None:
                # One success is enough.
                return None
            mismatches.append(result)
        return MismatchesAll(mismatches)

    def __str__(self):
        return "MatchesAny(%s)" % ', '.join([
            str(matcher) for matcher in self.matchers])
self.matchers]) + + +class MatchesAll(object): + """Matches if all of the matchers it is created with match.""" + + def __init__(self, *matchers): + self.matchers = matchers + + def __str__(self): + return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers)) + + def match(self, matchee): + results = [] + for matcher in self.matchers: + mismatch = matcher.match(matchee) + if mismatch is not None: + results.append(mismatch) + if results: + return MismatchesAll(results) + else: + return None + + +class MismatchesAll(Mismatch): + """A mismatch with many child mismatches.""" + + def __init__(self, mismatches): + self.mismatches = mismatches + + def describe(self): + descriptions = ["Differences: ["] + for mismatch in self.mismatches: + descriptions.append(mismatch.describe()) + descriptions.append("]") + return '\n'.join(descriptions) + + +class Not(object): + """Inverts a matcher.""" + + def __init__(self, matcher): + self.matcher = matcher + + def __str__(self): + return 'Not(%s)' % (self.matcher,) + + def match(self, other): + mismatch = self.matcher.match(other) + if mismatch is None: + return MatchedUnexpectedly(self.matcher, other) + else: + return None + + +class MatchedUnexpectedly(Mismatch): + """A thing matched when it wasn't supposed to.""" + + def __init__(self, matcher, other): + self.matcher = matcher + self.other = other + + def describe(self): + return "%r matches %s" % (self.other, self.matcher) + + +class MatchesException(Matcher): + """Match an exc_info tuple against an exception instance or type.""" + + def __init__(self, exception, value_re=None): + """Create a MatchesException that will match exc_info's for exception. + + :param exception: Either an exception instance or type. + If an instance is given, the type and arguments of the exception + are checked. If a type is given only the type of the exception is + checked. If a tuple is given, then as with isinstance, any of the + types in the tuple matching is sufficient to match. 
+ :param value_re: If 'exception' is a type, and the matchee exception + is of the right type, then match against this. If value_re is a + string, then assume value_re is a regular expression and match + the str() of the exception against it. Otherwise, assume value_re + is a matcher, and match the exception against it. + """ + Matcher.__init__(self) + self.expected = exception + if istext(value_re): + value_re = AfterPreproccessing(str, MatchesRegex(value_re), False) + self.value_re = value_re + self._is_instance = type(self.expected) not in classtypes() + (tuple,) + + def match(self, other): + if type(other) != tuple: + return Mismatch('%r is not an exc_info tuple' % other) + expected_class = self.expected + if self._is_instance: + expected_class = expected_class.__class__ + if not issubclass(other[0], expected_class): + return Mismatch('%r is not a %r' % (other[0], expected_class)) + if self._is_instance: + if other[1].args != self.expected.args: + return Mismatch('%s has different arguments to %s.' % ( + _error_repr(other[1]), _error_repr(self.expected))) + elif self.value_re is not None: + return self.value_re.match(other[1]) + + def __str__(self): + if self._is_instance: + return "MatchesException(%s)" % _error_repr(self.expected) + return "MatchesException(%s)" % repr(self.expected) + + +class Contains(Matcher): + """Checks whether something is container in another thing.""" + + def __init__(self, needle): + """Create a Contains Matcher. + + :param needle: the thing that needs to be contained by matchees. + """ + self.needle = needle + + def __str__(self): + return "Contains(%r)" % (self.needle,) + + def match(self, matchee): + try: + if self.needle not in matchee: + return DoesNotContain(matchee, self.needle) + except TypeError: + # e.g. 
1 in 2 will raise TypeError + return DoesNotContain(matchee, self.needle) + return None + + +class StartsWith(Matcher): + """Checks whether one string starts with another.""" + + def __init__(self, expected): + """Create a StartsWith Matcher. + + :param expected: the string that matchees should start with. + """ + self.expected = expected + + def __str__(self): + return "StartsWith(%r)" % (self.expected,) + + def match(self, matchee): + if not matchee.startswith(self.expected): + return DoesNotStartWith(matchee, self.expected) + return None + + +class EndsWith(Matcher): + """Checks whether one string starts with another.""" + + def __init__(self, expected): + """Create a EndsWith Matcher. + + :param expected: the string that matchees should end with. + """ + self.expected = expected + + def __str__(self): + return "EndsWith(%r)" % (self.expected,) + + def match(self, matchee): + if not matchee.endswith(self.expected): + return DoesNotEndWith(matchee, self.expected) + return None + + +class KeysEqual(Matcher): + """Checks whether a dict has particular keys.""" + + def __init__(self, *expected): + """Create a `KeysEqual` Matcher. + + :param expected: The keys the dict is expected to have. If a dict, + then we use the keys of that dict, if a collection, we assume it + is a collection of expected keys. + """ + try: + self.expected = expected.keys() + except AttributeError: + self.expected = list(expected) + + def __str__(self): + return "KeysEqual(%s)" % ', '.join(map(repr, self.expected)) + + def match(self, matchee): + expected = sorted(self.expected) + matched = Equals(expected).match(sorted(matchee.keys())) + if matched: + return AnnotatedMismatch( + 'Keys not equal', + _BinaryMismatch(expected, 'does not match', matchee)) + return None + + +class Annotate(object): + """Annotates a matcher with a descriptive string. + + Mismatches are then described as '<mismatch>: <annotation>'. 
+ """ + + def __init__(self, annotation, matcher): + self.annotation = annotation + self.matcher = matcher + + @classmethod + def if_message(cls, annotation, matcher): + """Annotate ``matcher`` only if ``annotation`` is non-empty.""" + if not annotation: + return matcher + return cls(annotation, matcher) + + def __str__(self): + return 'Annotate(%r, %s)' % (self.annotation, self.matcher) + + def match(self, other): + mismatch = self.matcher.match(other) + if mismatch is not None: + return AnnotatedMismatch(self.annotation, mismatch) + + +class AnnotatedMismatch(MismatchDecorator): + """A mismatch annotated with a descriptive string.""" + + def __init__(self, annotation, mismatch): + super(AnnotatedMismatch, self).__init__(mismatch) + self.annotation = annotation + self.mismatch = mismatch + + def describe(self): + return '%s: %s' % (self.original.describe(), self.annotation) + + +class Raises(Matcher): + """Match if the matchee raises an exception when called. + + Exceptions which are not subclasses of Exception propogate out of the + Raises.match call unless they are explicitly matched. + """ + + def __init__(self, exception_matcher=None): + """Create a Raises matcher. + + :param exception_matcher: Optional validator for the exception raised + by matchee. If supplied the exc_info tuple for the exception raised + is passed into that matcher. If no exception_matcher is supplied + then the simple fact of raising an exception is considered enough + to match on. + """ + self.exception_matcher = exception_matcher + + def match(self, matchee): + try: + result = matchee() + return Mismatch('%r returned %r' % (matchee, result)) + # Catch all exceptions: Raises() should be able to match a + # KeyboardInterrupt or SystemExit. 
+ except: + exc_info = sys.exc_info() + if self.exception_matcher: + mismatch = self.exception_matcher.match(exc_info) + if not mismatch: + del exc_info + return + else: + mismatch = None + # The exception did not match, or no explicit matching logic was + # performed. If the exception is a non-user exception (that is, not + # a subclass of Exception on Python 2.5+) then propogate it. + if isbaseexception(exc_info[1]): + del exc_info + raise + return mismatch + + def __str__(self): + return 'Raises()' + + +def raises(exception): + """Make a matcher that checks that a callable raises an exception. + + This is a convenience function, exactly equivalent to:: + + return Raises(MatchesException(exception)) + + See `Raises` and `MatchesException` for more information. + """ + return Raises(MatchesException(exception)) + + +class MatchesListwise(object): + """Matches if each matcher matches the corresponding value. + + More easily explained by example than in words: + + >>> MatchesListwise([Equals(1)]).match([1]) + >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2]) + >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe()) + Differences: [ + 1 != 2 + 2 != 1 + ] + """ + + def __init__(self, matchers): + self.matchers = matchers + + def match(self, values): + mismatches = [] + length_mismatch = Annotate( + "Length mismatch", Equals(len(self.matchers))).match(len(values)) + if length_mismatch: + mismatches.append(length_mismatch) + for matcher, value in zip(self.matchers, values): + mismatch = matcher.match(value) + if mismatch: + mismatches.append(mismatch) + if mismatches: + return MismatchesAll(mismatches) + + +class MatchesStructure(object): + """Matcher that matches an object structurally. + + 'Structurally' here means that attributes of the object being matched are + compared against given matchers. + + `fromExample` allows the creation of a matcher from a prototype object and + then modified versions can be created with `update`. 
+ + `byEquality` creates a matcher in much the same way as the constructor, + except that the matcher for each of the attributes is assumed to be + `Equals`. + + `byMatcher` creates a similar matcher to `byEquality`, but you get to pick + the matcher, rather than just using `Equals`. + """ + + def __init__(self, **kwargs): + """Construct a `MatchesStructure`. + + :param kwargs: A mapping of attributes to matchers. + """ + self.kws = kwargs + + @classmethod + def byEquality(cls, **kwargs): + """Matches an object where the attributes equal the keyword values. + + Similar to the constructor, except that the matcher is assumed to be + Equals. + """ + return cls.byMatcher(Equals, **kwargs) + + @classmethod + def byMatcher(cls, matcher, **kwargs): + """Matches an object where the attributes match the keyword values. + + Similar to the constructor, except that the provided matcher is used + to match all of the values. + """ + return cls( + **dict((name, matcher(value)) for name, value in kwargs.items())) + + @classmethod + def fromExample(cls, example, *attributes): + kwargs = {} + for attr in attributes: + kwargs[attr] = Equals(getattr(example, attr)) + return cls(**kwargs) + + def update(self, **kws): + new_kws = self.kws.copy() + for attr, matcher in kws.items(): + if matcher is None: + new_kws.pop(attr, None) + else: + new_kws[attr] = matcher + return type(self)(**new_kws) + + def __str__(self): + kws = [] + for attr, matcher in sorted(self.kws.items()): + kws.append("%s=%s" % (attr, matcher)) + return "%s(%s)" % (self.__class__.__name__, ', '.join(kws)) + + def match(self, value): + matchers = [] + values = [] + for attr, matcher in sorted(self.kws.items()): + matchers.append(Annotate(attr, matcher)) + values.append(getattr(value, attr)) + return MatchesListwise(matchers).match(values) + + +class MatchesRegex(object): + """Matches if the matchee is matched by a regular expression.""" + + def __init__(self, pattern, flags=0): + self.pattern = pattern + self.flags = 
flags + + def __str__(self): + args = ['%r' % self.pattern] + flag_arg = [] + # dir() sorts the attributes for us, so we don't need to do it again. + for flag in dir(re): + if len(flag) == 1: + if self.flags & getattr(re, flag): + flag_arg.append('re.%s' % flag) + if flag_arg: + args.append('|'.join(flag_arg)) + return '%s(%s)' % (self.__class__.__name__, ', '.join(args)) + + def match(self, value): + if not re.match(self.pattern, value, self.flags): + pattern = self.pattern + if not isinstance(pattern, str_is_unicode and str or unicode): + pattern = pattern.decode("latin1") + pattern = pattern.encode("unicode_escape").decode("ascii") + return Mismatch("%r does not match /%s/" % ( + value, pattern.replace("\\\\", "\\"))) + + +class MatchesSetwise(object): + """Matches if all the matchers match elements of the value being matched. + + That is, each element in the 'observed' set must match exactly one matcher + from the set of matchers, with no matchers left over. + + The difference compared to `MatchesListwise` is that the order of the + matchings does not matter. + """ + + def __init__(self, *matchers): + self.matchers = matchers + + def match(self, observed): + remaining_matchers = set(self.matchers) + not_matched = [] + for value in observed: + for matcher in remaining_matchers: + if matcher.match(value) is None: + remaining_matchers.remove(matcher) + break + else: + not_matched.append(value) + if not_matched or remaining_matchers: + remaining_matchers = list(remaining_matchers) + # There are various cases that all should be reported somewhat + # differently. + + # There are two trivial cases: + # 1) There are just some matchers left over. + # 2) There are just some values left over. + + # Then there are three more interesting cases: + # 3) There are the same number of matchers and values left over. + # 4) There are more matchers left over than values. + # 5) There are more values left over than matchers. 
+ + if len(not_matched) == 0: + if len(remaining_matchers) > 1: + msg = "There were %s matchers left over: " % ( + len(remaining_matchers),) + else: + msg = "There was 1 matcher left over: " + msg += ', '.join(map(str, remaining_matchers)) + return Mismatch(msg) + elif len(remaining_matchers) == 0: + if len(not_matched) > 1: + return Mismatch( + "There were %s values left over: %s" % ( + len(not_matched), not_matched)) + else: + return Mismatch( + "There was 1 value left over: %s" % ( + not_matched, )) + else: + common_length = min(len(remaining_matchers), len(not_matched)) + if common_length == 0: + raise AssertionError("common_length can't be 0 here") + if common_length > 1: + msg = "There were %s mismatches" % (common_length,) + else: + msg = "There was 1 mismatch" + if len(remaining_matchers) > len(not_matched): + extra_matchers = remaining_matchers[common_length:] + msg += " and %s extra matcher" % (len(extra_matchers), ) + if len(extra_matchers) > 1: + msg += "s" + msg += ': ' + ', '.join(map(str, extra_matchers)) + elif len(not_matched) > len(remaining_matchers): + extra_values = not_matched[common_length:] + msg += " and %s extra value" % (len(extra_values), ) + if len(extra_values) > 1: + msg += "s" + msg += ': ' + str(extra_values) + return Annotate( + msg, MatchesListwise(remaining_matchers[:common_length]) + ).match(not_matched[:common_length]) + + +class AfterPreprocessing(object): + """Matches if the value matches after passing through a function. + + This can be used to aid in creating trivial matchers as functions, for + example:: + + def PathHasFileContent(content): + def _read(path): + return open(path).read() + return AfterPreprocessing(_read, Equals(content)) + """ + + def __init__(self, preprocessor, matcher, annotate=True): + """Create an AfterPreprocessing matcher. + + :param preprocessor: A function called with the matchee before + matching. + :param matcher: What to match the preprocessed matchee against. 
+ :param annotate: Whether or not to annotate the matcher with + something explaining how we transformed the matchee. Defaults + to True. + """ + self.preprocessor = preprocessor + self.matcher = matcher + self.annotate = annotate + + def _str_preprocessor(self): + if isinstance(self.preprocessor, types.FunctionType): + return '<function %s>' % self.preprocessor.__name__ + return str(self.preprocessor) + + def __str__(self): + return "AfterPreprocessing(%s, %s)" % ( + self._str_preprocessor(), self.matcher) + + def match(self, value): + after = self.preprocessor(value) + if self.annotate: + matcher = Annotate( + "after %s on %r" % (self._str_preprocessor(), value), + self.matcher) + else: + matcher = self.matcher + return matcher.match(after) + +# This is the old, deprecated. spelling of the name, kept for backwards +# compatibility. +AfterPreproccessing = AfterPreprocessing + + +class AllMatch(object): + """Matches if all provided values match the given matcher.""" + + def __init__(self, matcher): + self.matcher = matcher + + def __str__(self): + return 'AllMatch(%s)' % (self.matcher,) + + def match(self, values): + mismatches = [] + for value in values: + mismatch = self.matcher.match(value) + if mismatch: + mismatches.append(mismatch) + if mismatches: + return MismatchesAll(mismatches) + + +# Signal that this is part of the testing framework, and that code from this +# should not normally appear in tracebacks. +__unittest = True diff --git a/test/3rdparty/testtools-0.9.12/testtools/monkey.py b/test/3rdparty/testtools-0.9.12/testtools/monkey.py new file mode 100644 index 00000000000..ba0ac8fd8bf --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/monkey.py @@ -0,0 +1,97 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Helpers for monkey-patching Python code.""" + +__all__ = [ + 'MonkeyPatcher', + 'patch', + ] + + +class MonkeyPatcher(object): + """A set of monkey-patches that can be applied and removed all together. 
+ + Use this to cover up attributes with new objects. Particularly useful for + testing difficult code. + """ + + # Marker used to indicate that the patched attribute did not exist on the + # object before we patched it. + _NO_SUCH_ATTRIBUTE = object() + + def __init__(self, *patches): + """Construct a `MonkeyPatcher`. + + :param patches: The patches to apply, each should be (obj, name, + new_value). Providing patches here is equivalent to calling + `add_patch`. + """ + # List of patches to apply in (obj, name, value). + self._patches_to_apply = [] + # List of the original values for things that have been patched. + # (obj, name, value) format. + self._originals = [] + for patch in patches: + self.add_patch(*patch) + + def add_patch(self, obj, name, value): + """Add a patch to overwrite 'name' on 'obj' with 'value'. + + The attribute C{name} on C{obj} will be assigned to C{value} when + C{patch} is called or during C{run_with_patches}. + + You can restore the original values with a call to restore(). + """ + self._patches_to_apply.append((obj, name, value)) + + def patch(self): + """Apply all of the patches that have been specified with `add_patch`. + + Reverse this operation using L{restore}. + """ + for obj, name, value in self._patches_to_apply: + original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE) + self._originals.append((obj, name, original_value)) + setattr(obj, name, value) + + def restore(self): + """Restore all original values to any patched objects. + + If the patched attribute did not exist on an object before it was + patched, `restore` will delete the attribute so as to return the + object to its original state. + """ + while self._originals: + obj, name, value = self._originals.pop() + if value is self._NO_SUCH_ATTRIBUTE: + delattr(obj, name) + else: + setattr(obj, name, value) + + def run_with_patches(self, f, *args, **kw): + """Run 'f' with the given args and kwargs with all patches applied. 
+ + Restores all objects to their original state when finished. + """ + self.patch() + try: + return f(*args, **kw) + finally: + self.restore() + + +def patch(obj, attribute, value): + """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'. + + If 'attribute' is not set on 'obj' already, then the returned callable + will delete the attribute when called. + + :param obj: An object to monkey-patch. + :param attribute: The name of the attribute to patch. + :param value: The value to set 'obj.attribute' to. + :return: A nullary callable that, when run, will restore 'obj' to its + original state. + """ + patcher = MonkeyPatcher((obj, attribute, value)) + patcher.patch() + return patcher.restore diff --git a/test/3rdparty/testtools-0.9.12/testtools/run.py b/test/3rdparty/testtools-0.9.12/testtools/run.py new file mode 100755 index 00000000000..72011c74cab --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/run.py @@ -0,0 +1,332 @@ +# Copyright (c) 2009 testtools developers. See LICENSE for details. + +"""python -m testtools.run testspec [testspec...] + +Run some tests with the testtools extended API. + +For instance, to run the testtools test suite. 
+ $ python -m testtools.run testtools.tests.test_suite +""" + +import os +import unittest +import sys + +from testtools import TextTestResult +from testtools.compat import classtypes, istext, unicode_output_stream +from testtools.testsuite import iterate_tests + + +defaultTestLoader = unittest.defaultTestLoader +defaultTestLoaderCls = unittest.TestLoader + +if getattr(defaultTestLoader, 'discover', None) is None: + try: + import discover + defaultTestLoader = discover.DiscoveringTestLoader() + defaultTestLoaderCls = discover.DiscoveringTestLoader + have_discover = True + except ImportError: + have_discover = False +else: + have_discover = True + + +class TestToolsTestRunner(object): + """ A thunk object to support unittest.TestProgram.""" + + def __init__(self, stdout): + self.stdout = stdout + + def run(self, test): + "Run the given test case or test suite." + result = TextTestResult(unicode_output_stream(self.stdout)) + result.startTestRun() + try: + return test.run(result) + finally: + result.stopTestRun() + + +#################### +# Taken from python 2.7 and slightly modified for compatibility with +# older versions. Delete when 2.7 is the oldest supported version. +# Modifications: +# - Use have_discover to raise an error if the user tries to use +# discovery on an old version and doesn't have discover installed. +# - If --catch is given check that installHandler is available, as +# it won't be on old python versions. +# - print calls have been been made single-source python3 compatibile. +# - exception handling likewise. +# - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE +# removed. +# - A tweak has been added to detect 'python -m *.run' and use a +# better progName in that case. +# - self.module is more comprehensively set to None when being invoked from +# the commandline - __name__ is used as a sentinel value. +# - --list has been added which can list tests (should be upstreamed). 
+# - --load-list has been added which can reduce the tests used (should be +# upstreamed). +# - The limitation of using getopt is declared to the user. + +FAILFAST = " -f, --failfast Stop on first failure\n" +CATCHBREAK = " -c, --catch Catch control-C and display results\n" +BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n" + +USAGE_AS_MAIN = """\ +Usage: %(progName)s [options] [tests] + +Options: + -h, --help Show this message + -v, --verbose Verbose output + -q, --quiet Minimal output + -l, --list List tests rather than executing them. + --load-list Specifies a file containing test ids, only tests matching + those ids are executed. +%(failfast)s%(catchbreak)s%(buffer)s +Examples: + %(progName)s test_module - run tests from test_module + %(progName)s module.TestClass - run tests from module.TestClass + %(progName)s module.Class.test_method - run specified test method + +All options must come before [tests]. [tests] can be a list of any number of +test modules, classes and test methods. + +Alternative Usage: %(progName)s discover [options] + +Options: + -v, --verbose Verbose output +%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default) + -p pattern Pattern to match test files ('test*.py' default) + -t directory Top level directory of project (default to + start directory) + -l, --list List tests rather than executing them. + --load-list Specifies a file containing test ids, only tests matching + those ids are executed. + +For test discovery all test modules must be importable from the top +level directory of the project. +""" + + +class TestProgram(object): + """A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. 
+ """ + USAGE = USAGE_AS_MAIN + + # defaults for testing + failfast = catchbreak = buffer = progName = None + + def __init__(self, module=__name__, defaultTest=None, argv=None, + testRunner=None, testLoader=defaultTestLoader, + exit=True, verbosity=1, failfast=None, catchbreak=None, + buffer=None, stdout=None): + if module == __name__: + self.module = None + elif istext(module): + self.module = __import__(module) + for part in module.split('.')[1:]: + self.module = getattr(self.module, part) + else: + self.module = module + if argv is None: + argv = sys.argv + if stdout is None: + stdout = sys.stdout + + self.exit = exit + self.failfast = failfast + self.catchbreak = catchbreak + self.verbosity = verbosity + self.buffer = buffer + self.defaultTest = defaultTest + self.listtests = False + self.load_list = None + self.testRunner = testRunner + self.testLoader = testLoader + progName = argv[0] + if progName.endswith('%srun.py' % os.path.sep): + elements = progName.split(os.path.sep) + progName = '%s.run' % elements[-2] + else: + progName = os.path.basename(argv[0]) + self.progName = progName + self.parseArgs(argv) + if self.load_list: + # TODO: preserve existing suites (like testresources does in + # OptimisingTestSuite.add, but with a standard protocol). + # This is needed because the load_tests hook allows arbitrary + # suites, even if that is rarely used. 
+ source = open(self.load_list, 'rb') + try: + lines = source.readlines() + finally: + source.close() + test_ids = set(line.strip().decode('utf-8') for line in lines) + filtered = unittest.TestSuite() + for test in iterate_tests(self.test): + if test.id() in test_ids: + filtered.addTest(test) + self.test = filtered + if not self.listtests: + self.runTests() + else: + for test in iterate_tests(self.test): + stdout.write('%s\n' % test.id()) + + def usageExit(self, msg=None): + if msg: + print(msg) + usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '', + 'buffer': ''} + if self.failfast != False: + usage['failfast'] = FAILFAST + if self.catchbreak != False: + usage['catchbreak'] = CATCHBREAK + if self.buffer != False: + usage['buffer'] = BUFFEROUTPUT + print(self.USAGE % usage) + sys.exit(2) + + def parseArgs(self, argv): + if len(argv) > 1 and argv[1].lower() == 'discover': + self._do_discovery(argv[2:]) + return + + import getopt + long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer', + 'list', 'load-list='] + try: + options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts) + for opt, value in options: + if opt in ('-h','-H','--help'): + self.usageExit() + if opt in ('-q','--quiet'): + self.verbosity = 0 + if opt in ('-v','--verbose'): + self.verbosity = 2 + if opt in ('-f','--failfast'): + if self.failfast is None: + self.failfast = True + # Should this raise an exception if -f is not valid? + if opt in ('-c','--catch'): + if self.catchbreak is None: + self.catchbreak = True + # Should this raise an exception if -c is not valid? + if opt in ('-b','--buffer'): + if self.buffer is None: + self.buffer = True + # Should this raise an exception if -b is not valid? 
+ if opt in ('-l', '--list'): + self.listtests = True + if opt == '--load-list': + self.load_list = value + if len(args) == 0 and self.defaultTest is None: + # createTests will load tests from self.module + self.testNames = None + elif len(args) > 0: + self.testNames = args + else: + self.testNames = (self.defaultTest,) + self.createTests() + except getopt.error: + self.usageExit(sys.exc_info()[1]) + + def createTests(self): + if self.testNames is None: + self.test = self.testLoader.loadTestsFromModule(self.module) + else: + self.test = self.testLoader.loadTestsFromNames(self.testNames, + self.module) + + def _do_discovery(self, argv, Loader=defaultTestLoaderCls): + # handle command line args for test discovery + if not have_discover: + raise AssertionError("Unable to use discovery, must use python 2.7 " + "or greater, or install the discover package.") + self.progName = '%s discover' % self.progName + import optparse + parser = optparse.OptionParser() + parser.prog = self.progName + parser.add_option('-v', '--verbose', dest='verbose', default=False, + help='Verbose output', action='store_true') + if self.failfast != False: + parser.add_option('-f', '--failfast', dest='failfast', default=False, + help='Stop on first fail or error', + action='store_true') + if self.catchbreak != False: + parser.add_option('-c', '--catch', dest='catchbreak', default=False, + help='Catch ctrl-C and display results so far', + action='store_true') + if self.buffer != False: + parser.add_option('-b', '--buffer', dest='buffer', default=False, + help='Buffer stdout and stderr during tests', + action='store_true') + parser.add_option('-s', '--start-directory', dest='start', default='.', + help="Directory to start discovery ('.' 
default)") + parser.add_option('-p', '--pattern', dest='pattern', default='test*.py', + help="Pattern to match tests ('test*.py' default)") + parser.add_option('-t', '--top-level-directory', dest='top', default=None, + help='Top level directory of project (defaults to start directory)') + parser.add_option('-l', '--list', dest='listtests', default=False, + help='List tests rather than running them.') + parser.add_option('--load-list', dest='load_list', default=None, + help='Specify a filename containing the test ids to use.') + + options, args = parser.parse_args(argv) + if len(args) > 3: + self.usageExit() + + for name, value in zip(('start', 'pattern', 'top'), args): + setattr(options, name, value) + + # only set options from the parsing here + # if they weren't set explicitly in the constructor + if self.failfast is None: + self.failfast = options.failfast + if self.catchbreak is None: + self.catchbreak = options.catchbreak + if self.buffer is None: + self.buffer = options.buffer + self.listtests = options.listtests + self.load_list = options.load_list + + if options.verbose: + self.verbosity = 2 + + start_dir = options.start + pattern = options.pattern + top_level_dir = options.top + + loader = Loader() + self.test = loader.discover(start_dir, pattern, top_level_dir) + + def runTests(self): + if (self.catchbreak + and getattr(unittest, 'installHandler', None) is not None): + unittest.installHandler() + if self.testRunner is None: + self.testRunner = runner.TextTestRunner + if isinstance(self.testRunner, classtypes()): + try: + testRunner = self.testRunner(verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer) + except TypeError: + # didn't accept the verbosity, buffer or failfast arguments + testRunner = self.testRunner() + else: + # it is assumed to be a TestRunner instance + testRunner = self.testRunner + self.result = testRunner.run(self.test) + if self.exit: + sys.exit(not self.result.wasSuccessful()) +################ + +def main(argv, 
stdout): + runner = TestToolsTestRunner(stdout) + program = TestProgram(argv=argv, testRunner=runner, stdout=stdout) + +if __name__ == '__main__': + main(sys.argv, sys.stdout) diff --git a/test/3rdparty/testtools-0.9.12/testtools/runtest.py b/test/3rdparty/testtools-0.9.12/testtools/runtest.py new file mode 100644 index 00000000000..507ad87c276 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/runtest.py @@ -0,0 +1,205 @@ +# Copyright (c) 2009-2010 testtools developers. See LICENSE for details. + +"""Individual test case execution.""" + +__all__ = [ + 'MultipleExceptions', + 'RunTest', + ] + +import sys + +from testtools.testresult import ExtendedToOriginalDecorator + + +class MultipleExceptions(Exception): + """Represents many exceptions raised from some operation. + + :ivar args: The sys.exc_info() tuples for each exception. + """ + + +class RunTest(object): + """An object to run a test. + + RunTest objects are used to implement the internal logic involved in + running a test. TestCase.__init__ stores _RunTest as the class of RunTest + to execute. Passing the runTest= parameter to TestCase.__init__ allows a + different RunTest class to be used to execute the test. + + Subclassing or replacing RunTest can be useful to add functionality to the + way that tests are run in a given project. + + :ivar case: The test case that is to be run. + :ivar result: The result object a case is reporting to. + :ivar handlers: A list of (ExceptionClass, handler_function) for + exceptions that should be caught if raised from the user + code. Exceptions that are caught are checked against this list in + first to last order. There is a catch-all of 'Exception' at the end + of the list, so to add a new exception to the list, insert it at the + front (which ensures that it will be checked before any existing base + classes in the list. 
If you add multiple exceptions some of which are + subclasses of each other, add the most specific exceptions last (so + they come before their parent classes in the list). + :ivar exception_caught: An object returned when _run_user catches an + exception. + :ivar _exceptions: A list of caught exceptions, used to do the single + reporting of error/failure/skip etc. + """ + + def __init__(self, case, handlers=None): + """Create a RunTest to run a case. + + :param case: A testtools.TestCase test case object. + :param handlers: Exception handlers for this RunTest. These are stored + in self.handlers and can be modified later if needed. + """ + self.case = case + self.handlers = handlers or [] + self.exception_caught = object() + self._exceptions = [] + + def run(self, result=None): + """Run self.case reporting activity to result. + + :param result: Optional testtools.TestResult to report activity to. + :return: The result object the test was run against. + """ + if result is None: + actual_result = self.case.defaultTestResult() + actual_result.startTestRun() + else: + actual_result = result + try: + return self._run_one(actual_result) + finally: + if result is None: + actual_result.stopTestRun() + + def _run_one(self, result): + """Run one test reporting to result. + + :param result: A testtools.TestResult to report activity to. + This result object is decorated with an ExtendedToOriginalDecorator + to ensure that the latest TestResult API can be used with + confidence by client code. + :return: The result object the test was run against. + """ + return self._run_prepared_result(ExtendedToOriginalDecorator(result)) + + def _run_prepared_result(self, result): + """Run one test reporting to result. + + :param result: A testtools.TestResult to report activity to. + :return: The result object the test was run against. 
+ """ + result.startTest(self.case) + self.result = result + try: + self._exceptions = [] + self._run_core() + if self._exceptions: + # One or more caught exceptions, now trigger the test's + # reporting method for just one. + e = self._exceptions.pop() + for exc_class, handler in self.handlers: + if isinstance(e, exc_class): + handler(self.case, self.result, e) + break + finally: + result.stopTest(self.case) + return result + + def _run_core(self): + """Run the user supplied test code.""" + if self.exception_caught == self._run_user(self.case._run_setup, + self.result): + # Don't run the test method if we failed getting here. + self._run_cleanups(self.result) + return + # Run everything from here on in. If any of the methods raise an + # exception we'll have failed. + failed = False + try: + if self.exception_caught == self._run_user( + self.case._run_test_method, self.result): + failed = True + finally: + try: + if self.exception_caught == self._run_user( + self.case._run_teardown, self.result): + failed = True + finally: + try: + if self.exception_caught == self._run_user( + self._run_cleanups, self.result): + failed = True + finally: + if not failed: + self.result.addSuccess(self.case, + details=self.case.getDetails()) + + def _run_cleanups(self, result): + """Run the cleanups that have been added with addCleanup. + + See the docstring for addCleanup for more information. + + :return: None if all cleanups ran without error, + ``exception_caught`` if there was an error. + """ + failing = False + while self.case._cleanups: + function, arguments, keywordArguments = self.case._cleanups.pop() + got_exception = self._run_user( + function, *arguments, **keywordArguments) + if got_exception == self.exception_caught: + failing = True + if failing: + return self.exception_caught + + def _run_user(self, fn, *args, **kwargs): + """Run a user supplied function. + + Exceptions are processed by `_got_user_exception`. 
+ + :return: Either whatever 'fn' returns or ``exception_caught`` if + 'fn' raised an exception. + """ + try: + return fn(*args, **kwargs) + except KeyboardInterrupt: + raise + except: + return self._got_user_exception(sys.exc_info()) + + def _got_user_exception(self, exc_info, tb_label='traceback'): + """Called when user code raises an exception. + + If 'exc_info' is a `MultipleExceptions`, then we recurse into it + unpacking the errors that it's made up from. + + :param exc_info: A sys.exc_info() tuple for the user error. + :param tb_label: An optional string label for the error. If + not specified, will default to 'traceback'. + :return: 'exception_caught' if we catch one of the exceptions that + have handlers in 'handlers', otherwise raise the error. + """ + if exc_info[0] is MultipleExceptions: + for sub_exc_info in exc_info[1].args: + self._got_user_exception(sub_exc_info, tb_label) + return self.exception_caught + try: + e = exc_info[1] + self.case.onException(exc_info, tb_label=tb_label) + finally: + del exc_info + for exc_class, handler in self.handlers: + if isinstance(e, exc_class): + self._exceptions.append(e) + return self.exception_caught + raise e + + +# Signal that this is part of the testing framework, and that code from this +# should not normally appear in tracebacks. +__unittest = True diff --git a/test/3rdparty/testtools-0.9.12/testtools/testcase.py b/test/3rdparty/testtools-0.9.12/testtools/testcase.py new file mode 100644 index 00000000000..ee5e296cd46 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/testcase.py @@ -0,0 +1,782 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. 
+ +"""Test case related stuff.""" + +__metaclass__ = type +__all__ = [ + 'clone_test_with_new_id', + 'ExpectedException', + 'gather_details', + 'run_test_with', + 'skip', + 'skipIf', + 'skipUnless', + 'TestCase', + ] + +import copy +import itertools +import sys +import types +import unittest + +from testtools import ( + content, + try_import, + ) +from testtools.compat import ( + advance_iterator, + reraise, + ) +from testtools.matchers import ( + Annotate, + Contains, + Equals, + MatchesAll, + MatchesException, + MismatchError, + Is, + IsInstance, + Not, + Raises, + ) +from testtools.monkey import patch +from testtools.runtest import RunTest +from testtools.testresult import TestResult + +wraps = try_import('functools.wraps') + +class TestSkipped(Exception): + """Raised within TestCase.run() when a test is skipped.""" +testSkipped = try_import('unittest2.case.SkipTest', TestSkipped) +TestSkipped = try_import('unittest.case.SkipTest', TestSkipped) + + +class _UnexpectedSuccess(Exception): + """An unexpected success was raised. + + Note that this exception is private plumbing in testtools' testcase + module. + """ +_UnexpectedSuccess = try_import( + 'unittest2.case._UnexpectedSuccess', _UnexpectedSuccess) +_UnexpectedSuccess = try_import( + 'unittest.case._UnexpectedSuccess', _UnexpectedSuccess) + +class _ExpectedFailure(Exception): + """An expected failure occured. + + Note that this exception is private plumbing in testtools' testcase + module. + """ +_ExpectedFailure = try_import( + 'unittest2.case._ExpectedFailure', _ExpectedFailure) +_ExpectedFailure = try_import( + 'unittest.case._ExpectedFailure', _ExpectedFailure) + + +def run_test_with(test_runner, **kwargs): + """Decorate a test as using a specific ``RunTest``. + + e.g.:: + + @run_test_with(CustomRunner, timeout=42) + def test_foo(self): + self.assertTrue(True) + + The returned decorator works by setting an attribute on the decorated + function. 
`TestCase.__init__` looks for this attribute when deciding on a + ``RunTest`` factory. If you wish to use multiple decorators on a test + method, then you must either make this one the top-most decorator, or you + must write your decorators so that they update the wrapping function with + the attributes of the wrapped function. The latter is recommended style + anyway. ``functools.wraps``, ``functools.wrapper`` and + ``twisted.python.util.mergeFunctionMetadata`` can help you do this. + + :param test_runner: A ``RunTest`` factory that takes a test case and an + optional list of exception handlers. See ``RunTest``. + :param kwargs: Keyword arguments to pass on as extra arguments to + 'test_runner'. + :return: A decorator to be used for marking a test as needing a special + runner. + """ + def decorator(function): + # Set an attribute on 'function' which will inform TestCase how to + # make the runner. + function._run_test_with = ( + lambda case, handlers=None: + test_runner(case, handlers=handlers, **kwargs)) + return function + return decorator + + +def _copy_content(content_object): + """Make a copy of the given content object. + + The content within `content_object` is iterated and saved. This is useful + when the source of the content is volatile, a log file in a temporary + directory for example. + + :param content_object: A `content.Content` instance. + :return: A `content.Content` instance with the same mime-type as + `content_object` and a non-volatile copy of its content. + """ + content_bytes = list(content_object.iter_bytes()) + content_callback = lambda: content_bytes + return content.Content(content_object.content_type, content_callback) + + +def gather_details(source_dict, target_dict): + """Merge the details from `source_dict` into `target_dict`. + + :param source_dict: A dictionary of details will be gathered. + :param target_dict: A dictionary into which details will be gathered. 
+ """ + for name, content_object in source_dict.items(): + new_name = name + disambiguator = itertools.count(1) + while new_name in target_dict: + new_name = '%s-%d' % (name, advance_iterator(disambiguator)) + name = new_name + target_dict[name] = _copy_content(content_object) + + +class TestCase(unittest.TestCase): + """Extensions to the basic TestCase. + + :ivar exception_handlers: Exceptions to catch from setUp, runTest and + tearDown. This list is able to be modified at any time and consists of + (exception_class, handler(case, result, exception_value)) pairs. + :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with. + Defaults to ``RunTest``. The factory is expected to take a test case + and an optional list of exception handlers. + """ + + skipException = TestSkipped + + run_tests_with = RunTest + + def __init__(self, *args, **kwargs): + """Construct a TestCase. + + :param testMethod: The name of the method to run. + :keyword runTest: Optional class to use to execute the test. If not + supplied ``RunTest`` is used. The instance to be used is created + when run() is invoked, so will be fresh each time. Overrides + ``TestCase.run_tests_with`` if given. + """ + runTest = kwargs.pop('runTest', None) + super(TestCase, self).__init__(*args, **kwargs) + self._cleanups = [] + self._unique_id_gen = itertools.count(1) + # Generators to ensure unique traceback ids. Maps traceback label to + # iterators. + self._traceback_id_gens = {} + self.__setup_called = False + self.__teardown_called = False + # __details is lazy-initialized so that a constructed-but-not-run + # TestCase is safe to use with clone_test_with_new_id. 
+ self.__details = None + test_method = self._get_test_method() + if runTest is None: + runTest = getattr( + test_method, '_run_test_with', self.run_tests_with) + self.__RunTest = runTest + self.__exception_handlers = [] + self.exception_handlers = [ + (self.skipException, self._report_skip), + (self.failureException, self._report_failure), + (_ExpectedFailure, self._report_expected_failure), + (_UnexpectedSuccess, self._report_unexpected_success), + (Exception, self._report_error), + ] + if sys.version_info < (2, 6): + # Catch old-style string exceptions with None as the instance + self.exception_handlers.append((type(None), self._report_error)) + + def __eq__(self, other): + eq = getattr(unittest.TestCase, '__eq__', None) + if eq is not None and not unittest.TestCase.__eq__(self, other): + return False + return self.__dict__ == other.__dict__ + + def __repr__(self): + # We add id to the repr because it makes testing testtools easier. + return "<%s id=0x%0x>" % (self.id(), id(self)) + + def addDetail(self, name, content_object): + """Add a detail to be reported with this test's outcome. + + For more details see pydoc testtools.TestResult. + + :param name: The name to give this detail. + :param content_object: The content object for this detail. See + testtools.content for more detail. + """ + if self.__details is None: + self.__details = {} + self.__details[name] = content_object + + def getDetails(self): + """Get the details dict that will be reported with this test's outcome. + + For more details see pydoc testtools.TestResult. + """ + if self.__details is None: + self.__details = {} + return self.__details + + def patch(self, obj, attribute, value): + """Monkey-patch 'obj.attribute' to 'value' while the test is running. + + If 'obj' has no attribute, then the monkey-patch will still go ahead, + and the attribute will be deleted instead of restored to its original + value. + + :param obj: The object to patch. Can be anything. 
+ :param attribute: The attribute on 'obj' to patch. + :param value: The value to set 'obj.attribute' to. + """ + self.addCleanup(patch(obj, attribute, value)) + + def shortDescription(self): + return self.id() + + def skipTest(self, reason): + """Cause this test to be skipped. + + This raises self.skipException(reason). skipException is raised + to permit a skip to be triggered at any point (during setUp or the + testMethod itself). The run() method catches skipException and + translates that into a call to the result objects addSkip method. + + :param reason: The reason why the test is being skipped. This must + support being cast into a unicode string for reporting. + """ + raise self.skipException(reason) + + # skipTest is how python2.7 spells this. Sometime in the future + # This should be given a deprecation decorator - RBC 20100611. + skip = skipTest + + def _formatTypes(self, classOrIterable): + """Format a class or a bunch of classes for display in an error.""" + className = getattr(classOrIterable, '__name__', None) + if className is None: + className = ', '.join(klass.__name__ for klass in classOrIterable) + return className + + def addCleanup(self, function, *arguments, **keywordArguments): + """Add a cleanup function to be called after tearDown. + + Functions added with addCleanup will be called in reverse order of + adding after tearDown, or after setUp if setUp raises an exception. + + If a function added with addCleanup raises an exception, the error + will be recorded as a test error, and the next cleanup will then be + run. + + Cleanup functions are always called before a test finishes running, + even if setUp is aborted by an exception. + """ + self._cleanups.append((function, arguments, keywordArguments)) + + def addOnException(self, handler): + """Add a handler to be called when an exception occurs in test code. + + This handler cannot affect what result methods are called, and is + called before any outcome is called on the result object. 
An example + use for it is to add some diagnostic state to the test details dict + which is expensive to calculate and not interesting for reporting in + the success case. + + Handlers are called before the outcome (such as addFailure) that + the exception has caused. + + Handlers are called in first-added, first-called order, and if they + raise an exception, that will propogate out of the test running + machinery, halting test processing. As a result, do not call code that + may unreasonably fail. + """ + self.__exception_handlers.append(handler) + + def _add_reason(self, reason): + self.addDetail('reason', content.Content( + content.ContentType('text', 'plain'), + lambda: [reason.encode('utf8')])) + + def assertEqual(self, expected, observed, message=''): + """Assert that 'expected' is equal to 'observed'. + + :param expected: The expected value. + :param observed: The observed value. + :param message: An optional message to include in the error. + """ + matcher = Equals(expected) + self.assertThat(observed, matcher, message) + + failUnlessEqual = assertEquals = assertEqual + + def assertIn(self, needle, haystack): + """Assert that needle is in haystack.""" + self.assertThat(haystack, Contains(needle)) + + def assertIsNone(self, observed, message=''): + """Assert that 'observed' is equal to None. + + :param observed: The observed value. + :param message: An optional message describing the error. + """ + matcher = Is(None) + self.assertThat(observed, matcher, message) + + def assertIsNotNone(self, observed, message=''): + """Assert that 'observed' is not equal to None. + + :param observed: The observed value. + :param message: An optional message describing the error. + """ + matcher = Not(Is(None)) + self.assertThat(observed, matcher, message) + + def assertIs(self, expected, observed, message=''): + """Assert that 'expected' is 'observed'. + + :param expected: The expected value. + :param observed: The observed value. 
+ :param message: An optional message describing the error. + """ + matcher = Is(expected) + self.assertThat(observed, matcher, message) + + def assertIsNot(self, expected, observed, message=''): + """Assert that 'expected' is not 'observed'.""" + matcher = Not(Is(expected)) + self.assertThat(observed, matcher, message) + + def assertNotIn(self, needle, haystack): + """Assert that needle is not in haystack.""" + matcher = Not(Contains(needle)) + self.assertThat(haystack, matcher) + + def assertIsInstance(self, obj, klass, msg=None): + if isinstance(klass, tuple): + matcher = IsInstance(*klass) + else: + matcher = IsInstance(klass) + self.assertThat(obj, matcher, msg) + + def assertRaises(self, excClass, callableObj, *args, **kwargs): + """Fail unless an exception of class excClass is thrown + by callableObj when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + """ + class ReRaiseOtherTypes(object): + def match(self, matchee): + if not issubclass(matchee[0], excClass): + reraise(*matchee) + class CaptureMatchee(object): + def match(self, matchee): + self.matchee = matchee[1] + capture = CaptureMatchee() + matcher = Raises(MatchesAll(ReRaiseOtherTypes(), + MatchesException(excClass), capture)) + + self.assertThat(lambda: callableObj(*args, **kwargs), matcher) + return capture.matchee + failUnlessRaises = assertRaises + + def assertThat(self, matchee, matcher, message='', verbose=False): + """Assert that matchee is matched by matcher. + + :param matchee: An object to match with matcher. + :param matcher: An object meeting the testtools.Matcher protocol. + :raises MismatchError: When matcher does not match thing. 
+ """ + matcher = Annotate.if_message(message, matcher) + mismatch = matcher.match(matchee) + if not mismatch: + return + existing_details = self.getDetails() + for (name, content) in mismatch.get_details().items(): + full_name = name + suffix = 1 + while full_name in existing_details: + full_name = "%s-%d" % (name, suffix) + suffix += 1 + self.addDetail(full_name, content) + raise MismatchError(matchee, matcher, mismatch, verbose) + + def defaultTestResult(self): + return TestResult() + + def expectFailure(self, reason, predicate, *args, **kwargs): + """Check that a test fails in a particular way. + + If the test fails in the expected way, a KnownFailure is caused. If it + succeeds an UnexpectedSuccess is caused. + + The expected use of expectFailure is as a barrier at the point in a + test where the test would fail. For example: + >>> def test_foo(self): + >>> self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0) + >>> self.assertEqual(1, 0) + + If in the future 1 were to equal 0, the expectFailure call can simply + be removed. This separation preserves the original intent of the test + while it is in the expectFailure mode. + """ + # TODO: implement with matchers. + self._add_reason(reason) + try: + predicate(*args, **kwargs) + except self.failureException: + # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new + # unittest _ExpectedFailure wants old traceback + exc_info = sys.exc_info() + try: + self._report_traceback(exc_info) + raise _ExpectedFailure(exc_info) + finally: + del exc_info + else: + raise _UnexpectedSuccess(reason) + + def getUniqueInteger(self): + """Get an integer unique to this test. + + Returns an integer that is guaranteed to be unique to this instance. + Use this when you need an arbitrary integer in your test, or as a + helper for custom anonymous factory methods. + """ + return advance_iterator(self._unique_id_gen) + + def getUniqueString(self, prefix=None): + """Get a string unique to this test. 
+
+        Returns a string that is guaranteed to be unique to this instance. Use
+        this when you need an arbitrary string in your test, or as a helper
+        for custom anonymous factory methods.
+
+        :param prefix: The prefix of the string. If not provided, defaults
+            to the id of the tests.
+        :return: A bytestring of '<prefix>-<unique_int>'.
+        """
+        if prefix is None:
+            prefix = self.id()
+        return '%s-%d' % (prefix, self.getUniqueInteger())
+
+    def onException(self, exc_info, tb_label='traceback'):
+        """Called when an exception propagates from test code.
+
+        :seealso: addOnException
+        """
+        if exc_info[0] not in [
+            TestSkipped, _UnexpectedSuccess, _ExpectedFailure]:
+            self._report_traceback(exc_info, tb_label=tb_label)
+        for handler in self.__exception_handlers:
+            handler(exc_info)
+
+    @staticmethod
+    def _report_error(self, result, err):
+        result.addError(self, details=self.getDetails())
+
+    @staticmethod
+    def _report_expected_failure(self, result, err):
+        result.addExpectedFailure(self, details=self.getDetails())
+
+    @staticmethod
+    def _report_failure(self, result, err):
+        result.addFailure(self, details=self.getDetails())
+
+    @staticmethod
+    def _report_skip(self, result, err):
+        if err.args:
+            reason = err.args[0]
+        else:
+            reason = "no reason given."
+        self._add_reason(reason)
+        result.addSkip(self, details=self.getDetails())
+
+    def _report_traceback(self, exc_info, tb_label='traceback'):
+        id_gen = self._traceback_id_gens.setdefault(
+            tb_label, itertools.count(0))
+        tb_id = advance_iterator(id_gen)
+        if tb_id:
+            tb_label = '%s-%d' % (tb_label, tb_id)
+        self.addDetail(tb_label, content.TracebackContent(exc_info, self))
+
+    @staticmethod
+    def _report_unexpected_success(self, result, err):
+        result.addUnexpectedSuccess(self, details=self.getDetails())
+
+    def run(self, result=None):
+        return self.__RunTest(self, self.exception_handlers).run(result)
+
+    def _run_setup(self, result):
+        """Run the setUp function for this test.
+ + :param result: A testtools.TestResult to report activity to. + :raises ValueError: If the base class setUp is not called, a + ValueError is raised. + """ + ret = self.setUp() + if not self.__setup_called: + raise ValueError( + "TestCase.setUp was not called. Have you upcalled all the " + "way up the hierarchy from your setUp? e.g. Call " + "super(%s, self).setUp() from your setUp()." + % self.__class__.__name__) + return ret + + def _run_teardown(self, result): + """Run the tearDown function for this test. + + :param result: A testtools.TestResult to report activity to. + :raises ValueError: If the base class tearDown is not called, a + ValueError is raised. + """ + ret = self.tearDown() + if not self.__teardown_called: + raise ValueError( + "TestCase.tearDown was not called. Have you upcalled all the " + "way up the hierarchy from your tearDown? e.g. Call " + "super(%s, self).tearDown() from your tearDown()." + % self.__class__.__name__) + return ret + + def _get_test_method(self): + absent_attr = object() + # Python 2.5+ + method_name = getattr(self, '_testMethodName', absent_attr) + if method_name is absent_attr: + # Python 2.4 + method_name = getattr(self, '_TestCase__testMethodName') + return getattr(self, method_name) + + def _run_test_method(self, result): + """Run the test method for this test. + + :param result: A testtools.TestResult to report activity to. + :return: None. + """ + return self._get_test_method()() + + def useFixture(self, fixture): + """Use fixture in a test case. + + The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called. + + :param fixture: The fixture to use. + :return: The fixture, after setting it up and scheduling a cleanup for + it. 
+ """ + try: + fixture.setUp() + except: + gather_details(fixture.getDetails(), self.getDetails()) + raise + else: + self.addCleanup(fixture.cleanUp) + self.addCleanup( + gather_details, fixture.getDetails(), self.getDetails()) + return fixture + + def setUp(self): + super(TestCase, self).setUp() + self.__setup_called = True + + def tearDown(self): + super(TestCase, self).tearDown() + unittest.TestCase.tearDown(self) + self.__teardown_called = True + + +class PlaceHolder(object): + """A placeholder test. + + `PlaceHolder` implements much of the same interface as TestCase and is + particularly suitable for being added to TestResults. + """ + + def __init__(self, test_id, short_description=None): + """Construct a `PlaceHolder`. + + :param test_id: The id of the placeholder test. + :param short_description: The short description of the place holder + test. If not provided, the id will be used instead. + """ + self._test_id = test_id + self._short_description = short_description + + def __call__(self, result=None): + return self.run(result=result) + + def __repr__(self): + internal = [self._test_id] + if self._short_description is not None: + internal.append(self._short_description) + return "<%s.%s(%s)>" % ( + self.__class__.__module__, + self.__class__.__name__, + ", ".join(map(repr, internal))) + + def __str__(self): + return self.id() + + def countTestCases(self): + return 1 + + def debug(self): + pass + + def id(self): + return self._test_id + + def run(self, result=None): + if result is None: + result = TestResult() + result.startTest(self) + result.addSuccess(self) + result.stopTest(self) + + def shortDescription(self): + if self._short_description is None: + return self.id() + else: + return self._short_description + + +class ErrorHolder(PlaceHolder): + """A placeholder test that will error out when run.""" + + failureException = None + + def __init__(self, test_id, error, short_description=None): + """Construct an `ErrorHolder`. 
+ + :param test_id: The id of the test. + :param error: The exc info tuple that will be used as the test's error. + :param short_description: An optional short description of the test. + """ + super(ErrorHolder, self).__init__( + test_id, short_description=short_description) + self._error = error + + def __repr__(self): + internal = [self._test_id, self._error] + if self._short_description is not None: + internal.append(self._short_description) + return "<%s.%s(%s)>" % ( + self.__class__.__module__, + self.__class__.__name__, + ", ".join(map(repr, internal))) + + def run(self, result=None): + if result is None: + result = TestResult() + result.startTest(self) + result.addError(self, self._error) + result.stopTest(self) + + +# Python 2.4 did not know how to copy functions. +if types.FunctionType not in copy._copy_dispatch: + copy._copy_dispatch[types.FunctionType] = copy._copy_immutable + + +def clone_test_with_new_id(test, new_id): + """Copy a `TestCase`, and give the copied test a new id. + + This is only expected to be used on tests that have been constructed but + not executed. + """ + newTest = copy.copy(test) + newTest.id = lambda: new_id + return newTest + + +def skip(reason): + """A decorator to skip unit tests. + + This is just syntactic sugar so users don't have to change any of their + unit tests in order to migrate to python 2.7, which provides the + @unittest.skip decorator. 
+ """ + def decorator(test_item): + if wraps is not None: + @wraps(test_item) + def skip_wrapper(*args, **kwargs): + raise TestCase.skipException(reason) + else: + def skip_wrapper(test_item): + test_item.skip(reason) + return skip_wrapper + return decorator + + +def skipIf(condition, reason): + """Skip a test if the condition is true.""" + if condition: + return skip(reason) + def _id(obj): + return obj + return _id + + +def skipUnless(condition, reason): + """Skip a test unless the condition is true.""" + if not condition: + return skip(reason) + def _id(obj): + return obj + return _id + + +class ExpectedException: + """A context manager to handle expected exceptions. + + In Python 2.5 or later:: + + def test_foo(self): + with ExpectedException(ValueError, 'fo.*'): + raise ValueError('foo') + + will pass. If the raised exception has a type other than the specified + type, it will be re-raised. If it has a 'str()' that does not match the + given regular expression, an AssertionError will be raised. If no + exception is raised, an AssertionError will be raised. + """ + + def __init__(self, exc_type, value_re=None): + """Construct an `ExpectedException`. + + :param exc_type: The type of exception to expect. + :param value_re: A regular expression to match against the + 'str()' of the raised exception. + """ + self.exc_type = exc_type + self.value_re = value_re + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, traceback): + if exc_type is None: + raise AssertionError('%s not raised.' % self.exc_type.__name__) + if exc_type != self.exc_type: + return False + if self.value_re: + matcher = MatchesException(self.exc_type, self.value_re) + mismatch = matcher.match((exc_type, exc_value, traceback)) + if mismatch: + raise AssertionError(mismatch.describe()) + return True + + +# Signal that this is part of the testing framework, and that code from this +# should not normally appear in tracebacks. 
+__unittest = True diff --git a/test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py b/test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py new file mode 100644 index 00000000000..19f88bc8a34 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2009 testtools developers. See LICENSE for details. + +"""Test result objects.""" + +__all__ = [ + 'ExtendedToOriginalDecorator', + 'MultiTestResult', + 'TestResult', + 'TextTestResult', + 'ThreadsafeForwardingResult', + ] + +from testtools.testresult.real import ( + ExtendedToOriginalDecorator, + MultiTestResult, + TestResult, + TextTestResult, + ThreadsafeForwardingResult, + ) diff --git a/test/3rdparty/testtools-0.9.12/testtools/testresult/doubles.py b/test/3rdparty/testtools-0.9.12/testtools/testresult/doubles.py new file mode 100644 index 00000000000..9af5b364ffb --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/testresult/doubles.py @@ -0,0 +1,111 @@ +# Copyright (c) 2009-2010 testtools developers. See LICENSE for details. 
+ +"""Doubles of test result objects, useful for testing unittest code.""" + +__all__ = [ + 'Python26TestResult', + 'Python27TestResult', + 'ExtendedTestResult', + ] + + +class LoggingBase(object): + """Basic support for logging of results.""" + + def __init__(self): + self._events = [] + self.shouldStop = False + self._was_successful = True + + +class Python26TestResult(LoggingBase): + """A precisely python 2.6 like test result, that logs.""" + + def addError(self, test, err): + self._was_successful = False + self._events.append(('addError', test, err)) + + def addFailure(self, test, err): + self._was_successful = False + self._events.append(('addFailure', test, err)) + + def addSuccess(self, test): + self._events.append(('addSuccess', test)) + + def startTest(self, test): + self._events.append(('startTest', test)) + + def stop(self): + self.shouldStop = True + + def stopTest(self, test): + self._events.append(('stopTest', test)) + + def wasSuccessful(self): + return self._was_successful + + +class Python27TestResult(Python26TestResult): + """A precisely python 2.7 like test result, that logs.""" + + def addExpectedFailure(self, test, err): + self._events.append(('addExpectedFailure', test, err)) + + def addSkip(self, test, reason): + self._events.append(('addSkip', test, reason)) + + def addUnexpectedSuccess(self, test): + self._events.append(('addUnexpectedSuccess', test)) + + def startTestRun(self): + self._events.append(('startTestRun',)) + + def stopTestRun(self): + self._events.append(('stopTestRun',)) + + +class ExtendedTestResult(Python27TestResult): + """A test result like the proposed extended unittest result API.""" + + def addError(self, test, err=None, details=None): + self._was_successful = False + self._events.append(('addError', test, err or details)) + + def addFailure(self, test, err=None, details=None): + self._was_successful = False + self._events.append(('addFailure', test, err or details)) + + def addExpectedFailure(self, test, err=None, 
details=None): + self._events.append(('addExpectedFailure', test, err or details)) + + def addSkip(self, test, reason=None, details=None): + self._events.append(('addSkip', test, reason or details)) + + def addSuccess(self, test, details=None): + if details: + self._events.append(('addSuccess', test, details)) + else: + self._events.append(('addSuccess', test)) + + def addUnexpectedSuccess(self, test, details=None): + self._was_successful = False + if details is not None: + self._events.append(('addUnexpectedSuccess', test, details)) + else: + self._events.append(('addUnexpectedSuccess', test)) + + def progress(self, offset, whence): + self._events.append(('progress', offset, whence)) + + def startTestRun(self): + super(ExtendedTestResult, self).startTestRun() + self._was_successful = True + + def tags(self, new_tags, gone_tags): + self._events.append(('tags', new_tags, gone_tags)) + + def time(self, time): + self._events.append(('time', time)) + + def wasSuccessful(self): + return self._was_successful diff --git a/test/3rdparty/testtools-0.9.12/testtools/testresult/real.py b/test/3rdparty/testtools-0.9.12/testtools/testresult/real.py new file mode 100644 index 00000000000..eb548dfa2c8 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/testresult/real.py @@ -0,0 +1,658 @@ +# Copyright (c) 2008 testtools developers. See LICENSE for details. + +"""Test results and related things.""" + +__metaclass__ = type +__all__ = [ + 'ExtendedToOriginalDecorator', + 'MultiTestResult', + 'TestResult', + 'ThreadsafeForwardingResult', + ] + +import datetime +import sys +import unittest + +from testtools.compat import all, _format_exc_info, str_is_unicode, _u + +# From http://docs.python.org/library/datetime.html +_ZERO = datetime.timedelta(0) + +# A UTC class. 
+
+class UTC(datetime.tzinfo):
+    """UTC"""
+
+    def utcoffset(self, dt):
+        return _ZERO
+
+    def tzname(self, dt):
+        return "UTC"
+
+    def dst(self, dt):
+        return _ZERO
+
+utc = UTC()
+
+
+class TestResult(unittest.TestResult):
+    """Subclass of unittest.TestResult extending the protocol for flexibility.
+
+    This test result supports an experimental protocol for providing additional
+    data in test outcomes. All the outcome methods take an optional dict
+    'details'. If supplied any other detail parameters like 'err' or 'reason'
+    should not be provided. The details dict is a mapping from names to
+    MIME content objects (see testtools.content). This permits attaching
+    tracebacks, log files, or even large objects like databases that were
+    part of the test fixture. Until this API is accepted into upstream
+    Python it is considered experimental: it may be replaced at any point
+    by a newer version more in line with upstream Python. Compatibility would
+    be aimed for in this case, but may not be possible.
+
+    :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
+    """
+
+    def __init__(self):
+        # startTestRun resets all attributes, and older clients don't know to
+        # call startTestRun, so it is called once here.
+        # Because subclasses may reasonably not expect this, we call the
+        # specific version we want to run.
+        TestResult.startTestRun(self)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        """Called when a test has failed in an expected manner.
+
+        Like with addSuccess and addError, testStopped should still be called.
+
+        :param test: The test that has failed in an expected manner.
+        :param err: The exc_info of the error that was raised.
+        :return: None
+        """
+        # This is the python 2.7 implementation
+        self.expectedFailures.append(
+            (test, self._err_details_to_string(test, err, details)))
+
+    def addError(self, test, err=None, details=None):
+        """Called when an error has occurred. 'err' is a tuple of values as
+        returned by sys.exc_info().
+
+        :param details: Alternative way to supply details about the outcome.
+            see the class docstring for more information.
+        """
+        self.errors.append((test,
+            self._err_details_to_string(test, err, details)))
+
+    def addFailure(self, test, err=None, details=None):
+        """Called when a failure has occurred. 'err' is a tuple of values as
+        returned by sys.exc_info().
+
+        :param details: Alternative way to supply details about the outcome.
+            see the class docstring for more information.
+        """
+        self.failures.append((test,
+            self._err_details_to_string(test, err, details)))
+
+    def addSkip(self, test, reason=None, details=None):
+        """Called when a test has been skipped rather than running.
+
+        Like with addSuccess and addError, testStopped should still be called.
+
+        This must be called by the TestCase. 'addError' and 'addFailure' will
+        not call addSkip, since they have no assumptions about the kind of
+        errors that a test can raise.
+
+        :param test: The test that has been skipped.
+        :param reason: The reason for the test being skipped. For instance,
+            u"pyGL is not available".
+        :param details: Alternative way to supply details about the outcome.
+            see the class docstring for more information.
+        :return: None
+        """
+        if reason is None:
+            reason = details.get('reason')
+            if reason is None:
+                reason = 'No reason given'
+            else:
+                reason = ''.join(reason.iter_text())
+        skip_list = self.skip_reasons.setdefault(reason, [])
+        skip_list.append(test)
+
+    def addSuccess(self, test, details=None):
+        """Called when a test succeeded."""
+
+    def addUnexpectedSuccess(self, test, details=None):
+        """Called when a test was expected to fail, but succeeded."""
+        self.unexpectedSuccesses.append(test)
+
+    def wasSuccessful(self):
+        """Has this result been successful so far?
+
+        If there have been any errors, failures or unexpected successes,
+        return False. Otherwise, return True.
+ + Note: This differs from standard unittest in that we consider + unexpected successes to be equivalent to failures, rather than + successes. + """ + return not (self.errors or self.failures or self.unexpectedSuccesses) + + if str_is_unicode: + # Python 3 and IronPython strings are unicode, use parent class method + _exc_info_to_unicode = unittest.TestResult._exc_info_to_string + else: + # For Python 2, need to decode components of traceback according to + # their source, so can't use traceback.format_exception + # Here follows a little deep magic to copy the existing method and + # replace the formatter with one that returns unicode instead + from types import FunctionType as __F, ModuleType as __M + __f = unittest.TestResult._exc_info_to_string.im_func + __g = dict(__f.func_globals) + __m = __M("__fake_traceback") + __m.format_exception = _format_exc_info + __g["traceback"] = __m + _exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode") + del __F, __M, __f, __g, __m + + def _err_details_to_string(self, test, err=None, details=None): + """Convert an error in exc_info form or a contents dict to a string.""" + if err is not None: + return self._exc_info_to_unicode(err, test) + return _details_to_str(details, special='traceback') + + def _now(self): + """Return the current 'test time'. + + If the time() method has not been called, this is equivalent to + datetime.now(), otherwise its the last supplied datestamp given to the + time() method. + """ + if self.__now is None: + return datetime.datetime.now(utc) + else: + return self.__now + + def startTestRun(self): + """Called before a test run starts. + + New in Python 2.7. The testtools version resets the result to a + pristine condition ready for use in another test run. Note that this + is different from Python 2.7's startTestRun, which does nothing. 
+ """ + super(TestResult, self).__init__() + self.skip_reasons = {} + self.__now = None + # -- Start: As per python 2.7 -- + self.expectedFailures = [] + self.unexpectedSuccesses = [] + # -- End: As per python 2.7 -- + + def stopTestRun(self): + """Called after a test run completes + + New in python 2.7 + """ + + def time(self, a_datetime): + """Provide a timestamp to represent the current time. + + This is useful when test activity is time delayed, or happening + concurrently and getting the system time between API calls will not + accurately represent the duration of tests (or the whole run). + + Calling time() sets the datetime used by the TestResult object. + Time is permitted to go backwards when using this call. + + :param a_datetime: A datetime.datetime object with TZ information or + None to reset the TestResult to gathering time from the system. + """ + self.__now = a_datetime + + def done(self): + """Called when the test runner is done. + + deprecated in favour of stopTestRun. 
+ """ + + +class MultiTestResult(TestResult): + """A test result that dispatches to many test results.""" + + def __init__(self, *results): + TestResult.__init__(self) + self._results = list(map(ExtendedToOriginalDecorator, results)) + + def _dispatch(self, message, *args, **kwargs): + return tuple( + getattr(result, message)(*args, **kwargs) + for result in self._results) + + def startTest(self, test): + return self._dispatch('startTest', test) + + def stopTest(self, test): + return self._dispatch('stopTest', test) + + def addError(self, test, error=None, details=None): + return self._dispatch('addError', test, error, details=details) + + def addExpectedFailure(self, test, err=None, details=None): + return self._dispatch( + 'addExpectedFailure', test, err, details=details) + + def addFailure(self, test, err=None, details=None): + return self._dispatch('addFailure', test, err, details=details) + + def addSkip(self, test, reason=None, details=None): + return self._dispatch('addSkip', test, reason, details=details) + + def addSuccess(self, test, details=None): + return self._dispatch('addSuccess', test, details=details) + + def addUnexpectedSuccess(self, test, details=None): + return self._dispatch('addUnexpectedSuccess', test, details=details) + + def startTestRun(self): + return self._dispatch('startTestRun') + + def stopTestRun(self): + return self._dispatch('stopTestRun') + + def time(self, a_datetime): + return self._dispatch('time', a_datetime) + + def done(self): + return self._dispatch('done') + + def wasSuccessful(self): + """Was this result successful? + + Only returns True if every constituent result was successful. 
+ """ + return all(self._dispatch('wasSuccessful')) + + +class TextTestResult(TestResult): + """A TestResult which outputs activity to a text stream.""" + + def __init__(self, stream): + """Construct a TextTestResult writing to stream.""" + super(TextTestResult, self).__init__() + self.stream = stream + self.sep1 = '=' * 70 + '\n' + self.sep2 = '-' * 70 + '\n' + + def _delta_to_float(self, a_timedelta): + return (a_timedelta.days * 86400.0 + a_timedelta.seconds + + a_timedelta.microseconds / 1000000.0) + + def _show_list(self, label, error_list): + for test, output in error_list: + self.stream.write(self.sep1) + self.stream.write("%s: %s\n" % (label, test.id())) + self.stream.write(self.sep2) + self.stream.write(output) + + def startTestRun(self): + super(TextTestResult, self).startTestRun() + self.__start = self._now() + self.stream.write("Tests running...\n") + + def stopTestRun(self): + if self.testsRun != 1: + plural = 's' + else: + plural = '' + stop = self._now() + self._show_list('ERROR', self.errors) + self._show_list('FAIL', self.failures) + for test in self.unexpectedSuccesses: + self.stream.write( + "%sUNEXPECTED SUCCESS: %s\n%s" % ( + self.sep1, test.id(), self.sep2)) + self.stream.write("\nRan %d test%s in %.3fs\n" % + (self.testsRun, plural, + self._delta_to_float(stop - self.__start))) + if self.wasSuccessful(): + self.stream.write("OK\n") + else: + self.stream.write("FAILED (") + details = [] + details.append("failures=%d" % ( + sum(map(len, ( + self.failures, self.errors, self.unexpectedSuccesses))))) + self.stream.write(", ".join(details)) + self.stream.write(")\n") + super(TextTestResult, self).stopTestRun() + + +class ThreadsafeForwardingResult(TestResult): + """A TestResult which ensures the target does not receive mixed up calls. 
+ + This is used when receiving test results from multiple sources, and batches + up all the activity for a single test into a thread-safe batch where all + other ThreadsafeForwardingResult objects sharing the same semaphore will be + locked out. + + Typical use of ThreadsafeForwardingResult involves creating one + ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These + forward to the TestResult that the ConcurrentTestSuite run method was + called with. + + target.done() is called once for each ThreadsafeForwardingResult that + forwards to the same target. If the target's done() takes special action, + care should be taken to accommodate this. + """ + + def __init__(self, target, semaphore): + """Create a ThreadsafeForwardingResult forwarding to target. + + :param target: A TestResult. + :param semaphore: A threading.Semaphore with limit 1. + """ + TestResult.__init__(self) + self.result = ExtendedToOriginalDecorator(target) + self.semaphore = semaphore + + def _add_result_with_semaphore(self, method, test, *args, **kwargs): + self.semaphore.acquire() + try: + self.result.time(self._test_start) + self.result.startTest(test) + self.result.time(self._now()) + try: + method(test, *args, **kwargs) + finally: + self.result.stopTest(test) + finally: + self.semaphore.release() + + def addError(self, test, err=None, details=None): + self._add_result_with_semaphore(self.result.addError, + test, err, details=details) + + def addExpectedFailure(self, test, err=None, details=None): + self._add_result_with_semaphore(self.result.addExpectedFailure, + test, err, details=details) + + def addFailure(self, test, err=None, details=None): + self._add_result_with_semaphore(self.result.addFailure, + test, err, details=details) + + def addSkip(self, test, reason=None, details=None): + self._add_result_with_semaphore(self.result.addSkip, + test, reason, details=details) + + def addSuccess(self, test, details=None): + self._add_result_with_semaphore(self.result.addSuccess, 
+ test, details=details) + + def addUnexpectedSuccess(self, test, details=None): + self._add_result_with_semaphore(self.result.addUnexpectedSuccess, + test, details=details) + + def startTestRun(self): + self.semaphore.acquire() + try: + self.result.startTestRun() + finally: + self.semaphore.release() + + def stopTestRun(self): + self.semaphore.acquire() + try: + self.result.stopTestRun() + finally: + self.semaphore.release() + + def done(self): + self.semaphore.acquire() + try: + self.result.done() + finally: + self.semaphore.release() + + def startTest(self, test): + self._test_start = self._now() + super(ThreadsafeForwardingResult, self).startTest(test) + + def wasSuccessful(self): + return self.result.wasSuccessful() + + +class ExtendedToOriginalDecorator(object): + """Permit new TestResult API code to degrade gracefully with old results. + + This decorates an existing TestResult and converts missing outcomes + such as addSkip to older outcomes such as addSuccess. It also supports + the extended details protocol. In all cases the most recent protocol + is attempted first, and fallbacks only occur when the decorated result + does not support the newer style of calling. 
+ """ + + def __init__(self, decorated): + self.decorated = decorated + + def __getattr__(self, name): + return getattr(self.decorated, name) + + def addError(self, test, err=None, details=None): + self._check_args(err, details) + if details is not None: + try: + return self.decorated.addError(test, details=details) + except TypeError: + # have to convert + err = self._details_to_exc_info(details) + return self.decorated.addError(test, err) + + def addExpectedFailure(self, test, err=None, details=None): + self._check_args(err, details) + addExpectedFailure = getattr( + self.decorated, 'addExpectedFailure', None) + if addExpectedFailure is None: + return self.addSuccess(test) + if details is not None: + try: + return addExpectedFailure(test, details=details) + except TypeError: + # have to convert + err = self._details_to_exc_info(details) + return addExpectedFailure(test, err) + + def addFailure(self, test, err=None, details=None): + self._check_args(err, details) + if details is not None: + try: + return self.decorated.addFailure(test, details=details) + except TypeError: + # have to convert + err = self._details_to_exc_info(details) + return self.decorated.addFailure(test, err) + + def addSkip(self, test, reason=None, details=None): + self._check_args(reason, details) + addSkip = getattr(self.decorated, 'addSkip', None) + if addSkip is None: + return self.decorated.addSuccess(test) + if details is not None: + try: + return addSkip(test, details=details) + except TypeError: + # extract the reason if it's available + try: + reason = ''.join(details['reason'].iter_text()) + except KeyError: + reason = _details_to_str(details) + return addSkip(test, reason) + + def addUnexpectedSuccess(self, test, details=None): + outcome = getattr(self.decorated, 'addUnexpectedSuccess', None) + if outcome is None: + try: + test.fail("") + except test.failureException: + return self.addFailure(test, sys.exc_info()) + if details is not None: + try: + return outcome(test, 
details=details) + except TypeError: + pass + return outcome(test) + + def addSuccess(self, test, details=None): + if details is not None: + try: + return self.decorated.addSuccess(test, details=details) + except TypeError: + pass + return self.decorated.addSuccess(test) + + def _check_args(self, err, details): + param_count = 0 + if err is not None: + param_count += 1 + if details is not None: + param_count += 1 + if param_count != 1: + raise ValueError("Must pass only one of err '%s' and details '%s" + % (err, details)) + + def _details_to_exc_info(self, details): + """Convert a details dict to an exc_info tuple.""" + return ( + _StringException, + _StringException(_details_to_str(details, special='traceback')), + None) + + def done(self): + try: + return self.decorated.done() + except AttributeError: + return + + def progress(self, offset, whence): + method = getattr(self.decorated, 'progress', None) + if method is None: + return + return method(offset, whence) + + @property + def shouldStop(self): + return self.decorated.shouldStop + + def startTest(self, test): + return self.decorated.startTest(test) + + def startTestRun(self): + try: + return self.decorated.startTestRun() + except AttributeError: + return + + def stop(self): + return self.decorated.stop() + + def stopTest(self, test): + return self.decorated.stopTest(test) + + def stopTestRun(self): + try: + return self.decorated.stopTestRun() + except AttributeError: + return + + def tags(self, new_tags, gone_tags): + method = getattr(self.decorated, 'tags', None) + if method is None: + return + return method(new_tags, gone_tags) + + def time(self, a_datetime): + method = getattr(self.decorated, 'time', None) + if method is None: + return + return method(a_datetime) + + def wasSuccessful(self): + return self.decorated.wasSuccessful() + + +class _StringException(Exception): + """An exception made from an arbitrary string.""" + + if not str_is_unicode: + def __init__(self, string): + if type(string) is not 
unicode: + raise TypeError("_StringException expects unicode, got %r" % + (string,)) + Exception.__init__(self, string) + + def __str__(self): + return self.args[0].encode("utf-8") + + def __unicode__(self): + return self.args[0] + # For 3.0 and above the default __str__ is fine, so we don't define one. + + def __hash__(self): + return id(self) + + def __eq__(self, other): + try: + return self.args == other.args + except AttributeError: + return False + + +def _format_text_attachment(name, text): + if '\n' in text: + return "%s: {{{\n%s\n}}}\n" % (name, text) + return "%s: {{{%s}}}" % (name, text) + + +def _details_to_str(details, special=None): + """Convert a details dict to a string. + + :param details: A dictionary mapping short names to ``Content`` objects. + :param special: If specified, an attachment that should have special + attention drawn to it. The primary attachment. Normally it's the + traceback that caused the test to fail. + :return: A formatted string that can be included in text test results. + """ + empty_attachments = [] + binary_attachments = [] + text_attachments = [] + special_content = None + # sorted is for testing, may want to remove that and use a dict + # subclass with defined order for items instead. + for key, content in sorted(details.items()): + if content.content_type.type != 'text': + binary_attachments.append((key, content.content_type)) + continue + text = _u('').join(content.iter_text()).strip() + if not text: + empty_attachments.append(key) + continue + # We want the 'special' attachment to be at the bottom. 
+ if key == special: + special_content = '%s\n' % (text,) + continue + text_attachments.append(_format_text_attachment(key, text)) + if text_attachments and not text_attachments[-1].endswith('\n'): + text_attachments.append('') + if special_content: + text_attachments.append(special_content) + lines = [] + if binary_attachments: + lines.append('Binary content:\n') + for name, content_type in binary_attachments: + lines.append(' %s (%s)\n' % (name, content_type)) + if empty_attachments: + lines.append('Empty attachments:\n') + for name in empty_attachments: + lines.append(' %s\n' % (name,)) + if (binary_attachments or empty_attachments) and text_attachments: + lines.append('\n') + lines.append('\n'.join(text_attachments)) + return _u('').join(lines) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/__init__.py b/test/3rdparty/testtools-0.9.12/testtools/tests/__init__.py new file mode 100644 index 00000000000..1b1aa38a1f9 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/__init__.py @@ -0,0 +1,44 @@ +"""Tests for testtools itself.""" + +# See README for copyright and licensing details. 
+
+from unittest import TestSuite
+
+
+def test_suite():
+    from testtools.tests import (
+        test_compat,
+        test_content,
+        test_content_type,
+        test_deferredruntest,
+        test_distutilscmd,
+        test_fixturesupport,
+        test_helpers,
+        test_matchers,
+        test_monkey,
+        test_run,
+        test_runtest,
+        test_spinner,
+        test_testcase,
+        test_testresult,
+        test_testsuite,
+        )
+    modules = [
+        test_compat,
+        test_content,
+        test_content_type,
+        test_deferredruntest,
+        test_distutilscmd,
+        test_fixturesupport,
+        test_helpers,
+        test_matchers,
+        test_monkey,
+        test_run,
+        test_runtest,
+        test_spinner,
+        test_testcase,
+        test_testresult,
+        test_testsuite,
+        ]
+    suites = map(lambda x: x.test_suite(), modules)
+    return TestSuite(suites)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/helpers.py b/test/3rdparty/testtools-0.9.12/testtools/tests/helpers.py
new file mode 100644
index 00000000000..660cfecb72d
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.12/testtools/tests/helpers.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+"""Helpers for tests."""
+
+__all__ = [
+    'LoggingResult',
+    ]
+
+import sys
+
+from testtools import TestResult
+from testtools.helpers import (
+    safe_hasattr,
+    try_import,
+    )
+from testtools import runtest
+
+
+# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
+try:
+    raise Exception
+except Exception:
+    an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard-to-predict tests (because Python upstream keeps changing things).
+class LoggingResult(TestResult): + """TestResult that logs its event to a list.""" + + def __init__(self, log): + self._events = log + super(LoggingResult, self).__init__() + + def startTest(self, test): + self._events.append(('startTest', test)) + super(LoggingResult, self).startTest(test) + + def stopTest(self, test): + self._events.append(('stopTest', test)) + super(LoggingResult, self).stopTest(test) + + def addFailure(self, test, error): + self._events.append(('addFailure', test, error)) + super(LoggingResult, self).addFailure(test, error) + + def addError(self, test, error): + self._events.append(('addError', test, error)) + super(LoggingResult, self).addError(test, error) + + def addSkip(self, test, reason): + self._events.append(('addSkip', test, reason)) + super(LoggingResult, self).addSkip(test, reason) + + def addSuccess(self, test): + self._events.append(('addSuccess', test)) + super(LoggingResult, self).addSuccess(test) + + def startTestRun(self): + self._events.append('startTestRun') + super(LoggingResult, self).startTestRun() + + def stopTestRun(self): + self._events.append('stopTestRun') + super(LoggingResult, self).stopTestRun() + + def done(self): + self._events.append('done') + super(LoggingResult, self).done() + + def time(self, a_datetime): + self._events.append(('time', a_datetime)) + super(LoggingResult, self).time(a_datetime) + + +def is_stack_hidden(): + return safe_hasattr(runtest, '__unittest') + + +def hide_testtools_stack(should_hide=True): + modules = [ + 'testtools.matchers', + 'testtools.runtest', + 'testtools.testcase', + ] + result = is_stack_hidden() + for module_name in modules: + module = try_import(module_name) + if should_hide: + setattr(module, '__unittest', True) + else: + try: + delattr(module, '__unittest') + except AttributeError: + # Attribute already doesn't exist. Our work here is done. 
+ pass + return result + + +def run_with_stack_hidden(should_hide, f, *args, **kwargs): + old_should_hide = hide_testtools_stack(should_hide) + try: + return f(*args, **kwargs) + finally: + hide_testtools_stack(old_should_hide) + + + +class FullStackRunTest(runtest.RunTest): + + def _run_user(self, fn, *args, **kwargs): + return run_with_stack_hidden(False, fn, *args, **kwargs) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_compat.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_compat.py new file mode 100644 index 00000000000..5e385bf48ce --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_compat.py @@ -0,0 +1,394 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Tests for miscellaneous compatibility functions""" + +import linecache +import os +import sys +import tempfile +import traceback + +import testtools + +from testtools.compat import ( + _b, + _detect_encoding, + _get_source_encoding, + _u, + str_is_unicode, + text_repr, + unicode_output_stream, + ) +from testtools.matchers import ( + MatchesException, + Not, + Raises, + ) + + +class TestDetectEncoding(testtools.TestCase): + """Test detection of Python source encodings""" + + def _check_encoding(self, expected, lines, possibly_invalid=False): + """Check lines are valid Python and encoding is as expected""" + if not possibly_invalid: + compile(_b("".join(lines)), "<str>", "exec") + encoding = _detect_encoding(lines) + self.assertEqual(expected, encoding, + "Encoding %r expected but got %r from lines %r" % + (expected, encoding, lines)) + + def test_examples_from_pep(self): + """Check the examples given in PEP 263 all work as specified + + See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/> + """ + # With interpreter binary and using Emacs style file encoding comment: + self._check_encoding("latin-1", ( + "#!/usr/bin/python\n", + "# -*- coding: latin-1 -*-\n", + "import os, sys\n")) + self._check_encoding("iso-8859-15", 
( + "#!/usr/bin/python\n", + "# -*- coding: iso-8859-15 -*-\n", + "import os, sys\n")) + self._check_encoding("ascii", ( + "#!/usr/bin/python\n", + "# -*- coding: ascii -*-\n", + "import os, sys\n")) + # Without interpreter line, using plain text: + self._check_encoding("utf-8", ( + "# This Python file uses the following encoding: utf-8\n", + "import os, sys\n")) + # Text editors might have different ways of defining the file's + # encoding, e.g. + self._check_encoding("latin-1", ( + "#!/usr/local/bin/python\n", + "# coding: latin-1\n", + "import os, sys\n")) + # Without encoding comment, Python's parser will assume ASCII text: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "import os, sys\n")) + # Encoding comments which don't work: + # Missing "coding:" prefix: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "# latin-1\n", + "import os, sys\n")) + # Encoding comment not on line 1 or 2: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "#\n", + "# -*- coding: latin-1 -*-\n", + "import os, sys\n")) + # Unsupported encoding: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "# -*- coding: utf-42 -*-\n", + "import os, sys\n"), + possibly_invalid=True) + + def test_bom(self): + """Test the UTF-8 BOM counts as an encoding declaration""" + self._check_encoding("utf-8", ( + "\xef\xbb\xbfimport sys\n", + )) + self._check_encoding("utf-8", ( + "\xef\xbb\xbf# File encoding: UTF-8\n", + )) + self._check_encoding("utf-8", ( + '\xef\xbb\xbf"""Module docstring\n', + '\xef\xbb\xbfThat should just be a ZWNB"""\n')) + self._check_encoding("latin-1", ( + '"""Is this coding: latin-1 or coding: utf-8 instead?\n', + '\xef\xbb\xbfThose should be latin-1 bytes"""\n')) + self._check_encoding("utf-8", ( + "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n", + '"""Module docstring say \xe2\x98\x86"""\n')) + + def test_multiple_coding_comments(self): + """Test only the first of multiple coding 
declarations counts""" + self._check_encoding("iso-8859-1", ( + "# Is the coding: iso-8859-1\n", + "# Or is it coding: iso-8859-2\n"), + possibly_invalid=True) + self._check_encoding("iso-8859-1", ( + "#!/usr/bin/python\n", + "# Is the coding: iso-8859-1\n", + "# Or is it coding: iso-8859-2\n")) + self._check_encoding("iso-8859-1", ( + "# Is the coding: iso-8859-1 or coding: iso-8859-2\n", + "# Or coding: iso-8859-3 or coding: iso-8859-4\n"), + possibly_invalid=True) + self._check_encoding("iso-8859-2", ( + "# Is the coding iso-8859-1 or coding: iso-8859-2\n", + "# Spot the missing colon above\n")) + + +class TestGetSourceEncoding(testtools.TestCase): + """Test reading and caching the encodings of source files""" + + def setUp(self): + testtools.TestCase.setUp(self) + dir = tempfile.mkdtemp() + self.addCleanup(os.rmdir, dir) + self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py") + self._written = False + + def put_source(self, text): + f = open(self.filename, "w") + try: + f.write(text) + finally: + f.close() + if not self._written: + self._written = True + self.addCleanup(os.remove, self.filename) + self.addCleanup(linecache.cache.pop, self.filename, None) + + def test_nonexistant_file_as_ascii(self): + """When file can't be found, the encoding should default to ascii""" + self.assertEquals("ascii", _get_source_encoding(self.filename)) + + def test_encoding_is_cached(self): + """The encoding should stay the same if the cache isn't invalidated""" + self.put_source( + "# coding: iso-8859-13\n" + "import os\n") + self.assertEquals("iso-8859-13", _get_source_encoding(self.filename)) + self.put_source( + "# coding: rot-13\n" + "vzcbeg bf\n") + self.assertEquals("iso-8859-13", _get_source_encoding(self.filename)) + + def test_traceback_rechecks_encoding(self): + """A traceback function checks the cache and resets the encoding""" + self.put_source( + "# coding: iso-8859-8\n" + "import os\n") + self.assertEquals("iso-8859-8", 
_get_source_encoding(self.filename)) + self.put_source( + "# coding: utf-8\n" + "import os\n") + try: + exec (compile("raise RuntimeError\n", self.filename, "exec")) + except RuntimeError: + traceback.extract_tb(sys.exc_info()[2]) + else: + self.fail("RuntimeError not raised") + self.assertEquals("utf-8", _get_source_encoding(self.filename)) + + +class _FakeOutputStream(object): + """A simple file-like object for testing""" + + def __init__(self): + self.writelog = [] + + def write(self, obj): + self.writelog.append(obj) + + +class TestUnicodeOutputStream(testtools.TestCase): + """Test wrapping output streams so they work with arbitrary unicode""" + + uni = _u("pa\u026a\u03b8\u0259n") + + def setUp(self): + super(TestUnicodeOutputStream, self).setUp() + if sys.platform == "cli": + self.skip("IronPython shouldn't wrap streams to do encoding") + + def test_no_encoding_becomes_ascii(self): + """A stream with no encoding attribute gets ascii/replace strings""" + sout = _FakeOutputStream() + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa???n")], sout.writelog) + + def test_encoding_as_none_becomes_ascii(self): + """A stream with encoding value of None gets ascii/replace strings""" + sout = _FakeOutputStream() + sout.encoding = None + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa???n")], sout.writelog) + + def test_bogus_encoding_becomes_ascii(self): + """A stream with a bogus encoding gets ascii/replace strings""" + sout = _FakeOutputStream() + sout.encoding = "bogus" + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa???n")], sout.writelog) + + def test_partial_encoding_replace(self): + """A string which can be partly encoded correctly should be""" + sout = _FakeOutputStream() + sout.encoding = "iso-8859-7" + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa?\xe8?n")], sout.writelog) + + @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode") + def 
test_unicode_encodings_wrapped_when_str_is_not_unicode(self): + """A unicode encoding is wrapped but needs no error handler""" + sout = _FakeOutputStream() + sout.encoding = "utf-8" + uout = unicode_output_stream(sout) + self.assertEqual(uout.errors, "strict") + uout.write(self.uni) + self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog) + + @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode") + def test_unicode_encodings_not_wrapped_when_str_is_unicode(self): + # No wrapping needed if native str type is unicode + sout = _FakeOutputStream() + sout.encoding = "utf-8" + uout = unicode_output_stream(sout) + self.assertIs(uout, sout) + + def test_stringio(self): + """A StringIO object should maybe get an ascii native str type""" + try: + from cStringIO import StringIO + newio = False + except ImportError: + from io import StringIO + newio = True + sout = StringIO() + soutwrapper = unicode_output_stream(sout) + if newio: + self.expectFailure("Python 3 StringIO expects text not bytes", + self.assertThat, lambda: soutwrapper.write(self.uni), + Not(Raises(MatchesException(TypeError)))) + soutwrapper.write(self.uni) + self.assertEqual("pa???n", sout.getvalue()) + + +class TestTextRepr(testtools.TestCase): + """Ensure in extending repr, basic behaviours are not being broken""" + + ascii_examples = ( + # Single character examples + # C0 control codes should be escaped except multiline \n + ("\x00", "'\\x00'", "'''\\\n\\x00'''"), + ("\b", "'\\x08'", "'''\\\n\\x08'''"), + ("\t", "'\\t'", "'''\\\n\\t'''"), + ("\n", "'\\n'", "'''\\\n\n'''"), + ("\r", "'\\r'", "'''\\\n\\r'''"), + # Quotes and backslash should match normal repr behaviour + ('"', "'\"'", "'''\\\n\"'''"), + ("'", "\"'\"", "'''\\\n\\''''"), + ("\\", "'\\\\'", "'''\\\n\\\\'''"), + # DEL is also unprintable and should be escaped + ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"), + + # Character combinations that need double checking + ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"), + ("\"'", 
"'\"\\''", "'''\\\n\"\\''''"), + ("'\"", "'\\'\"'", "'''\\\n'\"'''"), + ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"), + ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"), + ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"), + ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"), + ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"), + ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"), + ) + + # Bytes with the high bit set should always be escaped + bytes_examples = ( + (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"), + (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"), + (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"), + (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"), + (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"), + ) + + # Unicode doesn't escape printable characters as per the Python 3 model + unicode_examples = ( + # C1 codes are unprintable + (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"), + (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"), + # No-break space is unprintable + (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"), + # Letters latin alphabets are printable + (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")), + (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")), + (_u("\u0100"), _u("'\u0100'"), _u("'''\\\n\u0100'''")), + # Line and paragraph seperators are unprintable + (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"), + (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"), + # Unpaired surrogates are unprintable + (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"), + (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"), + # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs + ) + + b_prefix = repr(_b(""))[:-2] + u_prefix = repr(_u(""))[:-2] + + def test_ascii_examples_oneline_bytes(self): + for s, expected, _ in self.ascii_examples: + b = _b(s) + actual = text_repr(b, multiline=False) + # Add self.assertIsInstance check? 
+ self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_ascii_examples_oneline_unicode(self): + for s, expected, _ in self.ascii_examples: + u = _u(s) + actual = text_repr(u, multiline=False) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + def test_ascii_examples_multiline_bytes(self): + for s, _, expected in self.ascii_examples: + b = _b(s) + actual = text_repr(b, multiline=True) + self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_ascii_examples_multiline_unicode(self): + for s, _, expected in self.ascii_examples: + u = _u(s) + actual = text_repr(u, multiline=True) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + def test_ascii_examples_defaultline_bytes(self): + for s, one, multi in self.ascii_examples: + expected = "\n" in s and multi or one + self.assertEqual(text_repr(_b(s)), self.b_prefix + expected) + + def test_ascii_examples_defaultline_unicode(self): + for s, one, multi in self.ascii_examples: + expected = "\n" in s and multi or one + self.assertEqual(text_repr(_u(s)), self.u_prefix + expected) + + def test_bytes_examples_oneline(self): + for b, expected, _ in self.bytes_examples: + actual = text_repr(b, multiline=False) + self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_bytes_examples_multiline(self): + for b, _, expected in self.bytes_examples: + actual = text_repr(b, multiline=True) + self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_unicode_examples_oneline(self): + for u, expected, _ in self.unicode_examples: + actual = text_repr(u, multiline=False) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + def test_unicode_examples_multiline(self): + for u, _, expected in self.unicode_examples: + actual = text_repr(u, multiline=True) + 
self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_content.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_content.py new file mode 100644 index 00000000000..14f400f04ec --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_content.py @@ -0,0 +1,227 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. + +import os +import tempfile +import unittest + +from testtools import TestCase +from testtools.compat import ( + _b, + _u, + StringIO, + ) +from testtools.content import ( + attach_file, + Content, + content_from_file, + content_from_stream, + TracebackContent, + text_content, + ) +from testtools.content_type import ( + ContentType, + UTF8_TEXT, + ) +from testtools.matchers import ( + Equals, + MatchesException, + Raises, + raises, + ) +from testtools.tests.helpers import an_exc_info + + +raises_value_error = Raises(MatchesException(ValueError)) + + +class TestContent(TestCase): + + def test___init___None_errors(self): + self.assertThat(lambda: Content(None, None), raises_value_error) + self.assertThat( + lambda: Content(None, lambda: ["traceback"]), raises_value_error) + self.assertThat( + lambda: Content(ContentType("text", "traceback"), None), + raises_value_error) + + def test___init___sets_ivars(self): + content_type = ContentType("foo", "bar") + content = Content(content_type, lambda: ["bytes"]) + self.assertEqual(content_type, content.content_type) + self.assertEqual(["bytes"], list(content.iter_bytes())) + + def test___eq__(self): + content_type = ContentType("foo", "bar") + one_chunk = lambda: [_b("bytes")] + two_chunk = lambda: [_b("by"), _b("tes")] + content1 = Content(content_type, one_chunk) + content2 = Content(content_type, one_chunk) + content3 = Content(content_type, two_chunk) + content4 = 
Content(content_type, lambda: [_b("by"), _b("te")]) + content5 = Content(ContentType("f", "b"), two_chunk) + self.assertEqual(content1, content2) + self.assertEqual(content1, content3) + self.assertNotEqual(content1, content4) + self.assertNotEqual(content1, content5) + + def test___repr__(self): + content = Content(ContentType("application", "octet-stream"), + lambda: [_b("\x00bin"), _b("ary\xff")]) + self.assertIn("\\x00binary\\xff", repr(content)) + + def test_iter_text_not_text_errors(self): + content_type = ContentType("foo", "bar") + content = Content(content_type, lambda: ["bytes"]) + self.assertThat(content.iter_text, raises_value_error) + + def test_iter_text_decodes(self): + content_type = ContentType("text", "strange", {"charset": "utf8"}) + content = Content( + content_type, lambda: [_u("bytes\xea").encode("utf8")]) + self.assertEqual([_u("bytes\xea")], list(content.iter_text())) + + def test_iter_text_default_charset_iso_8859_1(self): + content_type = ContentType("text", "strange") + text = _u("bytes\xea") + iso_version = text.encode("ISO-8859-1") + content = Content(content_type, lambda: [iso_version]) + self.assertEqual([text], list(content.iter_text())) + + def test_from_file(self): + fd, path = tempfile.mkstemp() + self.addCleanup(os.remove, path) + os.write(fd, _b('some data')) + os.close(fd) + content = content_from_file(path, UTF8_TEXT, chunk_size=2) + self.assertThat( + list(content.iter_bytes()), + Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')])) + + def test_from_nonexistent_file(self): + directory = tempfile.mkdtemp() + nonexistent = os.path.join(directory, 'nonexistent-file') + content = content_from_file(nonexistent) + self.assertThat(content.iter_bytes, raises(IOError)) + + def test_from_file_default_type(self): + content = content_from_file('/nonexistent/path') + self.assertThat(content.content_type, Equals(UTF8_TEXT)) + + def test_from_file_eager_loading(self): + fd, path = tempfile.mkstemp() + os.write(fd, _b('some data')) + 
os.close(fd) + content = content_from_file(path, UTF8_TEXT, buffer_now=True) + os.remove(path) + self.assertThat( + ''.join(content.iter_text()), Equals('some data')) + + def test_from_stream(self): + data = StringIO('some data') + content = content_from_stream(data, UTF8_TEXT, chunk_size=2) + self.assertThat( + list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a'])) + + def test_from_stream_default_type(self): + data = StringIO('some data') + content = content_from_stream(data) + self.assertThat(content.content_type, Equals(UTF8_TEXT)) + + def test_from_stream_eager_loading(self): + fd, path = tempfile.mkstemp() + self.addCleanup(os.remove, path) + os.write(fd, _b('some data')) + stream = open(path, 'rb') + content = content_from_stream(stream, UTF8_TEXT, buffer_now=True) + os.write(fd, _b('more data')) + os.close(fd) + self.assertThat( + ''.join(content.iter_text()), Equals('some data')) + + def test_from_text(self): + data = _u("some data") + expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')]) + self.assertEqual(expected, text_content(data)) + + +class TestTracebackContent(TestCase): + + def test___init___None_errors(self): + self.assertThat( + lambda: TracebackContent(None, None), raises_value_error) + + def test___init___sets_ivars(self): + content = TracebackContent(an_exc_info, self) + content_type = ContentType("text", "x-traceback", + {"language": "python", "charset": "utf8"}) + self.assertEqual(content_type, content.content_type) + result = unittest.TestResult() + expected = result._exc_info_to_string(an_exc_info, self) + self.assertEqual(expected, ''.join(list(content.iter_text()))) + + +class TestAttachFile(TestCase): + + def make_file(self, data): + # GZ 2011-04-21: This helper could be useful for methods above trying + # to use mkstemp, but should handle write failures and + # always close the fd. There must be a better way. 
+ fd, path = tempfile.mkstemp() + self.addCleanup(os.remove, path) + os.write(fd, _b(data)) + os.close(fd) + return path + + def test_simple(self): + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + data = 'some data' + path = self.make_file(data) + my_content = text_content(data) + attach_file(test, path, name='foo') + self.assertEqual({'foo': my_content}, test.getDetails()) + + def test_optional_name(self): + # If no name is provided, attach_file just uses the base name of the + # file. + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + path = self.make_file('some data') + base_path = os.path.basename(path) + attach_file(test, path) + self.assertEqual([base_path], list(test.getDetails())) + + def test_lazy_read(self): + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + path = self.make_file('some data') + attach_file(test, path, name='foo', buffer_now=False) + content = test.getDetails()['foo'] + content_file = open(path, 'w') + content_file.write('new data') + content_file.close() + self.assertEqual(''.join(content.iter_text()), 'new data') + + def test_eager_read_by_default(self): + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + path = self.make_file('some data') + attach_file(test, path, name='foo') + content = test.getDetails()['foo'] + content_file = open(path, 'w') + content_file.write('new data') + content_file.close() + self.assertEqual(''.join(content.iter_text()), 'some data') + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_content_type.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_content_type.py new file mode 100644 index 00000000000..9d8c0f6f7af --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_content_type.py @@ -0,0 +1,56 @@ +# Copyright (c) 
2008 testtools developers. See LICENSE for details. + +from testtools import TestCase +from testtools.matchers import Equals, MatchesException, Raises +from testtools.content_type import ContentType, UTF8_TEXT + + +class TestContentType(TestCase): + + def test___init___None_errors(self): + raises_value_error = Raises(MatchesException(ValueError)) + self.assertThat(lambda:ContentType(None, None), raises_value_error) + self.assertThat(lambda:ContentType(None, "traceback"), + raises_value_error) + self.assertThat(lambda:ContentType("text", None), raises_value_error) + + def test___init___sets_ivars(self): + content_type = ContentType("foo", "bar") + self.assertEqual("foo", content_type.type) + self.assertEqual("bar", content_type.subtype) + self.assertEqual({}, content_type.parameters) + + def test___init___with_parameters(self): + content_type = ContentType("foo", "bar", {"quux": "thing"}) + self.assertEqual({"quux": "thing"}, content_type.parameters) + + def test___eq__(self): + content_type1 = ContentType("foo", "bar", {"quux": "thing"}) + content_type2 = ContentType("foo", "bar", {"quux": "thing"}) + content_type3 = ContentType("foo", "bar", {"quux": "thing2"}) + self.assertTrue(content_type1.__eq__(content_type2)) + self.assertFalse(content_type1.__eq__(content_type3)) + + def test_basic_repr(self): + content_type = ContentType('text', 'plain') + self.assertThat(repr(content_type), Equals('text/plain')) + + def test_extended_repr(self): + content_type = ContentType( + 'text', 'plain', {'foo': 'bar', 'baz': 'qux'}) + self.assertThat( + repr(content_type), Equals('text/plain; foo="bar", baz="qux"')) + + +class TestBuiltinContentTypes(TestCase): + + def test_plain_text(self): + # The UTF8_TEXT content type represents UTF-8 encoded text/plain. 
+ self.assertThat(UTF8_TEXT.type, Equals('text')) + self.assertThat(UTF8_TEXT.subtype, Equals('plain')) + self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'})) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_deferredruntest.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_deferredruntest.py new file mode 100644 index 00000000000..ab0fd87890a --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_deferredruntest.py @@ -0,0 +1,753 @@ +# Copyright (c) 2010-2011 testtools developers. See LICENSE for details. + +"""Tests for the DeferredRunTest single test execution logic.""" + +import os +import signal + +from testtools import ( + skipIf, + TestCase, + TestResult, + ) +from testtools.content import ( + text_content, + ) +from testtools.helpers import try_import +from testtools.matchers import ( + Equals, + KeysEqual, + MatchesException, + Raises, + ) +from testtools.runtest import RunTest +from testtools.testresult.doubles import ExtendedTestResult +from testtools.tests.test_spinner import NeedsTwistedTestCase + +assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with') +AsynchronousDeferredRunTest = try_import( + 'testtools.deferredruntest.AsynchronousDeferredRunTest') +flush_logged_errors = try_import( + 'testtools.deferredruntest.flush_logged_errors') +SynchronousDeferredRunTest = try_import( + 'testtools.deferredruntest.SynchronousDeferredRunTest') + +defer = try_import('twisted.internet.defer') +failure = try_import('twisted.python.failure') +log = try_import('twisted.python.log') +DelayedCall = try_import('twisted.internet.base.DelayedCall') + + +class X(object): + """Tests that we run as part of our tests, nested to avoid discovery.""" + + class Base(TestCase): + def setUp(self): + super(X.Base, self).setUp() + self.calls = ['setUp'] + self.addCleanup(self.calls.append, 'clean-up') + 
def test_something(self): + self.calls.append('test') + def tearDown(self): + self.calls.append('tearDown') + super(X.Base, self).tearDown() + + class ErrorInSetup(Base): + expected_calls = ['setUp', 'clean-up'] + expected_results = [('addError', RuntimeError)] + def setUp(self): + super(X.ErrorInSetup, self).setUp() + raise RuntimeError("Error in setUp") + + class ErrorInTest(Base): + expected_calls = ['setUp', 'tearDown', 'clean-up'] + expected_results = [('addError', RuntimeError)] + def test_something(self): + raise RuntimeError("Error in test") + + class FailureInTest(Base): + expected_calls = ['setUp', 'tearDown', 'clean-up'] + expected_results = [('addFailure', AssertionError)] + def test_something(self): + self.fail("test failed") + + class ErrorInTearDown(Base): + expected_calls = ['setUp', 'test', 'clean-up'] + expected_results = [('addError', RuntimeError)] + def tearDown(self): + raise RuntimeError("Error in tearDown") + + class ErrorInCleanup(Base): + expected_calls = ['setUp', 'test', 'tearDown', 'clean-up'] + expected_results = [('addError', ZeroDivisionError)] + def test_something(self): + self.calls.append('test') + self.addCleanup(lambda: 1/0) + + class TestIntegration(NeedsTwistedTestCase): + + def assertResultsMatch(self, test, result): + events = list(result._events) + self.assertEqual(('startTest', test), events.pop(0)) + for expected_result in test.expected_results: + result = events.pop(0) + if len(expected_result) == 1: + self.assertEqual((expected_result[0], test), result) + else: + self.assertEqual((expected_result[0], test), result[:2]) + error_type = expected_result[1] + self.assertIn(error_type.__name__, str(result[2])) + self.assertEqual([('stopTest', test)], events) + + def test_runner(self): + result = ExtendedTestResult() + test = self.test_factory('test_something', runTest=self.runner) + test.run(result) + self.assertEqual(test.calls, self.test_factory.expected_calls) + self.assertResultsMatch(test, result) + + +def 
make_integration_tests(): + from unittest import TestSuite + from testtools import clone_test_with_new_id + runners = [ + ('RunTest', RunTest), + ('SynchronousDeferredRunTest', SynchronousDeferredRunTest), + ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest), + ] + + tests = [ + X.ErrorInSetup, + X.ErrorInTest, + X.ErrorInTearDown, + X.FailureInTest, + X.ErrorInCleanup, + ] + base_test = X.TestIntegration('test_runner') + integration_tests = [] + for runner_name, runner in runners: + for test in tests: + new_test = clone_test_with_new_id( + base_test, '%s(%s, %s)' % ( + base_test.id(), + runner_name, + test.__name__)) + new_test.test_factory = test + new_test.runner = runner + integration_tests.append(new_test) + return TestSuite(integration_tests) + + +class TestSynchronousDeferredRunTest(NeedsTwistedTestCase): + + def make_result(self): + return ExtendedTestResult() + + def make_runner(self, test): + return SynchronousDeferredRunTest(test, test.exception_handlers) + + def test_success(self): + class SomeCase(TestCase): + def test_success(self): + return defer.succeed(None) + test = SomeCase('test_success') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + result._events, Equals([ + ('startTest', test), + ('addSuccess', test), + ('stopTest', test)])) + + def test_failure(self): + class SomeCase(TestCase): + def test_failure(self): + return defer.maybeDeferred(self.fail, "Egads!") + test = SomeCase('test_failure') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], Equals([ + ('startTest', test), + ('addFailure', test), + ('stopTest', test)])) + + def test_setUp_followed_by_test(self): + class SomeCase(TestCase): + def setUp(self): + super(SomeCase, self).setUp() + return defer.succeed(None) + def test_failure(self): + return defer.maybeDeferred(self.fail, "Egads!") + test = SomeCase('test_failure') + 
runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], Equals([ + ('startTest', test), + ('addFailure', test), + ('stopTest', test)])) + + +class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase): + + def make_reactor(self): + from twisted.internet import reactor + return reactor + + def make_result(self): + return ExtendedTestResult() + + def make_runner(self, test, timeout=None): + if timeout is None: + timeout = self.make_timeout() + return AsynchronousDeferredRunTest( + test, test.exception_handlers, timeout=timeout) + + def make_timeout(self): + return 0.005 + + def test_setUp_returns_deferred_that_fires_later(self): + # setUp can return a Deferred that might fire at any time. + # AsynchronousDeferredRunTest will not go on to running the test until + # the Deferred returned by setUp actually fires. + call_log = [] + marker = object() + d = defer.Deferred().addCallback(call_log.append) + class SomeCase(TestCase): + def setUp(self): + super(SomeCase, self).setUp() + call_log.append('setUp') + return d + def test_something(self): + call_log.append('test') + def fire_deferred(): + self.assertThat(call_log, Equals(['setUp'])) + d.callback(marker) + test = SomeCase('test_something') + timeout = self.make_timeout() + runner = self.make_runner(test, timeout=timeout) + result = self.make_result() + reactor = self.make_reactor() + reactor.callLater(timeout, fire_deferred) + runner.run(result) + self.assertThat(call_log, Equals(['setUp', marker, 'test'])) + + def test_calls_setUp_test_tearDown_in_sequence(self): + # setUp, the test method and tearDown can all return + # Deferreds. AsynchronousDeferredRunTest will make sure that each of + # these are run in turn, only going on to the next stage once the + # Deferred from the previous stage has fired. 
+ call_log = [] + a = defer.Deferred() + a.addCallback(lambda x: call_log.append('a')) + b = defer.Deferred() + b.addCallback(lambda x: call_log.append('b')) + c = defer.Deferred() + c.addCallback(lambda x: call_log.append('c')) + class SomeCase(TestCase): + def setUp(self): + super(SomeCase, self).setUp() + call_log.append('setUp') + return a + def test_success(self): + call_log.append('test') + return b + def tearDown(self): + super(SomeCase, self).tearDown() + call_log.append('tearDown') + return c + test = SomeCase('test_success') + timeout = self.make_timeout() + runner = self.make_runner(test, timeout) + result = self.make_result() + reactor = self.make_reactor() + def fire_a(): + self.assertThat(call_log, Equals(['setUp'])) + a.callback(None) + def fire_b(): + self.assertThat(call_log, Equals(['setUp', 'a', 'test'])) + b.callback(None) + def fire_c(): + self.assertThat( + call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown'])) + c.callback(None) + reactor.callLater(timeout * 0.25, fire_a) + reactor.callLater(timeout * 0.5, fire_b) + reactor.callLater(timeout * 0.75, fire_c) + runner.run(result) + self.assertThat( + call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c'])) + + def test_async_cleanups(self): + # Cleanups added with addCleanup can return + # Deferreds. AsynchronousDeferredRunTest will run each of them in + # turn. 
+ class SomeCase(TestCase): + def test_whatever(self): + pass + test = SomeCase('test_whatever') + call_log = [] + a = defer.Deferred().addCallback(lambda x: call_log.append('a')) + b = defer.Deferred().addCallback(lambda x: call_log.append('b')) + c = defer.Deferred().addCallback(lambda x: call_log.append('c')) + test.addCleanup(lambda: a) + test.addCleanup(lambda: b) + test.addCleanup(lambda: c) + def fire_a(): + self.assertThat(call_log, Equals([])) + a.callback(None) + def fire_b(): + self.assertThat(call_log, Equals(['a'])) + b.callback(None) + def fire_c(): + self.assertThat(call_log, Equals(['a', 'b'])) + c.callback(None) + timeout = self.make_timeout() + reactor = self.make_reactor() + reactor.callLater(timeout * 0.25, fire_a) + reactor.callLater(timeout * 0.5, fire_b) + reactor.callLater(timeout * 0.75, fire_c) + runner = self.make_runner(test, timeout) + result = self.make_result() + runner.run(result) + self.assertThat(call_log, Equals(['a', 'b', 'c'])) + + def test_clean_reactor(self): + # If there's cruft left over in the reactor, the test fails. + reactor = self.make_reactor() + timeout = self.make_timeout() + class SomeCase(TestCase): + def test_cruft(self): + reactor.callLater(timeout * 10.0, lambda: None) + test = SomeCase('test_cruft') + runner = self.make_runner(test, timeout) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals( + [('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('traceback', 'twisted-log')) + + def test_exports_reactor(self): + # The reactor is set as an attribute on the test case. 
+ reactor = self.make_reactor() + timeout = self.make_timeout() + class SomeCase(TestCase): + def test_cruft(self): + self.assertIs(reactor, self.reactor) + test = SomeCase('test_cruft') + runner = self.make_runner(test, timeout) + result = TestResult() + runner.run(result) + self.assertEqual([], result.errors) + self.assertEqual([], result.failures) + + def test_unhandled_error_from_deferred(self): + # If there's a Deferred with an unhandled error, the test fails. Each + # unhandled error is reported with a separate traceback. + class SomeCase(TestCase): + def test_cruft(self): + # Note we aren't returning the Deferred so that the error will + # be unhandled. + defer.maybeDeferred(lambda: 1/0) + defer.maybeDeferred(lambda: 2/0) + test = SomeCase('test_cruft') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + error = result._events[1][2] + result._events[1] = ('addError', test, None) + self.assertThat(result._events, Equals( + [('startTest', test), + ('addError', test, None), + ('stopTest', test)])) + self.assertThat( + error, KeysEqual( + 'twisted-log', + 'unhandled-error-in-deferred', + 'unhandled-error-in-deferred-1', + )) + + def test_unhandled_error_from_deferred_combined_with_error(self): + # If there's a Deferred with an unhandled error, the test fails. Each + # unhandled error is reported with a separate traceback, and the error + # is still reported. + class SomeCase(TestCase): + def test_cruft(self): + # Note we aren't returning the Deferred so that the error will + # be unhandled. 
+ defer.maybeDeferred(lambda: 1/0) + 2 / 0 + test = SomeCase('test_cruft') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + error = result._events[1][2] + result._events[1] = ('addError', test, None) + self.assertThat(result._events, Equals( + [('startTest', test), + ('addError', test, None), + ('stopTest', test)])) + self.assertThat( + error, KeysEqual( + 'traceback', + 'twisted-log', + 'unhandled-error-in-deferred', + )) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_keyboard_interrupt_stops_test_run(self): + # If we get a SIGINT during a test run, the test stops and no more + # tests run. + SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + raise self.skipTest("SIGINT unavailable") + class SomeCase(TestCase): + def test_pause(self): + return defer.Deferred() + test = SomeCase('test_pause') + reactor = self.make_reactor() + timeout = self.make_timeout() + runner = self.make_runner(test, timeout * 5) + result = self.make_result() + reactor.callLater(timeout, os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:runner.run(result), + Raises(MatchesException(KeyboardInterrupt))) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_fast_keyboard_interrupt_stops_test_run(self): + # If we get a SIGINT during a test run, the test stops and no more + # tests run. 
+ SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + raise self.skipTest("SIGINT unavailable") + class SomeCase(TestCase): + def test_pause(self): + return defer.Deferred() + test = SomeCase('test_pause') + reactor = self.make_reactor() + timeout = self.make_timeout() + runner = self.make_runner(test, timeout * 5) + result = self.make_result() + reactor.callWhenRunning(os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:runner.run(result), + Raises(MatchesException(KeyboardInterrupt))) + + def test_timeout_causes_test_error(self): + # If a test times out, it reports itself as having failed with a + # TimeoutError. + class SomeCase(TestCase): + def test_pause(self): + return defer.Deferred() + test = SomeCase('test_pause') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + error = result._events[1][2] + self.assertThat( + [event[:2] for event in result._events], Equals( + [('startTest', test), + ('addError', test), + ('stopTest', test)])) + self.assertIn('TimeoutError', str(error['traceback'])) + + def test_convenient_construction(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. + reactor = object() + timeout = object() + handler = object() + factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout) + runner = factory(self, [handler]) + self.assertIs(reactor, runner._reactor) + self.assertIs(timeout, runner._timeout) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + + def test_use_convenient_factory(self): + # Make sure that the factory can actually be used. 
+ factory = AsynchronousDeferredRunTest.make_factory() + class SomeCase(TestCase): + run_tests_with = factory + def test_something(self): + pass + case = SomeCase('test_something') + case.run() + + def test_convenient_construction_default_reactor(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. + reactor = object() + handler = object() + factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor) + runner = factory(self, [handler]) + self.assertIs(reactor, runner._reactor) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + + def test_convenient_construction_default_timeout(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. + timeout = object() + handler = object() + factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout) + runner = factory(self, [handler]) + self.assertIs(timeout, runner._timeout) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + + def test_convenient_construction_default_debugging(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. 
+ handler = object() + factory = AsynchronousDeferredRunTest.make_factory(debug=True) + runner = factory(self, [handler]) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + self.assertEqual(True, runner._debug) + + def test_deferred_error(self): + class SomeTest(TestCase): + def test_something(self): + return defer.maybeDeferred(lambda: 1/0) + test = SomeTest('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('traceback', 'twisted-log')) + + def test_only_addError_once(self): + # Even if the reactor is unclean and the test raises an error and the + # cleanups raise errors, we only called addError once per test. + reactor = self.make_reactor() + class WhenItRains(TestCase): + def it_pours(self): + # Add a dirty cleanup. + self.addCleanup(lambda: 3 / 0) + # Dirty the reactor. + from twisted.internet.protocol import ServerFactory + reactor.listenTCP(0, ServerFactory()) + # Unhandled error. + defer.maybeDeferred(lambda: 2 / 0) + # Actual error. + raise RuntimeError("Excess precipitation") + test = WhenItRains('it_pours') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat( + error, KeysEqual( + 'traceback', + 'traceback-1', + 'traceback-2', + 'twisted-log', + 'unhandled-error-in-deferred', + )) + + def test_log_err_is_error(self): + # An error logged during the test run is recorded as an error in the + # tests. 
+ class LogAnError(TestCase): + def test_something(self): + try: + 1/0 + except ZeroDivisionError: + f = failure.Failure() + log.err(f) + test = LogAnError('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('logged-error', 'twisted-log')) + + def test_log_err_flushed_is_success(self): + # An error logged during the test run is recorded as an error in the + # tests. + class LogAnError(TestCase): + def test_something(self): + try: + 1/0 + except ZeroDivisionError: + f = failure.Failure() + log.err(f) + flush_logged_errors(ZeroDivisionError) + test = LogAnError('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + result._events, + Equals([ + ('startTest', test), + ('addSuccess', test, {'twisted-log': text_content('')}), + ('stopTest', test)])) + + def test_log_in_details(self): + class LogAnError(TestCase): + def test_something(self): + log.msg("foo") + 1/0 + test = LogAnError('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('traceback', 'twisted-log')) + + def test_debugging_unchanged_during_test_by_default(self): + debugging = [(defer.Deferred.debug, DelayedCall.debug)] + class SomeCase(TestCase): + def test_debugging_enabled(self): + debugging.append((defer.Deferred.debug, DelayedCall.debug)) + test = SomeCase('test_debugging_enabled') + runner = AsynchronousDeferredRunTest( + test, handlers=test.exception_handlers, + reactor=self.make_reactor(), timeout=self.make_timeout()) + 
runner.run(self.make_result()) + self.assertEqual(debugging[0], debugging[1]) + + def test_debugging_enabled_during_test_with_debug_flag(self): + self.patch(defer.Deferred, 'debug', False) + self.patch(DelayedCall, 'debug', False) + debugging = [] + class SomeCase(TestCase): + def test_debugging_enabled(self): + debugging.append((defer.Deferred.debug, DelayedCall.debug)) + test = SomeCase('test_debugging_enabled') + runner = AsynchronousDeferredRunTest( + test, handlers=test.exception_handlers, + reactor=self.make_reactor(), timeout=self.make_timeout(), + debug=True) + runner.run(self.make_result()) + self.assertEqual([(True, True)], debugging) + self.assertEqual(False, defer.Deferred.debug) + self.assertEqual(False, defer.Deferred.debug) + + +class TestAssertFailsWith(NeedsTwistedTestCase): + """Tests for `assert_fails_with`.""" + + if SynchronousDeferredRunTest is not None: + run_tests_with = SynchronousDeferredRunTest + + def test_assert_fails_with_success(self): + # assert_fails_with fails the test if it's given a Deferred that + # succeeds. + marker = object() + d = assert_fails_with(defer.succeed(marker), RuntimeError) + def check_result(failure): + failure.trap(self.failureException) + self.assertThat( + str(failure.value), + Equals("RuntimeError not raised (%r returned)" % (marker,))) + d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + return d + + def test_assert_fails_with_success_multiple_types(self): + # assert_fails_with fails the test if it's given a Deferred that + # succeeds. 
+ marker = object() + d = assert_fails_with( + defer.succeed(marker), RuntimeError, ZeroDivisionError) + def check_result(failure): + failure.trap(self.failureException) + self.assertThat( + str(failure.value), + Equals("RuntimeError, ZeroDivisionError not raised " + "(%r returned)" % (marker,))) + d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + return d + + def test_assert_fails_with_wrong_exception(self): + # assert_fails_with fails the test if it's given a Deferred that + # succeeds. + d = assert_fails_with( + defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt) + def check_result(failure): + failure.trap(self.failureException) + lines = str(failure.value).splitlines() + self.assertThat( + lines[:2], + Equals([ + ("ZeroDivisionError raised instead of RuntimeError, " + "KeyboardInterrupt:"), + " Traceback (most recent call last):", + ])) + d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + return d + + def test_assert_fails_with_expected_exception(self): + # assert_fails_with calls back with the value of the failure if it's + # one of the expected types of failures. + try: + 1/0 + except ZeroDivisionError: + f = failure.Failure() + d = assert_fails_with(defer.fail(f), ZeroDivisionError) + return d.addCallback(self.assertThat, Equals(f.value)) + + def test_custom_failure_exception(self): + # If assert_fails_with is passed a 'failureException' keyword + # argument, then it will raise that instead of `AssertionError`. 
+ class CustomException(Exception): + pass + marker = object() + d = assert_fails_with( + defer.succeed(marker), RuntimeError, + failureException=CustomException) + def check_result(failure): + failure.trap(CustomException) + self.assertThat( + str(failure.value), + Equals("RuntimeError not raised (%r returned)" % (marker,))) + return d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + + +def test_suite(): + from unittest import TestLoader, TestSuite + return TestSuite( + [TestLoader().loadTestsFromName(__name__), + make_integration_tests()]) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_distutilscmd.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_distutilscmd.py new file mode 100644 index 00000000000..c485a473d39 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_distutilscmd.py @@ -0,0 +1,98 @@ +# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details. + +"""Tests for the distutils test command logic.""" + +from distutils.dist import Distribution + +from testtools.compat import ( + _b, + BytesIO, + ) +from testtools.helpers import try_import +fixtures = try_import('fixtures') + +import testtools +from testtools import TestCase +from testtools.distutilscmd import TestCommand +from testtools.matchers import MatchesRegex + + +if fixtures: + class SampleTestFixture(fixtures.Fixture): + """Creates testtools.runexample temporarily.""" + + def __init__(self): + self.package = fixtures.PythonPackage( + 'runexample', [('__init__.py', _b(""" +from testtools import TestCase + +class TestFoo(TestCase): + def test_bar(self): + pass + def test_quux(self): + pass +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) +"""))]) + + def setUp(self): + super(SampleTestFixture, self).setUp() + self.useFixture(self.package) + testtools.__path__.append(self.package.base) + self.addCleanup(testtools.__path__.remove, self.package.base) + + +class 
TestCommandTest(TestCase): + + def setUp(self): + super(TestCommandTest, self).setUp() + if fixtures is None: + self.skipTest("Need fixtures") + + def test_test_module(self): + self.useFixture(SampleTestFixture()) + stream = BytesIO() + dist = Distribution() + dist.script_name = 'setup.py' + dist.script_args = ['test'] + dist.cmdclass = {'test': TestCommand} + dist.command_options = { + 'test': {'test_module': ('command line', 'testtools.runexample')}} + cmd = dist.reinitialize_command('test') + cmd.runner.stdout = stream + dist.run_command('test') + self.assertThat( + stream.getvalue(), + MatchesRegex(_b("""Tests running... + +Ran 2 tests in \\d.\\d\\d\\ds +OK +"""))) + + def test_test_suite(self): + self.useFixture(SampleTestFixture()) + stream = BytesIO() + dist = Distribution() + dist.script_name = 'setup.py' + dist.script_args = ['test'] + dist.cmdclass = {'test': TestCommand} + dist.command_options = { + 'test': { + 'test_suite': ( + 'command line', 'testtools.runexample.test_suite')}} + cmd = dist.reinitialize_command('test') + cmd.runner.stdout = stream + dist.run_command('test') + self.assertThat( + stream.getvalue(), + MatchesRegex(_b("""Tests running... + +Ran 2 tests in \\d.\\d\\d\\ds +OK +"""))) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_fixturesupport.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_fixturesupport.py new file mode 100644 index 00000000000..ae6f2ec86e3 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_fixturesupport.py @@ -0,0 +1,117 @@ +# Copyright (c) 2010-2011 testtools developers. See LICENSE for details. 
+ +import unittest + +from testtools import ( + TestCase, + content, + content_type, + ) +from testtools.compat import _b, _u +from testtools.helpers import try_import +from testtools.testresult.doubles import ( + ExtendedTestResult, + ) + +fixtures = try_import('fixtures') +LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture') + + +class TestFixtureSupport(TestCase): + + def setUp(self): + super(TestFixtureSupport, self).setUp() + if fixtures is None or LoggingFixture is None: + self.skipTest("Need fixtures") + + def test_useFixture(self): + fixture = LoggingFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = unittest.TestResult() + SimpleTest('test_foo').run(result) + self.assertTrue(result.wasSuccessful()) + self.assertEqual(['setUp', 'cleanUp'], fixture.calls) + + def test_useFixture_cleanups_raise_caught(self): + calls = [] + def raiser(ignored): + calls.append('called') + raise Exception('foo') + fixture = fixtures.FunctionFixture(lambda:None, raiser) + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = unittest.TestResult() + SimpleTest('test_foo').run(result) + self.assertFalse(result.wasSuccessful()) + self.assertEqual(['called'], calls) + + def test_useFixture_details_captured(self): + class DetailsFixture(fixtures.Fixture): + def setUp(self): + fixtures.Fixture.setUp(self) + self.addCleanup(delattr, self, 'content') + self.content = [_b('content available until cleanUp')] + self.addDetail('content', + content.Content(content_type.UTF8_TEXT, self.get_content)) + def get_content(self): + return self.content + fixture = DetailsFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + # Add a colliding detail (both should show up) + self.addDetail('content', + content.Content(content_type.UTF8_TEXT, lambda:[_b('foo')])) + result = ExtendedTestResult() + SimpleTest('test_foo').run(result) + self.assertEqual('addSuccess', 
result._events[-2][0]) + details = result._events[-2][2] + self.assertEqual(['content', 'content-1'], sorted(details.keys())) + self.assertEqual('foo', _u('').join(details['content'].iter_text())) + self.assertEqual('content available until cleanUp', + ''.join(details['content-1'].iter_text())) + + def test_useFixture_multiple_details_captured(self): + class DetailsFixture(fixtures.Fixture): + def setUp(self): + fixtures.Fixture.setUp(self) + self.addDetail('aaa', content.text_content("foo")) + self.addDetail('bbb', content.text_content("bar")) + fixture = DetailsFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = ExtendedTestResult() + SimpleTest('test_foo').run(result) + self.assertEqual('addSuccess', result._events[-2][0]) + details = result._events[-2][2] + self.assertEqual(['aaa', 'bbb'], sorted(details)) + self.assertEqual('foo', ''.join(details['aaa'].iter_text())) + self.assertEqual('bar', ''.join(details['bbb'].iter_text())) + + def test_useFixture_details_captured_from_setUp(self): + # Details added during fixture set-up are gathered even if setUp() + # fails with an exception. 
+ class BrokenFixture(fixtures.Fixture): + def setUp(self): + fixtures.Fixture.setUp(self) + self.addDetail('content', content.text_content("foobar")) + raise Exception() + fixture = BrokenFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = ExtendedTestResult() + SimpleTest('test_foo').run(result) + self.assertEqual('addError', result._events[-2][0]) + details = result._events[-2][2] + self.assertEqual(['content', 'traceback'], sorted(details)) + self.assertEqual('foobar', ''.join(details['content'].iter_text())) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_helpers.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_helpers.py new file mode 100644 index 00000000000..55de34b7e72 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_helpers.py @@ -0,0 +1,240 @@ +# Copyright (c) 2010-2011 testtools developers. See LICENSE for details. + +from testtools import TestCase +from testtools.helpers import ( + try_import, + try_imports, + ) +from testtools.matchers import ( + AllMatch, + AfterPreprocessing, + Equals, + Is, + Not, + ) +from testtools.tests.helpers import ( + FullStackRunTest, + hide_testtools_stack, + is_stack_hidden, + safe_hasattr, + ) + + +def check_error_callback(test, function, arg, expected_error_count, + expect_result): + """General test template for error_callback argument. + + :param test: Test case instance. + :param function: Either try_import or try_imports. + :param arg: Name or names to import. + :param expected_error_count: Expected number of calls to the callback. + :param expect_result: Boolean for whether a module should + ultimately be returned or not. 
+ """ + cb_calls = [] + def cb(e): + test.assertIsInstance(e, ImportError) + cb_calls.append(e) + try: + result = function(arg, error_callback=cb) + except ImportError: + test.assertFalse(expect_result) + else: + if expect_result: + test.assertThat(result, Not(Is(None))) + else: + test.assertThat(result, Is(None)) + test.assertEquals(len(cb_calls), expected_error_count) + + +class TestSafeHasattr(TestCase): + + def test_attribute_not_there(self): + class Foo(object): + pass + self.assertEqual(False, safe_hasattr(Foo(), 'anything')) + + def test_attribute_there(self): + class Foo(object): + pass + foo = Foo() + foo.attribute = None + self.assertEqual(True, safe_hasattr(foo, 'attribute')) + + def test_property_there(self): + class Foo(object): + @property + def attribute(self): + return None + foo = Foo() + self.assertEqual(True, safe_hasattr(foo, 'attribute')) + + def test_property_raises(self): + class Foo(object): + @property + def attribute(self): + 1/0 + foo = Foo() + self.assertRaises(ZeroDivisionError, safe_hasattr, foo, 'attribute') + + +class TestTryImport(TestCase): + + def test_doesnt_exist(self): + # try_import('thing', foo) returns foo if 'thing' doesn't exist. + marker = object() + result = try_import('doesntexist', marker) + self.assertThat(result, Is(marker)) + + def test_None_is_default_alternative(self): + # try_import('thing') returns None if 'thing' doesn't exist. + result = try_import('doesntexist') + self.assertThat(result, Is(None)) + + def test_existing_module(self): + # try_import('thing', foo) imports 'thing' and returns it if it's a + # module that exists. + result = try_import('os', object()) + import os + self.assertThat(result, Is(os)) + + def test_existing_submodule(self): + # try_import('thing.another', foo) imports 'thing' and returns it if + # it's a module that exists. 
+ result = try_import('os.path', object()) + import os + self.assertThat(result, Is(os.path)) + + def test_nonexistent_submodule(self): + # try_import('thing.another', foo) imports 'thing' and returns foo if + # 'another' doesn't exist. + marker = object() + result = try_import('os.doesntexist', marker) + self.assertThat(result, Is(marker)) + + def test_object_from_module(self): + # try_import('thing.object') imports 'thing' and returns + # 'thing.object' if 'thing' is a module and 'object' is not. + result = try_import('os.path.join') + import os + self.assertThat(result, Is(os.path.join)) + + def test_error_callback(self): + # the error callback is called on failures. + check_error_callback(self, try_import, 'doesntexist', 1, False) + + def test_error_callback_missing_module_member(self): + # the error callback is called on failures to find an object + # inside an existing module. + check_error_callback(self, try_import, 'os.nonexistent', 1, False) + + def test_error_callback_not_on_success(self): + # the error callback is not called on success. + check_error_callback(self, try_import, 'os.path', 0, True) + + +class TestTryImports(TestCase): + + def test_doesnt_exist(self): + # try_imports('thing', foo) returns foo if 'thing' doesn't exist. + marker = object() + result = try_imports(['doesntexist'], marker) + self.assertThat(result, Is(marker)) + + def test_fallback(self): + result = try_imports(['doesntexist', 'os']) + import os + self.assertThat(result, Is(os)) + + def test_None_is_default_alternative(self): + # try_imports('thing') returns None if 'thing' doesn't exist. + e = self.assertRaises( + ImportError, try_imports, ['doesntexist', 'noreally']) + self.assertThat( + str(e), + Equals("Could not import any of: doesntexist, noreally")) + + def test_existing_module(self): + # try_imports('thing', foo) imports 'thing' and returns it if it's a + # module that exists. 
+ result = try_imports(['os'], object()) + import os + self.assertThat(result, Is(os)) + + def test_existing_submodule(self): + # try_imports('thing.another', foo) imports 'thing' and returns it if + # it's a module that exists. + result = try_imports(['os.path'], object()) + import os + self.assertThat(result, Is(os.path)) + + def test_nonexistent_submodule(self): + # try_imports('thing.another', foo) imports 'thing' and returns foo if + # 'another' doesn't exist. + marker = object() + result = try_imports(['os.doesntexist'], marker) + self.assertThat(result, Is(marker)) + + def test_fallback_submodule(self): + result = try_imports(['os.doesntexist', 'os.path']) + import os + self.assertThat(result, Is(os.path)) + + def test_error_callback(self): + # One error for every class that doesn't exist. + check_error_callback(self, try_imports, + ['os.doesntexist', 'os.notthiseither'], + 2, False) + check_error_callback(self, try_imports, + ['os.doesntexist', 'os.notthiseither', 'os'], + 2, True) + check_error_callback(self, try_imports, + ['os.path'], + 0, True) + + +import testtools.matchers +import testtools.runtest +import testtools.testcase + + +def StackHidden(is_hidden): + return AllMatch( + AfterPreprocessing( + lambda module: safe_hasattr(module, '__unittest'), + Equals(is_hidden))) + + +class TestStackHiding(TestCase): + + modules = [ + testtools.matchers, + testtools.runtest, + testtools.testcase, + ] + + run_tests_with = FullStackRunTest + + def setUp(self): + super(TestStackHiding, self).setUp() + self.addCleanup(hide_testtools_stack, is_stack_hidden()) + + def test_shown_during_testtools_testsuite(self): + self.assertThat(self.modules, StackHidden(False)) + + def test_is_stack_hidden_consistent_true(self): + hide_testtools_stack(True) + self.assertEqual(True, is_stack_hidden()) + + def test_is_stack_hidden_consistent_false(self): + hide_testtools_stack(False) + self.assertEqual(False, is_stack_hidden()) + + def test_show_stack(self): + 
hide_testtools_stack(False) + self.assertThat(self.modules, StackHidden(False)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py new file mode 100644 index 00000000000..ebdd4a95102 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py @@ -0,0 +1,1071 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. + +"""Tests for matchers.""" + +import doctest +import re +import sys + +from testtools import ( + Matcher, # check that Matcher is exposed at the top level for docs. + TestCase, + ) +from testtools.compat import ( + StringIO, + str_is_unicode, + text_repr, + _b, + _u, + ) +from testtools.matchers import ( + AfterPreprocessing, + AllMatch, + Annotate, + AnnotatedMismatch, + _BinaryMismatch, + Contains, + Equals, + DocTestMatches, + DoesNotEndWith, + DoesNotStartWith, + EndsWith, + KeysEqual, + Is, + IsInstance, + LessThan, + GreaterThan, + MatchesAny, + MatchesAll, + MatchesException, + MatchesListwise, + MatchesRegex, + MatchesSetwise, + MatchesStructure, + Mismatch, + MismatchDecorator, + MismatchError, + Not, + NotEquals, + Raises, + raises, + StartsWith, + ) +from testtools.tests.helpers import FullStackRunTest + +# Silence pyflakes. 
+Matcher + + +class TestMismatch(TestCase): + + run_tests_with = FullStackRunTest + + def test_constructor_arguments(self): + mismatch = Mismatch("some description", {'detail': "things"}) + self.assertEqual("some description", mismatch.describe()) + self.assertEqual({'detail': "things"}, mismatch.get_details()) + + def test_constructor_no_arguments(self): + mismatch = Mismatch() + self.assertThat(mismatch.describe, + Raises(MatchesException(NotImplementedError))) + self.assertEqual({}, mismatch.get_details()) + + +class TestMismatchError(TestCase): + + def test_is_assertion_error(self): + # MismatchError is an AssertionError, so that most of the time, it + # looks like a test failure, rather than an error. + def raise_mismatch_error(): + raise MismatchError(2, Equals(3), Equals(3).match(2)) + self.assertRaises(AssertionError, raise_mismatch_error) + + def test_default_description_is_mismatch(self): + mismatch = Equals(3).match(2) + e = MismatchError(2, Equals(3), mismatch) + self.assertEqual(mismatch.describe(), str(e)) + + def test_default_description_unicode(self): + matchee = _u('\xa7') + matcher = Equals(_u('a')) + mismatch = matcher.match(matchee) + e = MismatchError(matchee, matcher, mismatch) + self.assertEqual(mismatch.describe(), str(e)) + + def test_verbose_description(self): + matchee = 2 + matcher = Equals(3) + mismatch = matcher.match(2) + e = MismatchError(matchee, matcher, mismatch, True) + expected = ( + 'Match failed. Matchee: %r\n' + 'Matcher: %s\n' + 'Difference: %s\n' % ( + matchee, + matcher, + matcher.match(matchee).describe(), + )) + self.assertEqual(expected, str(e)) + + def test_verbose_unicode(self): + # When assertThat is given matchees or matchers that contain non-ASCII + # unicode strings, we can still provide a meaningful error. + matchee = _u('\xa7') + matcher = Equals(_u('a')) + mismatch = matcher.match(matchee) + expected = ( + 'Match failed. 
Matchee: %s\n' + 'Matcher: %s\n' + 'Difference: %s\n' % ( + text_repr(matchee), + matcher, + mismatch.describe(), + )) + e = MismatchError(matchee, matcher, mismatch, True) + if str_is_unicode: + actual = str(e) + else: + actual = unicode(e) + # Using str() should still work, and return ascii only + self.assertEqual( + expected.replace(matchee, matchee.encode("unicode-escape")), + str(e).decode("ascii")) + self.assertEqual(expected, actual) + + +class Test_BinaryMismatch(TestCase): + """Mismatches from binary comparisons need useful describe output""" + + _long_string = "This is a longish multiline non-ascii string\n\xa7" + _long_b = _b(_long_string) + _long_u = _u(_long_string) + + def test_short_objects(self): + o1, o2 = object(), object() + mismatch = _BinaryMismatch(o1, "!~", o2) + self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2)) + + def test_short_mixed_strings(self): + b, u = _b("\xa7"), _u("\xa7") + mismatch = _BinaryMismatch(b, "!~", u) + self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u)) + + def test_long_bytes(self): + one_line_b = self._long_b.replace(_b("\n"), _b(" ")) + mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(one_line_b), + text_repr(self._long_b, multiline=True))) + + def test_long_unicode(self): + one_line_u = self._long_u.replace("\n", " ") + mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(one_line_u), + text_repr(self._long_u, multiline=True))) + + def test_long_mixed_strings(self): + mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(self._long_b, multiline=True), + text_repr(self._long_u, multiline=True))) + + def test_long_bytes_and_object(self): + obj = object() + mismatch = 
_BinaryMismatch(self._long_b, "!~", obj) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(self._long_b, multiline=True), + repr(obj))) + + def test_long_unicode_and_object(self): + obj = object() + mismatch = _BinaryMismatch(self._long_u, "!~", obj) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(self._long_u, multiline=True), + repr(obj))) + + +class TestMatchersInterface(object): + + run_tests_with = FullStackRunTest + + def test_matches_match(self): + matcher = self.matches_matcher + matches = self.matches_matches + mismatches = self.matches_mismatches + for candidate in matches: + self.assertEqual(None, matcher.match(candidate)) + for candidate in mismatches: + mismatch = matcher.match(candidate) + self.assertNotEqual(None, mismatch) + self.assertNotEqual(None, getattr(mismatch, 'describe', None)) + + def test__str__(self): + # [(expected, object to __str__)]. + examples = self.str_examples + for expected, matcher in examples: + self.assertThat(matcher, DocTestMatches(expected)) + + def test_describe_difference(self): + # [(expected, matchee, matcher), ...] + examples = self.describe_examples + for difference, matchee, matcher in examples: + mismatch = matcher.match(matchee) + self.assertEqual(difference, mismatch.describe()) + + def test_mismatch_details(self): + # The mismatch object must provide get_details, which must return a + # dictionary mapping names to Content objects. 
+ examples = self.describe_examples + for difference, matchee, matcher in examples: + mismatch = matcher.match(matchee) + details = mismatch.get_details() + self.assertEqual(dict(details), details) + + +class TestDocTestMatchesInterface(TestCase, TestMatchersInterface): + + matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS) + matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"] + matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"] + + str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')", + DocTestMatches("Ran 1 test in ...s")), + ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)), + ] + + describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n' + ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s", + DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))] + + +class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface): + + matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS) + matches_matches = [_u("\xa7"), _u("\xa7 more\n")] + matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")] + + str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),), + DocTestMatches(_u("\xa7"))), + ] + + describe_examples = [( + _u("Expected:\n \xa7\nGot:\n a\n"), + "a", + DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))] + + +class TestDocTestMatchesSpecific(TestCase): + + run_tests_with = FullStackRunTest + + def test___init__simple(self): + matcher = DocTestMatches("foo") + self.assertEqual("foo\n", matcher.want) + + def test___init__flags(self): + matcher = DocTestMatches("bar\n", doctest.ELLIPSIS) + self.assertEqual("bar\n", matcher.want) + self.assertEqual(doctest.ELLIPSIS, matcher.flags) + + def test_describe_non_ascii_bytes(self): + """Even with bytestrings, the mismatch should be coercible to unicode + + DocTestMatches is intended for text, but the Python 2 str type also + permits arbitrary binary inputs. 
This is a slightly bogus thing to do, + and under Python 3 using bytes objects will reasonably raise an error. + """ + header = _b("\x89PNG\r\n\x1a\n...") + if str_is_unicode: + self.assertRaises(TypeError, + DocTestMatches, header, doctest.ELLIPSIS) + return + matcher = DocTestMatches(header, doctest.ELLIPSIS) + mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;")) + # Must be treatable as unicode text, the exact output matters less + self.assertTrue(unicode(mismatch.describe())) + + +class TestEqualsInterface(TestCase, TestMatchersInterface): + + matches_matcher = Equals(1) + matches_matches = [1] + matches_mismatches = [2] + + str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))] + + describe_examples = [("1 != 2", 2, Equals(1))] + + +class TestNotEqualsInterface(TestCase, TestMatchersInterface): + + matches_matcher = NotEquals(1) + matches_matches = [2] + matches_mismatches = [1] + + str_examples = [ + ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))] + + describe_examples = [("1 == 1", 1, NotEquals(1))] + + +class TestIsInterface(TestCase, TestMatchersInterface): + + foo = object() + bar = object() + + matches_matcher = Is(foo) + matches_matches = [foo] + matches_mismatches = [bar, 1] + + str_examples = [("Is(2)", Is(2))] + + describe_examples = [("1 is not 2", 2, Is(1))] + + +class TestIsInstanceInterface(TestCase, TestMatchersInterface): + + class Foo:pass + + matches_matcher = IsInstance(Foo) + matches_matches = [Foo()] + matches_mismatches = [object(), 1, Foo] + + str_examples = [ + ("IsInstance(str)", IsInstance(str)), + ("IsInstance(str, int)", IsInstance(str, int)), + ] + + describe_examples = [ + ("'foo' is not an instance of int", 'foo', IsInstance(int)), + ("'foo' is not an instance of any of (int, type)", 'foo', + IsInstance(int, type)), + ] + + +class TestLessThanInterface(TestCase, TestMatchersInterface): + + matches_matcher = LessThan(4) + matches_matches = [-5, 3] + matches_mismatches = [4, 5, 5000] + + 
str_examples = [ + ("LessThan(12)", LessThan(12)), + ] + + describe_examples = [ + ('4 is not > 5', 5, LessThan(4)), + ('4 is not > 4', 4, LessThan(4)), + ] + + +class TestGreaterThanInterface(TestCase, TestMatchersInterface): + + matches_matcher = GreaterThan(4) + matches_matches = [5, 8] + matches_mismatches = [-2, 0, 4] + + str_examples = [ + ("GreaterThan(12)", GreaterThan(12)), + ] + + describe_examples = [ + ('5 is not < 4', 4, GreaterThan(5)), + ('4 is not < 4', 4, GreaterThan(4)), + ] + + +class TestContainsInterface(TestCase, TestMatchersInterface): + + matches_matcher = Contains('foo') + matches_matches = ['foo', 'afoo', 'fooa'] + matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao'] + + str_examples = [ + ("Contains(1)", Contains(1)), + ("Contains('foo')", Contains('foo')), + ] + + describe_examples = [("1 not in 2", 2, Contains(1))] + + +def make_error(type, *args, **kwargs): + try: + raise type(*args, **kwargs) + except type: + return sys.exc_info() + + +class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException(ValueError("foo")) + error_foo = make_error(ValueError, 'foo') + error_bar = make_error(ValueError, 'bar') + error_base_foo = make_error(Exception, 'foo') + matches_matches = [error_foo] + matches_mismatches = [error_bar, error_base_foo] + + str_examples = [ + ("MatchesException(Exception('foo',))", + MatchesException(Exception('foo'))) + ] + describe_examples = [ + ("%r is not a %r" % (Exception, ValueError), + error_base_foo, + MatchesException(ValueError("foo"))), + ("ValueError('bar',) has different arguments to ValueError('foo',).", + error_bar, + MatchesException(ValueError("foo"))), + ] + + +class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException(ValueError) + error_foo = make_error(ValueError, 'foo') + error_sub = make_error(UnicodeError, 'bar') + error_base_foo = make_error(Exception, 'foo') + matches_matches = 
[error_foo, error_sub] + matches_mismatches = [error_base_foo] + + str_examples = [ + ("MatchesException(%r)" % Exception, + MatchesException(Exception)) + ] + describe_examples = [ + ("%r is not a %r" % (Exception, ValueError), + error_base_foo, + MatchesException(ValueError)), + ] + + +class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException(ValueError, 'fo.') + error_foo = make_error(ValueError, 'foo') + error_sub = make_error(UnicodeError, 'foo') + error_bar = make_error(ValueError, 'bar') + matches_matches = [error_foo, error_sub] + matches_mismatches = [error_bar] + + str_examples = [ + ("MatchesException(%r)" % Exception, + MatchesException(Exception, 'fo.')) + ] + describe_examples = [ + ("'bar' does not match /fo./", + error_bar, MatchesException(ValueError, "fo.")), + ] + + +class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException( + ValueError, AfterPreprocessing(str, Equals('foo'))) + error_foo = make_error(ValueError, 'foo') + error_sub = make_error(UnicodeError, 'foo') + error_bar = make_error(ValueError, 'bar') + matches_matches = [error_foo, error_sub] + matches_mismatches = [error_bar] + + str_examples = [ + ("MatchesException(%r)" % Exception, + MatchesException(Exception, Equals('foo'))) + ] + describe_examples = [ + ("5 != %r" % (error_bar[1],), + error_bar, MatchesException(ValueError, Equals(5))), + ] + + +class TestNotInterface(TestCase, TestMatchersInterface): + + matches_matcher = Not(Equals(1)) + matches_matches = [2] + matches_mismatches = [1] + + str_examples = [ + ("Not(Equals(1))", Not(Equals(1))), + ("Not(Equals('1'))", Not(Equals('1')))] + + describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))] + + +class TestMatchersAnyInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2")) + matches_matches = ["1", "2"] + matches_mismatches = ["3"] + + 
str_examples = [( + "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))", + MatchesAny(DocTestMatches("1"), DocTestMatches("2"))), + ] + + describe_examples = [("""Differences: [ +Expected: + 1 +Got: + 3 + +Expected: + 2 +Got: + 3 + +]""", + "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))] + + +class TestMatchesAllInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesAll(NotEquals(1), NotEquals(2)) + matches_matches = [3, 4] + matches_mismatches = [1, 2] + + str_examples = [ + ("MatchesAll(NotEquals(1), NotEquals(2))", + MatchesAll(NotEquals(1), NotEquals(2)))] + + describe_examples = [("""Differences: [ +1 == 1 +]""", + 1, MatchesAll(NotEquals(1), NotEquals(2)))] + + +class TestKeysEqual(TestCase, TestMatchersInterface): + + matches_matcher = KeysEqual('foo', 'bar') + matches_matches = [ + {'foo': 0, 'bar': 1}, + ] + matches_mismatches = [ + {}, + {'foo': 0}, + {'bar': 1}, + {'foo': 0, 'bar': 1, 'baz': 2}, + {'a': None, 'b': None, 'c': None}, + ] + + str_examples = [ + ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')), + ] + + describe_examples = [ + ("['bar', 'foo'] does not match {'baz': 2, 'foo': 0, 'bar': 1}: " + "Keys not equal", + {'foo': 0, 'bar': 1, 'baz': 2}, KeysEqual('foo', 'bar')), + ] + + +class TestAnnotate(TestCase, TestMatchersInterface): + + matches_matcher = Annotate("foo", Equals(1)) + matches_matches = [1] + matches_mismatches = [2] + + str_examples = [ + ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))] + + describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))] + + def test_if_message_no_message(self): + # Annotate.if_message returns the given matcher if there is no + # message. + matcher = Equals(1) + not_annotated = Annotate.if_message('', matcher) + self.assertIs(matcher, not_annotated) + + def test_if_message_given_message(self): + # Annotate.if_message returns an annotated version of the matcher if a + # message is provided. 
+ matcher = Equals(1) + expected = Annotate('foo', matcher) + annotated = Annotate.if_message('foo', matcher) + self.assertThat( + annotated, + MatchesStructure.fromExample(expected, 'annotation', 'matcher')) + + +class TestAnnotatedMismatch(TestCase): + + run_tests_with = FullStackRunTest + + def test_forwards_details(self): + x = Mismatch('description', {'foo': 'bar'}) + annotated = AnnotatedMismatch("annotation", x) + self.assertEqual(x.get_details(), annotated.get_details()) + + +class TestRaisesInterface(TestCase, TestMatchersInterface): + + matches_matcher = Raises() + def boom(): + raise Exception('foo') + matches_matches = [boom] + matches_mismatches = [lambda:None] + + # Tricky to get function objects to render constantly, and the interfaces + # helper uses assertEqual rather than (for instance) DocTestMatches. + str_examples = [] + + describe_examples = [] + + +class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface): + + matches_matcher = Raises( + exception_matcher=MatchesException(Exception('foo'))) + def boom_bar(): + raise Exception('bar') + def boom_foo(): + raise Exception('foo') + matches_matches = [boom_foo] + matches_mismatches = [lambda:None, boom_bar] + + # Tricky to get function objects to render constantly, and the interfaces + # helper uses assertEqual rather than (for instance) DocTestMatches. + str_examples = [] + + describe_examples = [] + + +class TestRaisesBaseTypes(TestCase): + + run_tests_with = FullStackRunTest + + def raiser(self): + raise KeyboardInterrupt('foo') + + def test_KeyboardInterrupt_matched(self): + # When KeyboardInterrupt is matched, it is swallowed. + matcher = Raises(MatchesException(KeyboardInterrupt)) + self.assertThat(self.raiser, matcher) + + def test_KeyboardInterrupt_propogates(self): + # The default 'it raised' propogates KeyboardInterrupt. 
+ match_keyb = Raises(MatchesException(KeyboardInterrupt)) + def raise_keyb_from_match(): + matcher = Raises() + matcher.match(self.raiser) + self.assertThat(raise_keyb_from_match, match_keyb) + + def test_KeyboardInterrupt_match_Exception_propogates(self): + # If the raised exception isn't matched, and it is not a subclass of + # Exception, it is propogated. + match_keyb = Raises(MatchesException(KeyboardInterrupt)) + def raise_keyb_from_match(): + if sys.version_info > (2, 5): + matcher = Raises(MatchesException(Exception)) + else: + # On Python 2.4 KeyboardInterrupt is a StandardError subclass + # but should propogate from less generic exception matchers + matcher = Raises(MatchesException(EnvironmentError)) + matcher.match(self.raiser) + self.assertThat(raise_keyb_from_match, match_keyb) + + +class TestRaisesConvenience(TestCase): + + run_tests_with = FullStackRunTest + + def test_exc_type(self): + self.assertThat(lambda: 1/0, raises(ZeroDivisionError)) + + def test_exc_value(self): + e = RuntimeError("You lose!") + def raiser(): + raise e + self.assertThat(raiser, raises(e)) + + +class DoesNotStartWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_describe(self): + mismatch = DoesNotStartWith("fo", "bo") + self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe()) + + def test_describe_non_ascii_unicode(self): + string = _u("A\xA7") + suffix = _u("B\xA7") + mismatch = DoesNotStartWith(string, suffix) + self.assertEqual("%s does not start with %s." % ( + text_repr(string), text_repr(suffix)), + mismatch.describe()) + + def test_describe_non_ascii_bytes(self): + string = _b("A\xA7") + suffix = _b("B\xA7") + mismatch = DoesNotStartWith(string, suffix) + self.assertEqual("%r does not start with %r." 
% (string, suffix), + mismatch.describe()) + + +class StartsWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_str(self): + matcher = StartsWith("bar") + self.assertEqual("StartsWith('bar')", str(matcher)) + + def test_str_with_bytes(self): + b = _b("\xA7") + matcher = StartsWith(b) + self.assertEqual("StartsWith(%r)" % (b,), str(matcher)) + + def test_str_with_unicode(self): + u = _u("\xA7") + matcher = StartsWith(u) + self.assertEqual("StartsWith(%r)" % (u,), str(matcher)) + + def test_match(self): + matcher = StartsWith("bar") + self.assertIs(None, matcher.match("barf")) + + def test_mismatch_returns_does_not_start_with(self): + matcher = StartsWith("bar") + self.assertIsInstance(matcher.match("foo"), DoesNotStartWith) + + def test_mismatch_sets_matchee(self): + matcher = StartsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("foo", mismatch.matchee) + + def test_mismatch_sets_expected(self): + matcher = StartsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("bar", mismatch.expected) + + +class DoesNotEndWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_describe(self): + mismatch = DoesNotEndWith("fo", "bo") + self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe()) + + def test_describe_non_ascii_unicode(self): + string = _u("A\xA7") + suffix = _u("B\xA7") + mismatch = DoesNotEndWith(string, suffix) + self.assertEqual("%s does not end with %s." % ( + text_repr(string), text_repr(suffix)), + mismatch.describe()) + + def test_describe_non_ascii_bytes(self): + string = _b("A\xA7") + suffix = _b("B\xA7") + mismatch = DoesNotEndWith(string, suffix) + self.assertEqual("%r does not end with %r." 
% (string, suffix), + mismatch.describe()) + + +class EndsWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_str(self): + matcher = EndsWith("bar") + self.assertEqual("EndsWith('bar')", str(matcher)) + + def test_str_with_bytes(self): + b = _b("\xA7") + matcher = EndsWith(b) + self.assertEqual("EndsWith(%r)" % (b,), str(matcher)) + + def test_str_with_unicode(self): + u = _u("\xA7") + matcher = EndsWith(u) + self.assertEqual("EndsWith(%r)" % (u,), str(matcher)) + + def test_match(self): + matcher = EndsWith("arf") + self.assertIs(None, matcher.match("barf")) + + def test_mismatch_returns_does_not_end_with(self): + matcher = EndsWith("bar") + self.assertIsInstance(matcher.match("foo"), DoesNotEndWith) + + def test_mismatch_sets_matchee(self): + matcher = EndsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("foo", mismatch.matchee) + + def test_mismatch_sets_expected(self): + matcher = EndsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("bar", mismatch.expected) + + +def run_doctest(obj, name): + p = doctest.DocTestParser() + t = p.get_doctest( + obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0) + r = doctest.DocTestRunner() + output = StringIO() + r.run(t, out=output.write) + return r.failures, output.getvalue() + + +class TestMatchesListwise(TestCase): + + run_tests_with = FullStackRunTest + + def test_docstring(self): + failure_count, output = run_doctest( + MatchesListwise, "MatchesListwise") + if failure_count: + self.fail("Doctest failed with %s" % output) + + +class TestMatchesStructure(TestCase, TestMatchersInterface): + + class SimpleClass: + def __init__(self, x, y): + self.x = x + self.y = y + + matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2)) + matches_matches = [SimpleClass(1, 2)] + matches_mismatches = [ + SimpleClass(2, 2), + SimpleClass(1, 1), + SimpleClass(3, 3), + ] + + str_examples = [ + ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))), + 
("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))), + ("MatchesStructure(x=Equals(1), y=Equals(2))", + MatchesStructure(x=Equals(1), y=Equals(2))), + ] + + describe_examples = [ + ("""\ +Differences: [ +3 != 1: x +]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))), + ("""\ +Differences: [ +3 != 2: y +]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))), + ("""\ +Differences: [ +0 != 1: x +0 != 2: y +]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))), + ] + + def test_fromExample(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x')) + + def test_byEquality(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure.byEquality(x=1)) + + def test_withStructure(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure.byMatcher(LessThan, x=2)) + + def test_update(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure(x=NotEquals(1)).update(x=Equals(1))) + + def test_update_none(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure(x=Equals(1), z=NotEquals(42)).update( + z=None)) + + +class TestMatchesRegex(TestCase, TestMatchersInterface): + + matches_matcher = MatchesRegex('a|b') + matches_matches = ['a', 'b'] + matches_mismatches = ['c'] + + str_examples = [ + ("MatchesRegex('a|b')", MatchesRegex('a|b')), + ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)), + ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)), + ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))), + ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))), + ] + + describe_examples = [ + ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')), + ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')), + ("%r does not match /\\s+\\xa7/" % (_b('c'),), + _b('c'), MatchesRegex(_b("\\s+\xA7"))), + ("%r does not match /\\s+\\xa7/" % (_u('c'),), + _u('c'), MatchesRegex(_u("\\s+\xA7"))), + ] 
+ + +class TestMatchesSetwise(TestCase): + + run_tests_with = FullStackRunTest + + def assertMismatchWithDescriptionMatching(self, value, matcher, + description_matcher): + mismatch = matcher.match(value) + if mismatch is None: + self.fail("%s matched %s" % (matcher, value)) + actual_description = mismatch.describe() + self.assertThat( + actual_description, + Annotate( + "%s matching %s" % (matcher, value), + description_matcher)) + + def test_matches(self): + self.assertIs( + None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1])) + + def test_mismatches(self): + self.assertMismatchWithDescriptionMatching( + [2, 3], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex('.*There was 1 mismatch$', re.S)) + + def test_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)), + Equals('There was 1 matcher left over: Equals(1)')) + + def test_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)), + Equals('There was 1 value left over: [3]')) + + def test_two_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)), + MatchesRegex( + 'There were 2 matchers left over: Equals\([12]\), ' + 'Equals\([12]\)')) + + def test_two_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex( + 'There were 2 values left over: \[[34], [34]\]')) + + def test_mismatch_and_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)), + MatchesRegex( + '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)', + re.S)) + + def test_mismatch_and_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex( + '.*There was 1 mismatch and 1 extra value: \[[34]\]', + 
re.S)) + + def test_mismatch_and_two_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [3, 4], MatchesSetwise( + Equals(0), Equals(1), Equals(2), Equals(3)), + MatchesRegex( + '.*There was 1 mismatch and 2 extra matchers: ' + 'Equals\([012]\), Equals\([012]\)', re.S)) + + def test_mismatch_and_two_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex( + '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]', + re.S)) + + +class TestAfterPreprocessing(TestCase, TestMatchersInterface): + + def parity(x): + return x % 2 + + matches_matcher = AfterPreprocessing(parity, Equals(1)) + matches_matches = [3, 5] + matches_mismatches = [2] + + str_examples = [ + ("AfterPreprocessing(<function parity>, Equals(1))", + AfterPreprocessing(parity, Equals(1))), + ] + + describe_examples = [ + ("1 != 0: after <function parity> on 2", 2, + AfterPreprocessing(parity, Equals(1))), + ("1 != 0", 2, + AfterPreprocessing(parity, Equals(1), annotate=False)), + ] + + +class TestMismatchDecorator(TestCase): + + run_tests_with = FullStackRunTest + + def test_forwards_description(self): + x = Mismatch("description", {'foo': 'bar'}) + decorated = MismatchDecorator(x) + self.assertEqual(x.describe(), decorated.describe()) + + def test_forwards_details(self): + x = Mismatch("description", {'foo': 'bar'}) + decorated = MismatchDecorator(x) + self.assertEqual(x.get_details(), decorated.get_details()) + + def test_repr(self): + x = Mismatch("description", {'foo': 'bar'}) + decorated = MismatchDecorator(x) + self.assertEqual( + '<testtools.matchers.MismatchDecorator(%r)>' % (x,), + repr(decorated)) + + +class TestAllMatch(TestCase, TestMatchersInterface): + + matches_matcher = AllMatch(LessThan(10)) + matches_matches = [ + [9, 9, 9], + (9, 9), + iter([9, 9, 9, 9, 9]), + ] + matches_mismatches = [ + [11, 9, 9], + iter([9, 12, 9, 11]), + ] + + str_examples = [ + ("AllMatch(LessThan(12))", 
AllMatch(LessThan(12))), + ] + + describe_examples = [ + ('Differences: [\n' + '10 is not > 11\n' + '10 is not > 10\n' + ']', + [11, 9, 10], + AllMatch(LessThan(10))), + ] + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py new file mode 100644 index 00000000000..540a2ee909f --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py @@ -0,0 +1,167 @@ +# Copyright (c) 2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +"""Tests for testtools.monkey.""" + +from testtools import TestCase +from testtools.matchers import MatchesException, Raises +from testtools.monkey import MonkeyPatcher, patch + + +class TestObj: + + def __init__(self): + self.foo = 'foo value' + self.bar = 'bar value' + self.baz = 'baz value' + + +class MonkeyPatcherTest(TestCase): + """ + Tests for 'MonkeyPatcher' monkey-patching class. + """ + + def setUp(self): + super(MonkeyPatcherTest, self).setUp() + self.test_object = TestObj() + self.original_object = TestObj() + self.monkey_patcher = MonkeyPatcher() + + def test_empty(self): + # A monkey patcher without patches doesn't change a thing. + self.monkey_patcher.patch() + + # We can't assert that all state is unchanged, but at least we can + # check our test object. + self.assertEquals(self.original_object.foo, self.test_object.foo) + self.assertEquals(self.original_object.bar, self.test_object.bar) + self.assertEquals(self.original_object.baz, self.test_object.baz) + + def test_construct_with_patches(self): + # Constructing a 'MonkeyPatcher' with patches adds all of the given + # patches to the patch list. 
+ patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'), + (self.test_object, 'bar', 'hehe')) + patcher.patch() + self.assertEquals('haha', self.test_object.foo) + self.assertEquals('hehe', self.test_object.bar) + self.assertEquals(self.original_object.baz, self.test_object.baz) + + def test_patch_existing(self): + # Patching an attribute that exists sets it to the value defined in the + # patch. + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + self.monkey_patcher.patch() + self.assertEquals(self.test_object.foo, 'haha') + + def test_patch_non_existing(self): + # Patching a non-existing attribute sets it to the value defined in + # the patch. + self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value') + self.monkey_patcher.patch() + self.assertEquals(self.test_object.doesntexist, 'value') + + def test_restore_non_existing(self): + # Restoring a value that didn't exist before the patch deletes the + # value. + self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value') + self.monkey_patcher.patch() + self.monkey_patcher.restore() + marker = object() + self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker)) + + def test_patch_already_patched(self): + # Adding a patch for an object and attribute that already have a patch + # overrides the existing patch. + self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah') + self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH') + self.monkey_patcher.patch() + self.assertEquals(self.test_object.foo, 'BLAH') + self.monkey_patcher.restore() + self.assertEquals(self.test_object.foo, self.original_object.foo) + + def test_restore_twice_is_a_no_op(self): + # Restoring an already-restored monkey patch is a no-op. 
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah') + self.monkey_patcher.patch() + self.monkey_patcher.restore() + self.assertEquals(self.test_object.foo, self.original_object.foo) + self.monkey_patcher.restore() + self.assertEquals(self.test_object.foo, self.original_object.foo) + + def test_run_with_patches_decoration(self): + # run_with_patches runs the given callable, passing in all arguments + # and keyword arguments, and returns the return value of the callable. + log = [] + + def f(a, b, c=None): + log.append((a, b, c)) + return 'foo' + + result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10) + self.assertEquals('foo', result) + self.assertEquals([(1, 2, 10)], log) + + def test_repeated_run_with_patches(self): + # We can call the same function with run_with_patches more than + # once. All patches apply for each call. + def f(): + return (self.test_object.foo, self.test_object.bar, + self.test_object.baz) + + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + result = self.monkey_patcher.run_with_patches(f) + self.assertEquals( + ('haha', self.original_object.bar, self.original_object.baz), + result) + result = self.monkey_patcher.run_with_patches(f) + self.assertEquals( + ('haha', self.original_object.bar, self.original_object.baz), + result) + + def test_run_with_patches_restores(self): + # run_with_patches restores the original values after the function has + # executed. + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + self.assertEquals(self.original_object.foo, self.test_object.foo) + self.monkey_patcher.run_with_patches(lambda: None) + self.assertEquals(self.original_object.foo, self.test_object.foo) + + def test_run_with_patches_restores_on_exception(self): + # run_with_patches restores the original values even when the function + # raises an exception. 
+ def _(): + self.assertEquals(self.test_object.foo, 'haha') + self.assertEquals(self.test_object.bar, 'blahblah') + raise RuntimeError("Something went wrong!") + + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah') + + self.assertThat(lambda:self.monkey_patcher.run_with_patches(_), + Raises(MatchesException(RuntimeError("Something went wrong!")))) + self.assertEquals(self.test_object.foo, self.original_object.foo) + self.assertEquals(self.test_object.bar, self.original_object.bar) + + +class TestPatchHelper(TestCase): + + def test_patch_patches(self): + # patch(obj, name, value) sets obj.name to value. + test_object = TestObj() + patch(test_object, 'foo', 42) + self.assertEqual(42, test_object.foo) + + def test_patch_returns_cleanup(self): + # patch(obj, name, value) returns a nullary callable that restores obj + # to its original state when run. + test_object = TestObj() + original = test_object.foo + cleanup = patch(test_object, 'foo', 42) + cleanup() + self.assertEqual(original, test_object.foo) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py new file mode 100644 index 00000000000..d2974f63731 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py @@ -0,0 +1,80 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. 
+ +"""Tests for the test runner logic.""" + +from testtools.compat import ( + _b, + StringIO, + ) +from testtools.helpers import try_import +fixtures = try_import('fixtures') + +import testtools +from testtools import TestCase, run + + +if fixtures: + class SampleTestFixture(fixtures.Fixture): + """Creates testtools.runexample temporarily.""" + + def __init__(self): + self.package = fixtures.PythonPackage( + 'runexample', [('__init__.py', _b(""" +from testtools import TestCase + +class TestFoo(TestCase): + def test_bar(self): + pass + def test_quux(self): + pass +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) +"""))]) + + def setUp(self): + super(SampleTestFixture, self).setUp() + self.useFixture(self.package) + testtools.__path__.append(self.package.base) + self.addCleanup(testtools.__path__.remove, self.package.base) + + +class TestRun(TestCase): + + def test_run_list(self): + if fixtures is None: + self.skipTest("Need fixtures") + self.useFixture(SampleTestFixture()) + out = StringIO() + run.main(['prog', '-l', 'testtools.runexample.test_suite'], out) + self.assertEqual("""testtools.runexample.TestFoo.test_bar +testtools.runexample.TestFoo.test_quux +""", out.getvalue()) + + def test_run_load_list(self): + if fixtures is None: + self.skipTest("Need fixtures") + self.useFixture(SampleTestFixture()) + out = StringIO() + # We load two tests - one that exists and one that doesn't, and we + # should get the one that exists and neither the one that doesn't nor + # the unmentioned one that does. 
+ tempdir = self.useFixture(fixtures.TempDir()) + tempname = tempdir.path + '/tests.list' + f = open(tempname, 'wb') + try: + f.write(_b(""" +testtools.runexample.TestFoo.test_bar +testtools.runexample.missingtest +""")) + finally: + f.close() + run.main(['prog', '-l', '--load-list', tempname, + 'testtools.runexample.test_suite'], out) + self.assertEqual("""testtools.runexample.TestFoo.test_bar +""", out.getvalue()) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py new file mode 100644 index 00000000000..afbb8baf395 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py @@ -0,0 +1,303 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. + +"""Tests for the RunTest single test execution logic.""" + +from testtools import ( + ExtendedToOriginalDecorator, + run_test_with, + RunTest, + TestCase, + TestResult, + ) +from testtools.matchers import MatchesException, Is, Raises +from testtools.testresult.doubles import ExtendedTestResult +from testtools.tests.helpers import FullStackRunTest + + +class TestRunTest(TestCase): + + run_tests_with = FullStackRunTest + + def make_case(self): + class Case(TestCase): + def test(self): + pass + return Case('test') + + def test___init___short(self): + run = RunTest("bar") + self.assertEqual("bar", run.case) + self.assertEqual([], run.handlers) + + def test__init____handlers(self): + handlers = [("quux", "baz")] + run = RunTest("bar", handlers) + self.assertEqual(handlers, run.handlers) + + def test_run_with_result(self): + # test.run passes result down to _run_test_method. 
+ log = [] + class Case(TestCase): + def _run_test_method(self, result): + log.append(result) + case = Case('_run_test_method') + run = RunTest(case, lambda x: log.append(x)) + result = TestResult() + run.run(result) + self.assertEqual(1, len(log)) + self.assertEqual(result, log[0].decorated) + + def test_run_no_result_manages_new_result(self): + log = [] + run = RunTest(self.make_case(), lambda x: log.append(x) or x) + result = run.run() + self.assertIsInstance(result.decorated, TestResult) + + def test__run_core_called(self): + case = self.make_case() + log = [] + run = RunTest(case, lambda x: x) + run._run_core = lambda: log.append('foo') + run.run() + self.assertEqual(['foo'], log) + + def test__run_user_does_not_catch_keyboard(self): + case = self.make_case() + def raises(): + raise KeyboardInterrupt("yo") + run = RunTest(case, None) + run.result = ExtendedTestResult() + self.assertThat(lambda: run._run_user(raises), + Raises(MatchesException(KeyboardInterrupt))) + self.assertEqual([], run.result._events) + + def test__run_user_calls_onException(self): + case = self.make_case() + log = [] + def handler(exc_info): + log.append("got it") + self.assertEqual(3, len(exc_info)) + self.assertIsInstance(exc_info[1], KeyError) + self.assertIs(KeyError, exc_info[0]) + case.addOnException(handler) + e = KeyError('Yo') + def raises(): + raise e + run = RunTest(case, [(KeyError, None)]) + run.result = ExtendedTestResult() + status = run._run_user(raises) + self.assertEqual(run.exception_caught, status) + self.assertEqual([], run.result._events) + self.assertEqual(["got it"], log) + + def test__run_user_can_catch_Exception(self): + case = self.make_case() + e = Exception('Yo') + def raises(): + raise e + log = [] + run = RunTest(case, [(Exception, None)]) + run.result = ExtendedTestResult() + status = run._run_user(raises) + self.assertEqual(run.exception_caught, status) + self.assertEqual([], run.result._events) + self.assertEqual([], log) + + def 
test__run_user_uncaught_Exception_raised(self): + case = self.make_case() + e = KeyError('Yo') + def raises(): + raise e + log = [] + def log_exc(self, result, err): + log.append((result, err)) + run = RunTest(case, [(ValueError, log_exc)]) + run.result = ExtendedTestResult() + self.assertThat(lambda: run._run_user(raises), + Raises(MatchesException(KeyError))) + self.assertEqual([], run.result._events) + self.assertEqual([], log) + + def test__run_user_uncaught_Exception_from_exception_handler_raised(self): + case = self.make_case() + def broken_handler(exc_info): + # ValueError because thats what we know how to catch - and must + # not. + raise ValueError('boo') + case.addOnException(broken_handler) + e = KeyError('Yo') + def raises(): + raise e + log = [] + def log_exc(self, result, err): + log.append((result, err)) + run = RunTest(case, [(ValueError, log_exc)]) + run.result = ExtendedTestResult() + self.assertThat(lambda: run._run_user(raises), + Raises(MatchesException(ValueError))) + self.assertEqual([], run.result._events) + self.assertEqual([], log) + + def test__run_user_returns_result(self): + case = self.make_case() + def returns(): + return 1 + run = RunTest(case) + run.result = ExtendedTestResult() + self.assertEqual(1, run._run_user(returns)) + self.assertEqual([], run.result._events) + + def test__run_one_decorates_result(self): + log = [] + class Run(RunTest): + def _run_prepared_result(self, result): + log.append(result) + return result + run = Run(self.make_case(), lambda x: x) + result = run._run_one('foo') + self.assertEqual([result], log) + self.assertIsInstance(log[0], ExtendedToOriginalDecorator) + self.assertEqual('foo', result.decorated) + + def test__run_prepared_result_calls_start_and_stop_test(self): + result = ExtendedTestResult() + case = self.make_case() + run = RunTest(case, lambda x: x) + run.run(result) + self.assertEqual([ + ('startTest', case), + ('addSuccess', case), + ('stopTest', case), + ], result._events) + + def 
test__run_prepared_result_calls_stop_test_always(self): + result = ExtendedTestResult() + case = self.make_case() + def inner(): + raise Exception("foo") + run = RunTest(case, lambda x: x) + run._run_core = inner + self.assertThat(lambda: run.run(result), + Raises(MatchesException(Exception("foo")))) + self.assertEqual([ + ('startTest', case), + ('stopTest', case), + ], result._events) + + +class CustomRunTest(RunTest): + + marker = object() + + def run(self, result=None): + return self.marker + + +class TestTestCaseSupportForRunTest(TestCase): + + def test_pass_custom_run_test(self): + class SomeCase(TestCase): + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo', runTest=CustomRunTest) + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_default_is_runTest_class_variable(self): + class SomeCase(TestCase): + run_tests_with = CustomRunTest + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_constructor_argument_overrides_class_variable(self): + # If a 'runTest' argument is passed to the test's constructor, that + # overrides the class variable. + marker = object() + class DifferentRunTest(RunTest): + def run(self, result=None): + return marker + class SomeCase(TestCase): + run_tests_with = CustomRunTest + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo', runTest=DifferentRunTest) + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(marker)) + + def test_decorator_for_run_test(self): + # Individual test methods can be marked as needing a special runner. 
+ class SomeCase(TestCase): + @run_test_with(CustomRunTest) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_extended_decorator_for_run_test(self): + # Individual test methods can be marked as needing a special runner. + # Extra arguments can be passed to the decorator which will then be + # passed on to the RunTest object. + marker = object() + class FooRunTest(RunTest): + def __init__(self, case, handlers=None, bar=None): + super(FooRunTest, self).__init__(case, handlers) + self.bar = bar + def run(self, result=None): + return self.bar + class SomeCase(TestCase): + @run_test_with(FooRunTest, bar=marker) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(marker)) + + def test_works_as_inner_decorator(self): + # Even if run_test_with is the innermost decorator, it will be + # respected. + def wrapped(function): + """Silly, trivial decorator.""" + def decorated(*args, **kwargs): + return function(*args, **kwargs) + decorated.__name__ = function.__name__ + decorated.__dict__.update(function.__dict__) + return decorated + class SomeCase(TestCase): + @wrapped + @run_test_with(CustomRunTest) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_constructor_overrides_decorator(self): + # If a 'runTest' argument is passed to the test's constructor, that + # overrides the decorator. 
+ marker = object() + class DifferentRunTest(RunTest): + def run(self, result=None): + return marker + class SomeCase(TestCase): + @run_test_with(CustomRunTest) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo', runTest=DifferentRunTest) + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(marker)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_spinner.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_spinner.py new file mode 100644 index 00000000000..3d677bd7545 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_spinner.py @@ -0,0 +1,332 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Tests for the evil Twisted reactor-spinning we do.""" + +import os +import signal + +from testtools import ( + skipIf, + TestCase, + ) +from testtools.helpers import try_import +from testtools.matchers import ( + Equals, + Is, + MatchesException, + Raises, + ) + +_spinner = try_import('testtools._spinner') + +defer = try_import('twisted.internet.defer') +Failure = try_import('twisted.python.failure.Failure') + + +class NeedsTwistedTestCase(TestCase): + + def setUp(self): + super(NeedsTwistedTestCase, self).setUp() + if defer is None or Failure is None: + self.skipTest("Need Twisted to run") + + +class TestNotReentrant(NeedsTwistedTestCase): + + def test_not_reentrant(self): + # A function decorated as not being re-entrant will raise a + # _spinner.ReentryError if it is called while it is running. 
+ calls = [] + @_spinner.not_reentrant + def log_something(): + calls.append(None) + if len(calls) < 5: + log_something() + self.assertThat( + log_something, Raises(MatchesException(_spinner.ReentryError))) + self.assertEqual(1, len(calls)) + + def test_deeper_stack(self): + calls = [] + @_spinner.not_reentrant + def g(): + calls.append(None) + if len(calls) < 5: + f() + @_spinner.not_reentrant + def f(): + calls.append(None) + if len(calls) < 5: + g() + self.assertThat(f, Raises(MatchesException(_spinner.ReentryError))) + self.assertEqual(2, len(calls)) + + +class TestExtractResult(NeedsTwistedTestCase): + + def test_not_fired(self): + # _spinner.extract_result raises _spinner.DeferredNotFired if it's + # given a Deferred that has not fired. + self.assertThat(lambda:_spinner.extract_result(defer.Deferred()), + Raises(MatchesException(_spinner.DeferredNotFired))) + + def test_success(self): + # _spinner.extract_result returns the value of the Deferred if it has + # fired successfully. + marker = object() + d = defer.succeed(marker) + self.assertThat(_spinner.extract_result(d), Equals(marker)) + + def test_failure(self): + # _spinner.extract_result raises the failure's exception if it's given + # a Deferred that is failing. 
+ try: + 1/0 + except ZeroDivisionError: + f = Failure() + d = defer.fail(f) + self.assertThat(lambda:_spinner.extract_result(d), + Raises(MatchesException(ZeroDivisionError))) + + +class TestTrapUnhandledErrors(NeedsTwistedTestCase): + + def test_no_deferreds(self): + marker = object() + result, errors = _spinner.trap_unhandled_errors(lambda: marker) + self.assertEqual([], errors) + self.assertIs(marker, result) + + def test_unhandled_error(self): + failures = [] + def make_deferred_but_dont_handle(): + try: + 1/0 + except ZeroDivisionError: + f = Failure() + failures.append(f) + defer.fail(f) + result, errors = _spinner.trap_unhandled_errors( + make_deferred_but_dont_handle) + self.assertIs(None, result) + self.assertEqual(failures, [error.failResult for error in errors]) + + +class TestRunInReactor(NeedsTwistedTestCase): + + def make_reactor(self): + from twisted.internet import reactor + return reactor + + def make_spinner(self, reactor=None): + if reactor is None: + reactor = self.make_reactor() + return _spinner.Spinner(reactor) + + def make_timeout(self): + return 0.01 + + def test_function_called(self): + # run_in_reactor actually calls the function given to it. + calls = [] + marker = object() + self.make_spinner().run(self.make_timeout(), calls.append, marker) + self.assertThat(calls, Equals([marker])) + + def test_return_value_returned(self): + # run_in_reactor returns the value returned by the function given to + # it. + marker = object() + result = self.make_spinner().run(self.make_timeout(), lambda: marker) + self.assertThat(result, Is(marker)) + + def test_exception_reraised(self): + # If the given function raises an error, run_in_reactor re-raises that + # error. + self.assertThat( + lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0), + Raises(MatchesException(ZeroDivisionError))) + + def test_keyword_arguments(self): + # run_in_reactor passes keyword arguments on. 
+ calls = [] + function = lambda *a, **kw: calls.extend([a, kw]) + self.make_spinner().run(self.make_timeout(), function, foo=42) + self.assertThat(calls, Equals([(), {'foo': 42}])) + + def test_not_reentrant(self): + # run_in_reactor raises an error if it is called inside another call + # to run_in_reactor. + spinner = self.make_spinner() + self.assertThat(lambda: spinner.run( + self.make_timeout(), spinner.run, self.make_timeout(), + lambda: None), Raises(MatchesException(_spinner.ReentryError))) + + def test_deferred_value_returned(self): + # If the given function returns a Deferred, run_in_reactor returns the + # value in the Deferred at the end of the callback chain. + marker = object() + result = self.make_spinner().run( + self.make_timeout(), lambda: defer.succeed(marker)) + self.assertThat(result, Is(marker)) + + def test_preserve_signal_handler(self): + signals = ['SIGINT', 'SIGTERM', 'SIGCHLD'] + signals = filter( + None, (getattr(signal, name, None) for name in signals)) + for sig in signals: + self.addCleanup(signal.signal, sig, signal.getsignal(sig)) + new_hdlrs = list(lambda *a: None for _ in signals) + for sig, hdlr in zip(signals, new_hdlrs): + signal.signal(sig, hdlr) + spinner = self.make_spinner() + spinner.run(self.make_timeout(), lambda: None) + self.assertEqual(new_hdlrs, map(signal.getsignal, signals)) + + def test_timeout(self): + # If the function takes too long to run, we raise a + # _spinner.TimeoutError. + timeout = self.make_timeout() + self.assertThat( + lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()), + Raises(MatchesException(_spinner.TimeoutError))) + + def test_no_junk_by_default(self): + # If the reactor hasn't spun yet, then there cannot be any junk. + spinner = self.make_spinner() + self.assertThat(spinner.get_junk(), Equals([])) + + def test_clean_do_nothing(self): + # If there's nothing going on in the reactor, then clean does nothing + # and returns an empty list. 
+ spinner = self.make_spinner() + result = spinner._clean() + self.assertThat(result, Equals([])) + + def test_clean_delayed_call(self): + # If there's a delayed call in the reactor, then clean cancels it and + # returns an empty list. + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + call = reactor.callLater(10, lambda: None) + results = spinner._clean() + self.assertThat(results, Equals([call])) + self.assertThat(call.active(), Equals(False)) + + def test_clean_delayed_call_cancelled(self): + # If there's a delayed call that's just been cancelled, then it's no + # longer there. + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + call = reactor.callLater(10, lambda: None) + call.cancel() + results = spinner._clean() + self.assertThat(results, Equals([])) + + def test_clean_selectables(self): + # If there's still a selectable (e.g. a listening socket), then + # clean() removes it from the reactor's registry. + # + # Note that the socket is left open. This emulates a bug in trial. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + port = reactor.listenTCP(0, ServerFactory()) + spinner.run(self.make_timeout(), lambda: None) + results = spinner.get_junk() + self.assertThat(results, Equals([port])) + + def test_clean_running_threads(self): + import threading + import time + current_threads = list(threading.enumerate()) + reactor = self.make_reactor() + timeout = self.make_timeout() + spinner = self.make_spinner(reactor) + spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0) + # Python before 2.5 has a race condition with thread handling where + # join() does not remove threads from enumerate before returning - the + # thread being joined does the removal. This was fixed in Python 2.5 + # but we still support 2.4, so we have to workaround the issue. + # http://bugs.python.org/issue1703448. 
+ self.assertThat( + [thread for thread in threading.enumerate() if thread.isAlive()], + Equals(current_threads)) + + def test_leftover_junk_available(self): + # If 'run' is given a function that leaves the reactor dirty in some + # way, 'run' will clean up the reactor and then store information + # about the junk. This information can be got using get_junk. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + port = spinner.run( + self.make_timeout(), reactor.listenTCP, 0, ServerFactory()) + self.assertThat(spinner.get_junk(), Equals([port])) + + def test_will_not_run_with_previous_junk(self): + # If 'run' is called and there's still junk in the spinner's junk + # list, then the spinner will refuse to run. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + spinner.run(timeout, reactor.listenTCP, 0, ServerFactory()) + self.assertThat(lambda: spinner.run(timeout, lambda: None), + Raises(MatchesException(_spinner.StaleJunkError))) + + def test_clear_junk_clears_previous_junk(self): + # If 'run' is called and there's still junk in the spinner's junk + # list, then the spinner will refuse to run. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory()) + junk = spinner.clear_junk() + self.assertThat(junk, Equals([port])) + self.assertThat(spinner.get_junk(), Equals([])) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_sigint_raises_no_result_error(self): + # If we get a SIGINT during a run, we raise _spinner.NoResultError. 
+ SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + self.skipTest("SIGINT not available") + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + reactor.callLater(timeout, os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred), + Raises(MatchesException(_spinner.NoResultError))) + self.assertEqual([], spinner._clean()) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_sigint_raises_no_result_error_second_time(self): + # If we get a SIGINT during a run, we raise _spinner.NoResultError. + # This test is exactly the same as test_sigint_raises_no_result_error, + # and exists to make sure we haven't futzed with state. + self.test_sigint_raises_no_result_error() + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_fast_sigint_raises_no_result_error(self): + # If we get a SIGINT during a run, we raise _spinner.NoResultError. 
+ SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + self.skipTest("SIGINT not available") + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + reactor.callWhenRunning(os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred), + Raises(MatchesException(_spinner.NoResultError))) + self.assertEqual([], spinner._clean()) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_fast_sigint_raises_no_result_error_second_time(self): + self.test_fast_sigint_raises_no_result_error() + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testcase.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testcase.py new file mode 100644 index 00000000000..52f93c3c526 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testcase.py @@ -0,0 +1,1288 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. 
+ +"""Tests for extensions to the base test library.""" + +from doctest import ELLIPSIS +from pprint import pformat +import sys +import unittest + +from testtools import ( + ErrorHolder, + MultipleExceptions, + PlaceHolder, + TestCase, + clone_test_with_new_id, + content, + skip, + skipIf, + skipUnless, + testcase, + ) +from testtools.compat import ( + _b, + _u, + ) +from testtools.matchers import ( + Annotate, + DocTestMatches, + Equals, + MatchesException, + Raises, + ) +from testtools.testresult.doubles import ( + Python26TestResult, + Python27TestResult, + ExtendedTestResult, + ) +from testtools.testresult.real import TestResult +from testtools.tests.helpers import ( + an_exc_info, + FullStackRunTest, + LoggingResult, + ) +try: + exec('from __future__ import with_statement') +except SyntaxError: + pass +else: + from testtools.tests.test_with_with import * + + +class TestPlaceHolder(TestCase): + + run_test_with = FullStackRunTest + + def makePlaceHolder(self, test_id="foo", short_description=None): + return PlaceHolder(test_id, short_description) + + def test_id_comes_from_constructor(self): + # The id() of a PlaceHolder is whatever you pass into the constructor. + test = PlaceHolder("test id") + self.assertEqual("test id", test.id()) + + def test_shortDescription_is_id(self): + # The shortDescription() of a PlaceHolder is the id, by default. + test = PlaceHolder("test id") + self.assertEqual(test.id(), test.shortDescription()) + + def test_shortDescription_specified(self): + # If a shortDescription is provided to the constructor, then + # shortDescription() returns that instead. + test = PlaceHolder("test id", "description") + self.assertEqual("description", test.shortDescription()) + + def test_repr_just_id(self): + # repr(placeholder) shows you how the object was constructed. 
+ test = PlaceHolder("test id") + self.assertEqual( + "<testtools.testcase.PlaceHolder(%s)>" % repr(test.id()), + repr(test)) + + def test_repr_with_description(self): + # repr(placeholder) shows you how the object was constructed. + test = PlaceHolder("test id", "description") + self.assertEqual( + "<testtools.testcase.PlaceHolder(%r, %r)>" % ( + test.id(), test.shortDescription()), + repr(test)) + + def test_counts_as_one_test(self): + # A placeholder test counts as one test. + test = self.makePlaceHolder() + self.assertEqual(1, test.countTestCases()) + + def test_str_is_id(self): + # str(placeholder) is always the id(). We are not barbarians. + test = self.makePlaceHolder() + self.assertEqual(test.id(), str(test)) + + def test_runs_as_success(self): + # When run, a PlaceHolder test records a success. + test = self.makePlaceHolder() + log = [] + test.run(LoggingResult(log)) + self.assertEqual( + [('startTest', test), ('addSuccess', test), ('stopTest', test)], + log) + + def test_call_is_run(self): + # A PlaceHolder can be called, in which case it behaves like run. + test = self.makePlaceHolder() + run_log = [] + test.run(LoggingResult(run_log)) + call_log = [] + test(LoggingResult(call_log)) + self.assertEqual(run_log, call_log) + + def test_runs_without_result(self): + # A PlaceHolder can be run without a result, in which case there's no + # way to actually get at the result. + self.makePlaceHolder().run() + + def test_debug(self): + # A PlaceHolder can be debugged. 
+ self.makePlaceHolder().debug() + + +class TestErrorHolder(TestCase): + + run_test_with = FullStackRunTest + + def makeException(self): + try: + raise RuntimeError("danger danger") + except: + return sys.exc_info() + + def makePlaceHolder(self, test_id="foo", error=None, + short_description=None): + if error is None: + error = self.makeException() + return ErrorHolder(test_id, error, short_description) + + def test_id_comes_from_constructor(self): + # The id() of a PlaceHolder is whatever you pass into the constructor. + test = ErrorHolder("test id", self.makeException()) + self.assertEqual("test id", test.id()) + + def test_shortDescription_is_id(self): + # The shortDescription() of a PlaceHolder is the id, by default. + test = ErrorHolder("test id", self.makeException()) + self.assertEqual(test.id(), test.shortDescription()) + + def test_shortDescription_specified(self): + # If a shortDescription is provided to the constructor, then + # shortDescription() returns that instead. + test = ErrorHolder("test id", self.makeException(), "description") + self.assertEqual("description", test.shortDescription()) + + def test_repr_just_id(self): + # repr(placeholder) shows you how the object was constructed. + error = self.makeException() + test = ErrorHolder("test id", error) + self.assertEqual( + "<testtools.testcase.ErrorHolder(%r, %r)>" % (test.id(), error), + repr(test)) + + def test_repr_with_description(self): + # repr(placeholder) shows you how the object was constructed. + error = self.makeException() + test = ErrorHolder("test id", error, "description") + self.assertEqual( + "<testtools.testcase.ErrorHolder(%r, %r, %r)>" % ( + test.id(), error, test.shortDescription()), + repr(test)) + + def test_counts_as_one_test(self): + # A placeholder test counts as one test. + test = self.makePlaceHolder() + self.assertEqual(1, test.countTestCases()) + + def test_str_is_id(self): + # str(placeholder) is always the id(). We are not barbarians. 
+ test = self.makePlaceHolder() + self.assertEqual(test.id(), str(test)) + + def test_runs_as_error(self): + # When run, a PlaceHolder test records a success. + error = self.makeException() + test = self.makePlaceHolder(error=error) + log = [] + test.run(LoggingResult(log)) + self.assertEqual( + [('startTest', test), + ('addError', test, error), + ('stopTest', test)], log) + + def test_call_is_run(self): + # A PlaceHolder can be called, in which case it behaves like run. + test = self.makePlaceHolder() + run_log = [] + test.run(LoggingResult(run_log)) + call_log = [] + test(LoggingResult(call_log)) + self.assertEqual(run_log, call_log) + + def test_runs_without_result(self): + # A PlaceHolder can be run without a result, in which case there's no + # way to actually get at the result. + self.makePlaceHolder().run() + + def test_debug(self): + # A PlaceHolder can be debugged. + self.makePlaceHolder().debug() + + +class TestEquality(TestCase): + """Test ``TestCase``'s equality implementation.""" + + run_test_with = FullStackRunTest + + def test_identicalIsEqual(self): + # TestCase's are equal if they are identical. + self.assertEqual(self, self) + + def test_nonIdenticalInUnequal(self): + # TestCase's are not equal if they are not identical. + self.assertNotEqual(TestCase(methodName='run'), + TestCase(methodName='skip')) + + +class TestAssertions(TestCase): + """Test assertions in TestCase.""" + + run_test_with = FullStackRunTest + + def raiseError(self, exceptionFactory, *args, **kwargs): + raise exceptionFactory(*args, **kwargs) + + def test_formatTypes_single(self): + # Given a single class, _formatTypes returns the name. + class Foo(object): + pass + self.assertEqual('Foo', self._formatTypes(Foo)) + + def test_formatTypes_multiple(self): + # Given multiple types, _formatTypes returns the names joined by + # commas. 
+ class Foo(object): + pass + class Bar(object): + pass + self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar])) + + def test_assertRaises(self): + # assertRaises asserts that a callable raises a particular exception. + self.assertRaises(RuntimeError, self.raiseError, RuntimeError) + + def test_assertRaises_fails_when_no_error_raised(self): + # assertRaises raises self.failureException when it's passed a + # callable that raises no error. + ret = ('orange', 42) + self.assertFails("<function <lambda> at ...> returned ('orange', 42)", + self.assertRaises, RuntimeError, lambda: ret) + + def test_assertRaises_fails_when_different_error_raised(self): + # assertRaises re-raises an exception that it didn't expect. + self.assertThat(lambda: self.assertRaises(RuntimeError, + self.raiseError, ZeroDivisionError), + Raises(MatchesException(ZeroDivisionError))) + + def test_assertRaises_returns_the_raised_exception(self): + # assertRaises returns the exception object that was raised. This is + # useful for testing that exceptions have the right message. + + # This contraption stores the raised exception, so we can compare it + # to the return value of assertRaises. + raisedExceptions = [] + def raiseError(): + try: + raise RuntimeError('Deliberate error') + except RuntimeError: + raisedExceptions.append(sys.exc_info()[1]) + raise + + exception = self.assertRaises(RuntimeError, raiseError) + self.assertEqual(1, len(raisedExceptions)) + self.assertTrue( + exception is raisedExceptions[0], + "%r is not %r" % (exception, raisedExceptions[0])) + + def test_assertRaises_with_multiple_exceptions(self): + # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that + # function raises one of ExceptionTwo or ExceptionOne. 
+ expectedExceptions = (RuntimeError, ZeroDivisionError) + self.assertRaises( + expectedExceptions, self.raiseError, expectedExceptions[0]) + self.assertRaises( + expectedExceptions, self.raiseError, expectedExceptions[1]) + + def test_assertRaises_with_multiple_exceptions_failure_mode(self): + # If assertRaises is called expecting one of a group of exceptions and + # a callable that doesn't raise an exception, then fail with an + # appropriate error message. + expectedExceptions = (RuntimeError, ZeroDivisionError) + failure = self.assertRaises( + self.failureException, + self.assertRaises, expectedExceptions, lambda: None) + self.assertFails('<function <lambda> at ...> returned None', + self.assertRaises, expectedExceptions, lambda: None) + + def assertFails(self, message, function, *args, **kwargs): + """Assert that function raises a failure with the given message.""" + failure = self.assertRaises( + self.failureException, function, *args, **kwargs) + self.assertThat(failure, DocTestMatches(message, ELLIPSIS)) + + def test_assertIn_success(self): + # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'. + self.assertIn(3, range(10)) + self.assertIn('foo', 'foo bar baz') + self.assertIn('foo', 'foo bar baz'.split()) + + def test_assertIn_failure(self): + # assertIn(needle, haystack) fails the test when 'needle' is not in + # 'haystack'. + self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2]) + self.assertFails( + '%r not in %r' % ('qux', 'foo bar baz'), + self.assertIn, 'qux', 'foo bar baz') + + def test_assertNotIn_success(self): + # assertNotIn(needle, haystack) asserts that 'needle' is not in + # 'haystack'. + self.assertNotIn(3, [0, 1, 2]) + self.assertNotIn('qux', 'foo bar baz') + + def test_assertNotIn_failure(self): + # assertNotIn(needle, haystack) fails the test when 'needle' is in + # 'haystack'. 
+ self.assertFails('[1, 2, 3] matches Contains(3)', self.assertNotIn, + 3, [1, 2, 3]) + self.assertFails( + "'foo bar baz' matches Contains('foo')", + self.assertNotIn, 'foo', 'foo bar baz') + + def test_assertIsInstance(self): + # assertIsInstance asserts that an object is an instance of a class. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + foo = Foo() + self.assertIsInstance(foo, Foo) + + def test_assertIsInstance_multiple_classes(self): + # assertIsInstance asserts that an object is an instance of one of a + # group of classes. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + class Bar(object): + """Another simple class for testing assertIsInstance.""" + + foo = Foo() + self.assertIsInstance(foo, (Foo, Bar)) + self.assertIsInstance(Bar(), (Foo, Bar)) + + def test_assertIsInstance_failure(self): + # assertIsInstance(obj, klass) fails the test when obj is not an + # instance of klass. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + self.assertFails( + "'42' is not an instance of %s" % self._formatTypes(Foo), + self.assertIsInstance, 42, Foo) + + def test_assertIsInstance_failure_multiple_classes(self): + # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is + # not an instance of klass1 or klass2. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + class Bar(object): + """Another simple class for testing assertIsInstance.""" + + self.assertFails( + "'42' is not an instance of any of (%s)" % self._formatTypes([Foo, Bar]), + self.assertIsInstance, 42, (Foo, Bar)) + + def test_assertIsInstance_overridden_message(self): + # assertIsInstance(obj, klass, msg) permits a custom message. + self.assertFails("'42' is not an instance of str: foo", + self.assertIsInstance, 42, str, "foo") + + def test_assertIs(self): + # assertIs asserts that an object is identical to another object. 
+ self.assertIs(None, None) + some_list = [42] + self.assertIs(some_list, some_list) + some_object = object() + self.assertIs(some_object, some_object) + + def test_assertIs_fails(self): + # assertIs raises assertion errors if one object is not identical to + # another. + self.assertFails('None is not 42', self.assertIs, None, 42) + self.assertFails('[42] is not [42]', self.assertIs, [42], [42]) + + def test_assertIs_fails_with_message(self): + # assertIs raises assertion errors if one object is not identical to + # another, and includes a user-supplied message, if it's provided. + self.assertFails( + 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar') + + def test_assertIsNot(self): + # assertIsNot asserts that an object is not identical to another + # object. + self.assertIsNot(None, 42) + self.assertIsNot([42], [42]) + self.assertIsNot(object(), object()) + + def test_assertIsNot_fails(self): + # assertIsNot raises assertion errors if one object is identical to + # another. + self.assertFails('None matches Is(None)', self.assertIsNot, None, None) + some_list = [42] + self.assertFails( + '[42] matches Is([42])', self.assertIsNot, some_list, some_list) + + def test_assertIsNot_fails_with_message(self): + # assertIsNot raises assertion errors if one object is identical to + # another, and includes a user-supplied message if it's provided. 
+ self.assertFails( + 'None matches Is(None): foo bar', self.assertIsNot, None, None, + "foo bar") + + def test_assertThat_matches_clean(self): + class Matcher(object): + def match(self, foo): + return None + self.assertThat("foo", Matcher()) + + def test_assertThat_mismatch_raises_description(self): + calls = [] + class Mismatch(object): + def __init__(self, thing): + self.thing = thing + def describe(self): + calls.append(('describe_diff', self.thing)) + return "object is not a thing" + def get_details(self): + return {} + class Matcher(object): + def match(self, thing): + calls.append(('match', thing)) + return Mismatch(thing) + def __str__(self): + calls.append(('__str__',)) + return "a description" + class Test(TestCase): + def test(self): + self.assertThat("foo", Matcher()) + result = Test("test").run() + self.assertEqual([ + ('match', "foo"), + ('describe_diff', "foo"), + ], calls) + self.assertFalse(result.wasSuccessful()) + + def test_assertThat_output(self): + matchee = 'foo' + matcher = Equals('bar') + expected = matcher.match(matchee).describe() + self.assertFails(expected, self.assertThat, matchee, matcher) + + def test_assertThat_message_is_annotated(self): + matchee = 'foo' + matcher = Equals('bar') + expected = Annotate('woo', matcher).match(matchee).describe() + self.assertFails(expected, self.assertThat, matchee, matcher, 'woo') + + def test_assertThat_verbose_output(self): + matchee = 'foo' + matcher = Equals('bar') + expected = ( + 'Match failed. Matchee: %r\n' + 'Matcher: %s\n' + 'Difference: %s\n' % ( + matchee, + matcher, + matcher.match(matchee).describe(), + )) + self.assertFails( + expected, self.assertThat, matchee, matcher, verbose=True) + + def get_error_string(self, e): + """Get the string showing how 'e' would be formatted in test output. + + This is a little bit hacky, since it's designed to give consistent + output regardless of Python version. 
+ + In testtools, TestResult._exc_info_to_unicode is the point of dispatch + between various different implementations of methods that format + exceptions, so that's what we have to call. However, that method cares + about stack traces and formats the exception class. We don't care + about either of these, so we take its output and parse it a little. + """ + error = TestResult()._exc_info_to_unicode((e.__class__, e, None), self) + # We aren't at all interested in the traceback. + if error.startswith('Traceback (most recent call last):\n'): + lines = error.splitlines(True)[1:] + for i, line in enumerate(lines): + if not line.startswith(' '): + break + error = ''.join(lines[i:]) + # We aren't interested in how the exception type is formatted. + exc_class, error = error.split(': ', 1) + return error + + def test_assertThat_verbose_unicode(self): + # When assertThat is given matchees or matchers that contain non-ASCII + # unicode strings, we can still provide a meaningful error. + matchee = _u('\xa7') + matcher = Equals(_u('a')) + expected = ( + 'Match failed. Matchee: %s\n' + 'Matcher: %s\n' + 'Difference: %s\n\n' % ( + repr(matchee).replace("\\xa7", matchee), + matcher, + matcher.match(matchee).describe(), + )) + e = self.assertRaises( + self.failureException, self.assertThat, matchee, matcher, + verbose=True) + self.assertEqual(expected, self.get_error_string(e)) + + def test_assertEqual_nice_formatting(self): + message = "These things ought not be equal." 
+ a = ['apple', 'banana', 'cherry'] + b = {'Thatcher': 'One who mends roofs of straw', + 'Major': 'A military officer, ranked below colonel', + 'Blair': 'To shout loudly', + 'Brown': 'The colour of healthy human faeces'} + expected_error = '\n'.join([ + '!=:', + 'reference = %s' % pformat(a), + 'actual = %s' % pformat(b), + ': ' + message, + ]) + self.assertFails(expected_error, self.assertEqual, a, b, message) + self.assertFails(expected_error, self.assertEquals, a, b, message) + self.assertFails(expected_error, self.failUnlessEqual, a, b, message) + + def test_assertEqual_formatting_no_message(self): + a = "cat" + b = "dog" + expected_error = "'cat' != 'dog'" + self.assertFails(expected_error, self.assertEqual, a, b) + self.assertFails(expected_error, self.assertEquals, a, b) + self.assertFails(expected_error, self.failUnlessEqual, a, b) + + def test_assertEqual_non_ascii_str_with_newlines(self): + message = _u("Be careful mixing unicode and bytes") + a = "a\n\xa7\n" + b = "Just a longish string so the more verbose output form is used." + expected_error = '\n'.join([ + '!=:', + "reference = '''\\", + 'a', + repr('\xa7')[1:-1], + "'''", + 'actual = %r' % (b,), + ': ' + message, + ]) + self.assertFails(expected_error, self.assertEqual, a, b, message) + + def test_assertIsNone(self): + self.assertIsNone(None) + + expected_error = 'None is not 0' + self.assertFails(expected_error, self.assertIsNone, 0) + + def test_assertIsNotNone(self): + self.assertIsNotNone(0) + self.assertIsNotNone("0") + + expected_error = 'None matches Is(None)' + self.assertFails(expected_error, self.assertIsNotNone, None) + + +class TestAddCleanup(TestCase): + """Tests for TestCase.addCleanup.""" + + run_test_with = FullStackRunTest + + class LoggingTest(TestCase): + """A test that logs calls to setUp, runTest and tearDown.""" + + def setUp(self): + TestCase.setUp(self) + self._calls = ['setUp'] + + def brokenSetUp(self): + # A tearDown that deliberately fails. 
+ self._calls = ['brokenSetUp'] + raise RuntimeError('Deliberate Failure') + + def runTest(self): + self._calls.append('runTest') + + def brokenTest(self): + raise RuntimeError('Deliberate broken test') + + def tearDown(self): + self._calls.append('tearDown') + TestCase.tearDown(self) + + def setUp(self): + TestCase.setUp(self) + self._result_calls = [] + self.test = TestAddCleanup.LoggingTest('runTest') + self.logging_result = LoggingResult(self._result_calls) + + def assertErrorLogEqual(self, messages): + self.assertEqual(messages, [call[0] for call in self._result_calls]) + + def assertTestLogEqual(self, messages): + """Assert that the call log equals 'messages'.""" + case = self._result_calls[0][1] + self.assertEqual(messages, case._calls) + + def logAppender(self, message): + """A cleanup that appends 'message' to the tests log. + + Cleanups are callables that are added to a test by addCleanup. To + verify that our cleanups run in the right order, we add strings to a + list that acts as a log. This method returns a cleanup that will add + the given message to that log when run. + """ + self.test._calls.append(message) + + def test_fixture(self): + # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'. + # This test doesn't test addCleanup itself, it just sanity checks the + # fixture. + self.test.run(self.logging_result) + self.assertTestLogEqual(['setUp', 'runTest', 'tearDown']) + + def test_cleanup_run_before_tearDown(self): + # Cleanup functions added with 'addCleanup' are called before tearDown + # runs. + self.test.addCleanup(self.logAppender, 'cleanup') + self.test.run(self.logging_result) + self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup']) + + def test_add_cleanup_called_if_setUp_fails(self): + # Cleanup functions added with 'addCleanup' are called even if setUp + # fails. Note that tearDown has a different behavior: it is only + # called when setUp succeeds. 
+ self.test.setUp = self.test.brokenSetUp + self.test.addCleanup(self.logAppender, 'cleanup') + self.test.run(self.logging_result) + self.assertTestLogEqual(['brokenSetUp', 'cleanup']) + + def test_addCleanup_called_in_reverse_order(self): + # Cleanup functions added with 'addCleanup' are called in reverse + # order. + # + # One of the main uses of addCleanup is to dynamically create + # resources that need some sort of explicit tearDown. Often one + # resource will be created in terms of another, e.g., + # self.first = self.makeFirst() + # self.second = self.makeSecond(self.first) + # + # When this happens, we generally want to clean up the second resource + # before the first one, since the second depends on the first. + self.test.addCleanup(self.logAppender, 'first') + self.test.addCleanup(self.logAppender, 'second') + self.test.run(self.logging_result) + self.assertTestLogEqual( + ['setUp', 'runTest', 'tearDown', 'second', 'first']) + + def test_tearDown_runs_after_cleanup_failure(self): + # tearDown runs even if a cleanup function fails. + self.test.addCleanup(lambda: 1/0) + self.test.run(self.logging_result) + self.assertTestLogEqual(['setUp', 'runTest', 'tearDown']) + + def test_cleanups_continue_running_after_error(self): + # All cleanups are always run, even if one or two of them fail. + self.test.addCleanup(self.logAppender, 'first') + self.test.addCleanup(lambda: 1/0) + self.test.addCleanup(self.logAppender, 'second') + self.test.run(self.logging_result) + self.assertTestLogEqual( + ['setUp', 'runTest', 'tearDown', 'second', 'first']) + + def test_error_in_cleanups_are_captured(self): + # If a cleanup raises an error, we want to record it and fail the the + # test, even though we go on to run other cleanups. + self.test.addCleanup(lambda: 1/0) + self.test.run(self.logging_result) + self.assertErrorLogEqual(['startTest', 'addError', 'stopTest']) + + def test_keyboard_interrupt_not_caught(self): + # If a cleanup raises KeyboardInterrupt, it gets reraised. 
+ def raiseKeyboardInterrupt(): + raise KeyboardInterrupt() + self.test.addCleanup(raiseKeyboardInterrupt) + self.assertThat(lambda:self.test.run(self.logging_result), + Raises(MatchesException(KeyboardInterrupt))) + + def test_all_errors_from_MultipleExceptions_reported(self): + # When a MultipleExceptions exception is caught, all the errors are + # reported. + def raiseMany(): + try: + 1/0 + except Exception: + exc_info1 = sys.exc_info() + try: + 1/0 + except Exception: + exc_info2 = sys.exc_info() + raise MultipleExceptions(exc_info1, exc_info2) + self.test.addCleanup(raiseMany) + self.logging_result = ExtendedTestResult() + self.test.run(self.logging_result) + self.assertEqual(['startTest', 'addError', 'stopTest'], + [event[0] for event in self.logging_result._events]) + self.assertEqual(set(['traceback', 'traceback-1']), + set(self.logging_result._events[1][2].keys())) + + def test_multipleCleanupErrorsReported(self): + # Errors from all failing cleanups are reported as separate backtraces. + self.test.addCleanup(lambda: 1/0) + self.test.addCleanup(lambda: 1/0) + self.logging_result = ExtendedTestResult() + self.test.run(self.logging_result) + self.assertEqual(['startTest', 'addError', 'stopTest'], + [event[0] for event in self.logging_result._events]) + self.assertEqual(set(['traceback', 'traceback-1']), + set(self.logging_result._events[1][2].keys())) + + def test_multipleErrorsCoreAndCleanupReported(self): + # Errors from all failing cleanups are reported, with stopTest, + # startTest inserted. 
+ self.test = TestAddCleanup.LoggingTest('brokenTest') + self.test.addCleanup(lambda: 1/0) + self.test.addCleanup(lambda: 1/0) + self.logging_result = ExtendedTestResult() + self.test.run(self.logging_result) + self.assertEqual(['startTest', 'addError', 'stopTest'], + [event[0] for event in self.logging_result._events]) + self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']), + set(self.logging_result._events[1][2].keys())) + + +class TestWithDetails(TestCase): + + run_test_with = FullStackRunTest + + def assertDetailsProvided(self, case, expected_outcome, expected_keys): + """Assert that when case is run, details are provided to the result. + + :param case: A TestCase to run. + :param expected_outcome: The call that should be made. + :param expected_keys: The keys to look for. + """ + result = ExtendedTestResult() + case.run(result) + case = result._events[0][1] + expected = [ + ('startTest', case), + (expected_outcome, case), + ('stopTest', case), + ] + self.assertEqual(3, len(result._events)) + self.assertEqual(expected[0], result._events[0]) + self.assertEqual(expected[1], result._events[1][0:2]) + # Checking the TB is right is rather tricky. doctest line matching + # would help, but 'meh'. 
+ self.assertEqual(sorted(expected_keys), + sorted(result._events[1][2].keys())) + self.assertEqual(expected[-1], result._events[-1]) + + def get_content(self): + return content.Content( + content.ContentType("text", "foo"), lambda: [_b('foo')]) + + +class TestExpectedFailure(TestWithDetails): + """Tests for expected failures and unexpected successess.""" + + run_test_with = FullStackRunTest + + def make_unexpected_case(self): + class Case(TestCase): + def test(self): + raise testcase._UnexpectedSuccess + case = Case('test') + return case + + def test_raising__UnexpectedSuccess_py27(self): + case = self.make_unexpected_case() + result = Python27TestResult() + case.run(result) + case = result._events[0][1] + self.assertEqual([ + ('startTest', case), + ('addUnexpectedSuccess', case), + ('stopTest', case), + ], result._events) + + def test_raising__UnexpectedSuccess_extended(self): + case = self.make_unexpected_case() + result = ExtendedTestResult() + case.run(result) + case = result._events[0][1] + self.assertEqual([ + ('startTest', case), + ('addUnexpectedSuccess', case, {}), + ('stopTest', case), + ], result._events) + + def make_xfail_case_xfails(self): + content = self.get_content() + class Case(TestCase): + def test(self): + self.addDetail("foo", content) + self.expectFailure("we are sad", self.assertEqual, + 1, 0) + case = Case('test') + return case + + def make_xfail_case_succeeds(self): + content = self.get_content() + class Case(TestCase): + def test(self): + self.addDetail("foo", content) + self.expectFailure("we are sad", self.assertEqual, + 1, 1) + case = Case('test') + return case + + def test_expectFailure_KnownFailure_extended(self): + case = self.make_xfail_case_xfails() + self.assertDetailsProvided(case, "addExpectedFailure", + ["foo", "traceback", "reason"]) + + def test_expectFailure_KnownFailure_unexpected_success(self): + case = self.make_xfail_case_succeeds() + self.assertDetailsProvided(case, "addUnexpectedSuccess", + ["foo", "reason"]) + + 
+class TestUniqueFactories(TestCase): + """Tests for getUniqueString and getUniqueInteger.""" + + run_test_with = FullStackRunTest + + def test_getUniqueInteger(self): + # getUniqueInteger returns an integer that increments each time you + # call it. + one = self.getUniqueInteger() + self.assertEqual(1, one) + two = self.getUniqueInteger() + self.assertEqual(2, two) + + def test_getUniqueString(self): + # getUniqueString returns the current test id followed by a unique + # integer. + name_one = self.getUniqueString() + self.assertEqual('%s-%d' % (self.id(), 1), name_one) + name_two = self.getUniqueString() + self.assertEqual('%s-%d' % (self.id(), 2), name_two) + + def test_getUniqueString_prefix(self): + # If getUniqueString is given an argument, it uses that argument as + # the prefix of the unique string, rather than the test id. + name_one = self.getUniqueString('foo') + self.assertThat(name_one, Equals('foo-1')) + name_two = self.getUniqueString('bar') + self.assertThat(name_two, Equals('bar-2')) + + +class TestCloneTestWithNewId(TestCase): + """Tests for clone_test_with_new_id.""" + + run_test_with = FullStackRunTest + + def test_clone_test_with_new_id(self): + class FooTestCase(TestCase): + def test_foo(self): + pass + test = FooTestCase('test_foo') + oldName = test.id() + newName = self.getUniqueString() + newTest = clone_test_with_new_id(test, newName) + self.assertEqual(newName, newTest.id()) + self.assertEqual(oldName, test.id(), + "the original test instance should be unchanged.") + + def test_cloned_testcase_does_not_share_details(self): + """A cloned TestCase does not share the details dict.""" + class Test(TestCase): + def test_foo(self): + self.addDetail( + 'foo', content.Content('text/plain', lambda: 'foo')) + orig_test = Test('test_foo') + cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString()) + orig_test.run(unittest.TestResult()) + self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes()) + self.assertEqual(None, 
cloned_test.getDetails().get('foo')) + + +class TestDetailsProvided(TestWithDetails): + + run_test_with = FullStackRunTest + + def test_addDetail(self): + mycontent = self.get_content() + self.addDetail("foo", mycontent) + details = self.getDetails() + self.assertEqual({"foo": mycontent}, details) + + def test_addError(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + 1/0 + self.assertDetailsProvided(Case("test"), "addError", + ["foo", "traceback"]) + + def test_addFailure(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + self.fail('yo') + self.assertDetailsProvided(Case("test"), "addFailure", + ["foo", "traceback"]) + + def test_addSkip(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + self.skip('yo') + self.assertDetailsProvided(Case("test"), "addSkip", + ["foo", "reason"]) + + def test_addSucccess(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + self.assertDetailsProvided(Case("test"), "addSuccess", + ["foo"]) + + def test_addUnexpectedSuccess(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + raise testcase._UnexpectedSuccess() + self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess", + ["foo"]) + + def test_addDetails_from_Mismatch(self): + content = self.get_content() + class Mismatch(object): + def describe(self): + return "Mismatch" + def get_details(self): + return {"foo": content} + class Matcher(object): + def match(self, thing): + return Mismatch() + def __str__(self): + return "a description" + class Case(TestCase): + def test(self): + self.assertThat("foo", Matcher()) + self.assertDetailsProvided(Case("test"), "addFailure", + ["foo", "traceback"]) + + def test_multiple_addDetails_from_Mismatch(self): + content = self.get_content() + class Mismatch(object): + def describe(self): + return "Mismatch" + def get_details(self): 
+ return {"foo": content, "bar": content} + class Matcher(object): + def match(self, thing): + return Mismatch() + def __str__(self): + return "a description" + class Case(TestCase): + def test(self): + self.assertThat("foo", Matcher()) + self.assertDetailsProvided(Case("test"), "addFailure", + ["bar", "foo", "traceback"]) + + def test_addDetails_with_same_name_as_key_from_get_details(self): + content = self.get_content() + class Mismatch(object): + def describe(self): + return "Mismatch" + def get_details(self): + return {"foo": content} + class Matcher(object): + def match(self, thing): + return Mismatch() + def __str__(self): + return "a description" + class Case(TestCase): + def test(self): + self.addDetail("foo", content) + self.assertThat("foo", Matcher()) + self.assertDetailsProvided(Case("test"), "addFailure", + ["foo", "foo-1", "traceback"]) + + +class TestSetupTearDown(TestCase): + + run_test_with = FullStackRunTest + + def test_setUpNotCalled(self): + class DoesnotcallsetUp(TestCase): + def setUp(self): + pass + def test_method(self): + pass + result = unittest.TestResult() + DoesnotcallsetUp('test_method').run(result) + self.assertEqual(1, len(result.errors)) + + def test_tearDownNotCalled(self): + class DoesnotcalltearDown(TestCase): + def test_method(self): + pass + def tearDown(self): + pass + result = unittest.TestResult() + DoesnotcalltearDown('test_method').run(result) + self.assertEqual(1, len(result.errors)) + + +class TestSkipping(TestCase): + """Tests for skipping of tests functionality.""" + + run_test_with = FullStackRunTest + + def test_skip_causes_skipException(self): + self.assertThat(lambda:self.skip("Skip this test"), + Raises(MatchesException(self.skipException))) + + def test_can_use_skipTest(self): + self.assertThat(lambda:self.skipTest("Skip this test"), + Raises(MatchesException(self.skipException))) + + def test_skip_without_reason_works(self): + class Test(TestCase): + def test(self): + raise self.skipException() + case = 
Test("test") + result = ExtendedTestResult() + case.run(result) + self.assertEqual('addSkip', result._events[1][0]) + self.assertEqual('no reason given.', + ''.join(result._events[1][2]['reason'].iter_text())) + + def test_skipException_in_setup_calls_result_addSkip(self): + class TestThatRaisesInSetUp(TestCase): + def setUp(self): + TestCase.setUp(self) + self.skip("skipping this test") + def test_that_passes(self): + pass + calls = [] + result = LoggingResult(calls) + test = TestThatRaisesInSetUp("test_that_passes") + test.run(result) + case = result._events[0][1] + self.assertEqual([('startTest', case), + ('addSkip', case, "skipping this test"), ('stopTest', case)], + calls) + + def test_skipException_in_test_method_calls_result_addSkip(self): + class SkippingTest(TestCase): + def test_that_raises_skipException(self): + self.skip("skipping this test") + result = Python27TestResult() + test = SkippingTest("test_that_raises_skipException") + test.run(result) + case = result._events[0][1] + self.assertEqual([('startTest', case), + ('addSkip', case, "skipping this test"), ('stopTest', case)], + result._events) + + def test_skip__in_setup_with_old_result_object_calls_addSuccess(self): + class SkippingTest(TestCase): + def setUp(self): + TestCase.setUp(self) + raise self.skipException("skipping this test") + def test_that_raises_skipException(self): + pass + result = Python26TestResult() + test = SkippingTest("test_that_raises_skipException") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skip_with_old_result_object_calls_addError(self): + class SkippingTest(TestCase): + def test_that_raises_skipException(self): + raise self.skipException("skipping this test") + result = Python26TestResult() + test = SkippingTest("test_that_raises_skipException") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skip_decorator(self): + class SkippingTest(TestCase): + @skip("skipping this test") + def 
test_that_is_decorated_with_skip(self): + self.fail() + result = Python26TestResult() + test = SkippingTest("test_that_is_decorated_with_skip") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skipIf_decorator(self): + class SkippingTest(TestCase): + @skipIf(True, "skipping this test") + def test_that_is_decorated_with_skipIf(self): + self.fail() + result = Python26TestResult() + test = SkippingTest("test_that_is_decorated_with_skipIf") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skipUnless_decorator(self): + class SkippingTest(TestCase): + @skipUnless(False, "skipping this test") + def test_that_is_decorated_with_skipUnless(self): + self.fail() + result = Python26TestResult() + test = SkippingTest("test_that_is_decorated_with_skipUnless") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + +class TestOnException(TestCase): + + run_test_with = FullStackRunTest + + def test_default_works(self): + events = [] + class Case(TestCase): + def method(self): + self.onException(an_exc_info) + events.append(True) + case = Case("method") + case.run() + self.assertThat(events, Equals([True])) + + def test_added_handler_works(self): + events = [] + class Case(TestCase): + def method(self): + self.addOnException(events.append) + self.onException(an_exc_info) + case = Case("method") + case.run() + self.assertThat(events, Equals([an_exc_info])) + + def test_handler_that_raises_is_not_caught(self): + events = [] + class Case(TestCase): + def method(self): + self.addOnException(events.index) + self.assertThat(lambda: self.onException(an_exc_info), + Raises(MatchesException(ValueError))) + case = Case("method") + case.run() + self.assertThat(events, Equals([])) + + +class TestPatchSupport(TestCase): + + run_test_with = FullStackRunTest + + class Case(TestCase): + def test(self): + pass + + def test_patch(self): + # TestCase.patch masks obj.attribute with the new value. 
+ self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + self.assertEqual('patched', self.foo) + + def test_patch_restored_after_run(self): + # TestCase.patch masks obj.attribute with the new value, but restores + # the original value after the test is finished. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + test.run() + self.assertEqual('original', self.foo) + + def test_successive_patches_apply(self): + # TestCase.patch can be called multiple times per test. Each time you + # call it, it overrides the original value. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + test.patch(self, 'foo', 'second') + self.assertEqual('second', self.foo) + + def test_successive_patches_restored_after_run(self): + # TestCase.patch restores the original value, no matter how many times + # it was called. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + test.patch(self, 'foo', 'second') + test.run() + self.assertEqual('original', self.foo) + + def test_patch_nonexistent_attribute(self): + # TestCase.patch can be used to patch a non-existent attribute. + test = self.Case('test') + test.patch(self, 'doesntexist', 'patched') + self.assertEqual('patched', self.doesntexist) + + def test_restore_nonexistent_attribute(self): + # TestCase.patch can be used to patch a non-existent attribute, after + # the test run, the attribute is then removed from the object. 
+ test = self.Case('test') + test.patch(self, 'doesntexist', 'patched') + test.run() + marker = object() + value = getattr(self, 'doesntexist', marker) + self.assertIs(marker, value) + + +class TestTestCaseSuper(TestCase): + + run_test_with = FullStackRunTest + + def test_setup_uses_super(self): + class OtherBaseCase(unittest.TestCase): + setup_called = False + def setUp(self): + self.setup_called = True + super(OtherBaseCase, self).setUp() + class OurCase(TestCase, OtherBaseCase): + def runTest(self): + pass + test = OurCase() + test.setUp() + test.tearDown() + self.assertTrue(test.setup_called) + + def test_teardown_uses_super(self): + class OtherBaseCase(unittest.TestCase): + teardown_called = False + def tearDown(self): + self.teardown_called = True + super(OtherBaseCase, self).tearDown() + class OurCase(TestCase, OtherBaseCase): + def runTest(self): + pass + test = OurCase() + test.setUp() + test.tearDown() + self.assertTrue(test.teardown_called) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py new file mode 100644 index 00000000000..a241788bea8 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py @@ -0,0 +1,1507 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. 
+ +"""Test TestResults and related things.""" + +__metaclass__ = type + +import codecs +import datetime +import doctest +import os +import shutil +import sys +import tempfile +import threading +import warnings + +from testtools import ( + ExtendedToOriginalDecorator, + MultiTestResult, + TestCase, + TestResult, + TextTestResult, + ThreadsafeForwardingResult, + testresult, + ) +from testtools.compat import ( + _b, + _get_exception_encoding, + _r, + _u, + str_is_unicode, + StringIO, + ) +from testtools.content import ( + Content, + content_from_stream, + text_content, + ) +from testtools.content_type import ContentType, UTF8_TEXT +from testtools.matchers import ( + DocTestMatches, + Equals, + MatchesException, + Raises, + ) +from testtools.tests.helpers import ( + an_exc_info, + FullStackRunTest, + LoggingResult, + run_with_stack_hidden, + ) +from testtools.testresult.doubles import ( + Python26TestResult, + Python27TestResult, + ExtendedTestResult, + ) +from testtools.testresult.real import ( + _details_to_str, + utc, + ) + + +def make_erroring_test(): + class Test(TestCase): + def error(self): + 1/0 + return Test("error") + + +def make_failing_test(): + class Test(TestCase): + def failed(self): + self.fail("yo!") + return Test("failed") + + +def make_unexpectedly_successful_test(): + class Test(TestCase): + def succeeded(self): + self.expectFailure("yo!", lambda: None) + return Test("succeeded") + + +def make_test(): + class Test(TestCase): + def test(self): + pass + return Test("test") + + +def make_exception_info(exceptionFactory, *args, **kwargs): + try: + raise exceptionFactory(*args, **kwargs) + except: + return sys.exc_info() + + +class Python26Contract(object): + + def test_fresh_result_is_successful(self): + # A result is considered successful before any tests are run. + result = self.makeResult() + self.assertTrue(result.wasSuccessful()) + + def test_addError_is_failure(self): + # addError fails the test run. 
+ result = self.makeResult() + result.startTest(self) + result.addError(self, an_exc_info) + result.stopTest(self) + self.assertFalse(result.wasSuccessful()) + + def test_addFailure_is_failure(self): + # addFailure fails the test run. + result = self.makeResult() + result.startTest(self) + result.addFailure(self, an_exc_info) + result.stopTest(self) + self.assertFalse(result.wasSuccessful()) + + def test_addSuccess_is_success(self): + # addSuccess does not fail the test run. + result = self.makeResult() + result.startTest(self) + result.addSuccess(self) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + +class Python27Contract(Python26Contract): + + def test_addExpectedFailure(self): + # Calling addExpectedFailure(test, exc_info) completes ok. + result = self.makeResult() + result.startTest(self) + result.addExpectedFailure(self, an_exc_info) + + def test_addExpectedFailure_is_success(self): + # addExpectedFailure does not fail the test run. + result = self.makeResult() + result.startTest(self) + result.addExpectedFailure(self, an_exc_info) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_addSkipped(self): + # Calling addSkip(test, reason) completes ok. + result = self.makeResult() + result.startTest(self) + result.addSkip(self, _u("Skipped for some reason")) + + def test_addSkip_is_success(self): + # addSkip does not fail the test run. + result = self.makeResult() + result.startTest(self) + result.addSkip(self, _u("Skipped for some reason")) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_addUnexpectedSuccess(self): + # Calling addUnexpectedSuccess(test) completes ok. + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + + def test_addUnexpectedSuccess_was_successful(self): + # addUnexpectedSuccess does not fail the test run in Python 2.7. 
+ result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_startStopTestRun(self): + # Calling startTestRun completes ok. + result = self.makeResult() + result.startTestRun() + result.stopTestRun() + + +class DetailsContract(Python27Contract): + """Tests for the contract of TestResults.""" + + def test_addExpectedFailure_details(self): + # Calling addExpectedFailure(test, details=xxx) completes ok. + result = self.makeResult() + result.startTest(self) + result.addExpectedFailure(self, details={}) + + def test_addError_details(self): + # Calling addError(test, details=xxx) completes ok. + result = self.makeResult() + result.startTest(self) + result.addError(self, details={}) + + def test_addFailure_details(self): + # Calling addFailure(test, details=xxx) completes ok. + result = self.makeResult() + result.startTest(self) + result.addFailure(self, details={}) + + def test_addSkipped_details(self): + # Calling addSkip(test, reason) completes ok. + result = self.makeResult() + result.startTest(self) + result.addSkip(self, details={}) + + def test_addUnexpectedSuccess_details(self): + # Calling addUnexpectedSuccess(test) completes ok. + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self, details={}) + + def test_addSuccess_details(self): + # Calling addSuccess(test) completes ok. + result = self.makeResult() + result.startTest(self) + result.addSuccess(self, details={}) + + +class FallbackContract(DetailsContract): + """When we fallback we take our policy choice to map calls. + + For instance, we map unexpectedSuccess to an error code, not to success. + """ + + def test_addUnexpectedSuccess_was_successful(self): + # addUnexpectedSuccess fails test run in testtools. 
+ result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + result.stopTest(self) + self.assertFalse(result.wasSuccessful()) + + +class StartTestRunContract(FallbackContract): + """Defines the contract for testtools policy choices. + + That is things which are not simply extensions to unittest but choices we + have made differently. + """ + + def test_startTestRun_resets_unexpected_success(self): + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + result.stopTest(self) + result.startTestRun() + self.assertTrue(result.wasSuccessful()) + + def test_startTestRun_resets_failure(self): + result = self.makeResult() + result.startTest(self) + result.addFailure(self, an_exc_info) + result.stopTest(self) + result.startTestRun() + self.assertTrue(result.wasSuccessful()) + + def test_startTestRun_resets_errors(self): + result = self.makeResult() + result.startTest(self) + result.addError(self, an_exc_info) + result.stopTest(self) + result.startTestRun() + self.assertTrue(result.wasSuccessful()) + + +class TestTestResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + return TestResult() + + +class TestMultiTestResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + return MultiTestResult(TestResult(), TestResult()) + + +class TestTextTestResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + return TextTestResult(StringIO()) + + +class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + result_semaphore = threading.Semaphore(1) + target = TestResult() + return ThreadsafeForwardingResult(target, result_semaphore) + + +class TestExtendedTestResultContract(TestCase, StartTestRunContract): + + def makeResult(self): + return ExtendedTestResult() + + +class 
TestPython26TestResultContract(TestCase, Python26Contract): + + def makeResult(self): + return Python26TestResult() + + +class TestAdaptedPython26TestResultContract(TestCase, FallbackContract): + + def makeResult(self): + return ExtendedToOriginalDecorator(Python26TestResult()) + + +class TestPython27TestResultContract(TestCase, Python27Contract): + + def makeResult(self): + return Python27TestResult() + + +class TestAdaptedPython27TestResultContract(TestCase, DetailsContract): + + def makeResult(self): + return ExtendedToOriginalDecorator(Python27TestResult()) + + +class TestTestResult(TestCase): + """Tests for 'TestResult'.""" + + run_tests_with = FullStackRunTest + + def makeResult(self): + """Make an arbitrary result for testing.""" + return TestResult() + + def test_addSkipped(self): + # Calling addSkip on a TestResult records the test that was skipped in + # its skip_reasons dict. + result = self.makeResult() + result.addSkip(self, _u("Skipped for some reason")) + self.assertEqual({_u("Skipped for some reason"):[self]}, + result.skip_reasons) + result.addSkip(self, _u("Skipped for some reason")) + self.assertEqual({_u("Skipped for some reason"):[self, self]}, + result.skip_reasons) + result.addSkip(self, _u("Skipped for another reason")) + self.assertEqual({_u("Skipped for some reason"):[self, self], + _u("Skipped for another reason"):[self]}, + result.skip_reasons) + + def test_now_datetime_now(self): + result = self.makeResult() + olddatetime = testresult.real.datetime + def restore(): + testresult.real.datetime = olddatetime + self.addCleanup(restore) + class Module: + pass + now = datetime.datetime.now(utc) + stubdatetime = Module() + stubdatetime.datetime = Module() + stubdatetime.datetime.now = lambda tz: now + testresult.real.datetime = stubdatetime + # Calling _now() looks up the time. + self.assertEqual(now, result._now()) + then = now + datetime.timedelta(0, 1) + # Set an explicit datetime, which gets returned from then on. 
+ result.time(then) + self.assertNotEqual(now, result._now()) + self.assertEqual(then, result._now()) + # go back to looking it up. + result.time(None) + self.assertEqual(now, result._now()) + + def test_now_datetime_time(self): + result = self.makeResult() + now = datetime.datetime.now(utc) + result.time(now) + self.assertEqual(now, result._now()) + + def test_traceback_formatting_without_stack_hidden(self): + # During the testtools test run, we show our levels of the stack, + # because we want to be able to use our test suite to debug our own + # code. + result = self.makeResult() + test = make_erroring_test() + test.run(result) + self.assertThat( + result.errors[0][1], + DocTestMatches( + 'Traceback (most recent call last):\n' + ' File "...testtools...runtest.py", line ..., in _run_user\n' + ' return fn(*args, **kwargs)\n' + ' File "...testtools...testcase.py", line ..., in _run_test_method\n' + ' return self._get_test_method()()\n' + ' File "...testtools...tests...test_testresult.py", line ..., in error\n' + ' 1/0\n' + 'ZeroDivisionError: ...\n', + doctest.ELLIPSIS | doctest.REPORT_UDIFF)) + + def test_traceback_formatting_with_stack_hidden(self): + result = self.makeResult() + test = make_erroring_test() + run_with_stack_hidden(True, test.run, result) + self.assertThat( + result.errors[0][1], + DocTestMatches( + 'Traceback (most recent call last):\n' + ' File "...testtools...tests...test_testresult.py", line ..., in error\n' + ' 1/0\n' + 'ZeroDivisionError: ...\n', + doctest.ELLIPSIS)) + + +class TestMultiTestResult(TestCase): + """Tests for 'MultiTestResult'.""" + + def setUp(self): + super(TestMultiTestResult, self).setUp() + self.result1 = LoggingResult([]) + self.result2 = LoggingResult([]) + self.multiResult = MultiTestResult(self.result1, self.result2) + + def assertResultLogsEqual(self, expectedEvents): + """Assert that our test results have received the expected events.""" + self.assertEqual(expectedEvents, self.result1._events) + 
self.assertEqual(expectedEvents, self.result2._events) + + def test_empty(self): + # Initializing a `MultiTestResult` doesn't do anything to its + # `TestResult`s. + self.assertResultLogsEqual([]) + + def test_startTest(self): + # Calling `startTest` on a `MultiTestResult` calls `startTest` on all + # its `TestResult`s. + self.multiResult.startTest(self) + self.assertResultLogsEqual([('startTest', self)]) + + def test_stopTest(self): + # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all + # its `TestResult`s. + self.multiResult.stopTest(self) + self.assertResultLogsEqual([('stopTest', self)]) + + def test_addSkipped(self): + # Calling `addSkip` on a `MultiTestResult` calls addSkip on its + # results. + reason = _u("Skipped for some reason") + self.multiResult.addSkip(self, reason) + self.assertResultLogsEqual([('addSkip', self, reason)]) + + def test_addSuccess(self): + # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on + # all its `TestResult`s. + self.multiResult.addSuccess(self) + self.assertResultLogsEqual([('addSuccess', self)]) + + def test_done(self): + # Calling `done` on a `MultiTestResult` calls `done` on all its + # `TestResult`s. + self.multiResult.done() + self.assertResultLogsEqual([('done')]) + + def test_addFailure(self): + # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on + # all its `TestResult`s. + exc_info = make_exception_info(AssertionError, 'failure') + self.multiResult.addFailure(self, exc_info) + self.assertResultLogsEqual([('addFailure', self, exc_info)]) + + def test_addError(self): + # Calling `addError` on a `MultiTestResult` calls `addError` on all + # its `TestResult`s. + exc_info = make_exception_info(RuntimeError, 'error') + self.multiResult.addError(self, exc_info) + self.assertResultLogsEqual([('addError', self, exc_info)]) + + def test_startTestRun(self): + # Calling `startTestRun` on a `MultiTestResult` forwards to all its + # `TestResult`s. 
+ self.multiResult.startTestRun() + self.assertResultLogsEqual([('startTestRun')]) + + def test_stopTestRun(self): + # Calling `stopTestRun` on a `MultiTestResult` forwards to all its + # `TestResult`s. + self.multiResult.stopTestRun() + self.assertResultLogsEqual([('stopTestRun')]) + + def test_stopTestRun_returns_results(self): + # `MultiTestResult.stopTestRun` returns a tuple of all of the return + # values the `stopTestRun`s that it forwards to. + class Result(LoggingResult): + def stopTestRun(self): + super(Result, self).stopTestRun() + return 'foo' + multi_result = MultiTestResult(Result([]), Result([])) + result = multi_result.stopTestRun() + self.assertEqual(('foo', 'foo'), result) + + def test_time(self): + # the time call is dispatched, not eaten by the base class + self.multiResult.time('foo') + self.assertResultLogsEqual([('time', 'foo')]) + + +class TestTextTestResult(TestCase): + """Tests for 'TextTestResult'.""" + + def setUp(self): + super(TestTextTestResult, self).setUp() + self.result = TextTestResult(StringIO()) + + def getvalue(self): + return self.result.stream.getvalue() + + def test__init_sets_stream(self): + result = TextTestResult("fp") + self.assertEqual("fp", result.stream) + + def reset_output(self): + self.result.stream = StringIO() + + def test_startTestRun(self): + self.result.startTestRun() + self.assertEqual("Tests running...\n", self.getvalue()) + + def test_stopTestRun_count_many(self): + test = make_test() + self.result.startTestRun() + self.result.startTest(test) + self.result.stopTest(test) + self.result.startTest(test) + self.result.stopTest(test) + self.result.stream = StringIO() + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS)) + + def test_stopTestRun_count_single(self): + test = make_test() + self.result.startTestRun() + self.result.startTest(test) + self.result.stopTest(test) + self.reset_output() + self.result.stopTestRun() + 
self.assertThat(self.getvalue(), + DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS)) + + def test_stopTestRun_count_zero(self): + self.result.startTestRun() + self.reset_output() + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS)) + + def test_stopTestRun_current_time(self): + test = make_test() + now = datetime.datetime.now(utc) + self.result.time(now) + self.result.startTestRun() + self.result.startTest(test) + now = now + datetime.timedelta(0, 0, 0, 1) + self.result.time(now) + self.result.stopTest(test) + self.reset_output() + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS)) + + def test_stopTestRun_successful(self): + self.result.startTestRun() + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("...\nOK\n", doctest.ELLIPSIS)) + + def test_stopTestRun_not_successful_failure(self): + test = make_failing_test() + self.result.startTestRun() + test.run(self.result) + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS)) + + def test_stopTestRun_not_successful_error(self): + test = make_erroring_test() + self.result.startTestRun() + test.run(self.result) + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS)) + + def test_stopTestRun_not_successful_unexpected_success(self): + test = make_unexpectedly_successful_test() + self.result.startTestRun() + test.run(self.result) + self.result.stopTestRun() + self.assertThat(self.getvalue(), + DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS)) + + def test_stopTestRun_shows_details(self): + def run_tests(): + self.result.startTestRun() + make_erroring_test().run(self.result) + make_unexpectedly_successful_test().run(self.result) + make_failing_test().run(self.result) + 
self.reset_output() + self.result.stopTestRun() + run_with_stack_hidden(True, run_tests) + self.assertThat(self.getvalue(), + DocTestMatches("""...====================================================================== +ERROR: testtools.tests.test_testresult.Test.error +---------------------------------------------------------------------- +Traceback (most recent call last): + File "...testtools...tests...test_testresult.py", line ..., in error + 1/0 +ZeroDivisionError:... divi... by zero... +====================================================================== +FAIL: testtools.tests.test_testresult.Test.failed +---------------------------------------------------------------------- +Traceback (most recent call last): + File "...testtools...tests...test_testresult.py", line ..., in failed + self.fail("yo!") +AssertionError: yo! +====================================================================== +UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded +---------------------------------------------------------------------- +...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF)) + + +class TestThreadSafeForwardingResult(TestCase): + """Tests for `TestThreadSafeForwardingResult`.""" + + def setUp(self): + super(TestThreadSafeForwardingResult, self).setUp() + self.result_semaphore = threading.Semaphore(1) + self.target = LoggingResult([]) + self.result1 = ThreadsafeForwardingResult(self.target, + self.result_semaphore) + + def test_nonforwarding_methods(self): + # startTest and stopTest are not forwarded because they need to be + # batched. 
+ self.result1.startTest(self) + self.result1.stopTest(self) + self.assertEqual([], self.target._events) + + def test_startTestRun(self): + self.result1.startTestRun() + self.result2 = ThreadsafeForwardingResult(self.target, + self.result_semaphore) + self.result2.startTestRun() + self.assertEqual(["startTestRun", "startTestRun"], self.target._events) + + def test_stopTestRun(self): + self.result1.stopTestRun() + self.result2 = ThreadsafeForwardingResult(self.target, + self.result_semaphore) + self.result2.stopTestRun() + self.assertEqual(["stopTestRun", "stopTestRun"], self.target._events) + + def test_forwarding_methods(self): + # error, failure, skip and success are forwarded in batches. + exc_info1 = make_exception_info(RuntimeError, 'error') + starttime1 = datetime.datetime.utcfromtimestamp(1.489) + endtime1 = datetime.datetime.utcfromtimestamp(51.476) + self.result1.time(starttime1) + self.result1.startTest(self) + self.result1.time(endtime1) + self.result1.addError(self, exc_info1) + exc_info2 = make_exception_info(AssertionError, 'failure') + starttime2 = datetime.datetime.utcfromtimestamp(2.489) + endtime2 = datetime.datetime.utcfromtimestamp(3.476) + self.result1.time(starttime2) + self.result1.startTest(self) + self.result1.time(endtime2) + self.result1.addFailure(self, exc_info2) + reason = _u("Skipped for some reason") + starttime3 = datetime.datetime.utcfromtimestamp(4.489) + endtime3 = datetime.datetime.utcfromtimestamp(5.476) + self.result1.time(starttime3) + self.result1.startTest(self) + self.result1.time(endtime3) + self.result1.addSkip(self, reason) + starttime4 = datetime.datetime.utcfromtimestamp(6.489) + endtime4 = datetime.datetime.utcfromtimestamp(7.476) + self.result1.time(starttime4) + self.result1.startTest(self) + self.result1.time(endtime4) + self.result1.addSuccess(self) + self.assertEqual([ + ('time', starttime1), + ('startTest', self), + ('time', endtime1), + ('addError', self, exc_info1), + ('stopTest', self), + ('time', 
starttime2), + ('startTest', self), + ('time', endtime2), + ('addFailure', self, exc_info2), + ('stopTest', self), + ('time', starttime3), + ('startTest', self), + ('time', endtime3), + ('addSkip', self, reason), + ('stopTest', self), + ('time', starttime4), + ('startTest', self), + ('time', endtime4), + ('addSuccess', self), + ('stopTest', self), + ], self.target._events) + + +class TestExtendedToOriginalResultDecoratorBase(TestCase): + + def make_26_result(self): + self.result = Python26TestResult() + self.make_converter() + + def make_27_result(self): + self.result = Python27TestResult() + self.make_converter() + + def make_converter(self): + self.converter = ExtendedToOriginalDecorator(self.result) + + def make_extended_result(self): + self.result = ExtendedTestResult() + self.make_converter() + + def check_outcome_details(self, outcome): + """Call an outcome with a details dict to be passed through.""" + # This dict is /not/ convertible - thats deliberate, as it should + # not hit the conversion code path. + details = {'foo': 'bar'} + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(outcome, self, details)], self.result._events) + + def get_details_and_string(self): + """Get a details dict and expected string.""" + text1 = lambda: [_b("1\n2\n")] + text2 = lambda: [_b("3\n4\n")] + bin1 = lambda: [_b("5\n")] + details = {'text 1': Content(ContentType('text', 'plain'), text1), + 'text 2': Content(ContentType('text', 'strange'), text2), + 'bin 1': Content(ContentType('application', 'binary'), bin1)} + return (details, + ("Binary content:\n" + " bin 1 (application/binary)\n" + "\n" + "text 1: {{{\n" + "1\n" + "2\n" + "}}}\n" + "\n" + "text 2: {{{\n" + "3\n" + "4\n" + "}}}\n")) + + def check_outcome_details_to_exec_info(self, outcome, expected=None): + """Call an outcome with a details dict to be made into exc_info.""" + # The conversion is a done using RemoteError and the string contents + # of the text types in the details dict. 
+ if not expected: + expected = outcome + details, err_str = self.get_details_and_string() + getattr(self.converter, outcome)(self, details=details) + err = self.converter._details_to_exc_info(details) + self.assertEqual([(expected, self, err)], self.result._events) + + def check_outcome_details_to_nothing(self, outcome, expected=None): + """Call an outcome with a details dict to be swallowed.""" + if not expected: + expected = outcome + details = {'foo': 'bar'} + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_details_to_string(self, outcome): + """Call an outcome with a details dict to be stringified.""" + details, err_str = self.get_details_and_string() + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(outcome, self, err_str)], self.result._events) + + def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None): + """Call an outcome with a details dict to have an arg extracted.""" + details, _ = self.get_details_and_string() + if extra_detail: + details.update(extra_detail) + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(outcome, self, arg)], self.result._events) + + def check_outcome_exc_info(self, outcome, expected=None): + """Check that calling a legacy outcome still works.""" + # calling some outcome with the legacy exc_info style api (no keyword + # parameters) gets passed through. + if not expected: + expected = outcome + err = sys.exc_info() + getattr(self.converter, outcome)(self, err) + self.assertEqual([(expected, self, err)], self.result._events) + + def check_outcome_exc_info_to_nothing(self, outcome, expected=None): + """Check that calling a legacy outcome on a fallback works.""" + # calling some outcome with the legacy exc_info style api (no keyword + # parameters) gets passed through. 
+ if not expected: + expected = outcome + err = sys.exc_info() + getattr(self.converter, outcome)(self, err) + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_nothing(self, outcome, expected=None): + """Check that calling a legacy outcome still works.""" + if not expected: + expected = outcome + getattr(self.converter, outcome)(self) + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_string_nothing(self, outcome, expected): + """Check that calling outcome with a string calls expected.""" + getattr(self.converter, outcome)(self, "foo") + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_string(self, outcome): + """Check that calling outcome with a string works.""" + getattr(self.converter, outcome)(self, "foo") + self.assertEqual([(outcome, self, "foo")], self.result._events) + + +class TestExtendedToOriginalResultDecorator( + TestExtendedToOriginalResultDecoratorBase): + + def test_progress_py26(self): + self.make_26_result() + self.converter.progress(1, 2) + + def test_progress_py27(self): + self.make_27_result() + self.converter.progress(1, 2) + + def test_progress_pyextended(self): + self.make_extended_result() + self.converter.progress(1, 2) + self.assertEqual([('progress', 1, 2)], self.result._events) + + def test_shouldStop(self): + self.make_26_result() + self.assertEqual(False, self.converter.shouldStop) + self.converter.decorated.stop() + self.assertEqual(True, self.converter.shouldStop) + + def test_startTest_py26(self): + self.make_26_result() + self.converter.startTest(self) + self.assertEqual([('startTest', self)], self.result._events) + + def test_startTest_py27(self): + self.make_27_result() + self.converter.startTest(self) + self.assertEqual([('startTest', self)], self.result._events) + + def test_startTest_pyextended(self): + self.make_extended_result() + self.converter.startTest(self) + self.assertEqual([('startTest', self)], self.result._events) + + def 
test_startTestRun_py26(self): + self.make_26_result() + self.converter.startTestRun() + self.assertEqual([], self.result._events) + + def test_startTestRun_py27(self): + self.make_27_result() + self.converter.startTestRun() + self.assertEqual([('startTestRun',)], self.result._events) + + def test_startTestRun_pyextended(self): + self.make_extended_result() + self.converter.startTestRun() + self.assertEqual([('startTestRun',)], self.result._events) + + def test_stopTest_py26(self): + self.make_26_result() + self.converter.stopTest(self) + self.assertEqual([('stopTest', self)], self.result._events) + + def test_stopTest_py27(self): + self.make_27_result() + self.converter.stopTest(self) + self.assertEqual([('stopTest', self)], self.result._events) + + def test_stopTest_pyextended(self): + self.make_extended_result() + self.converter.stopTest(self) + self.assertEqual([('stopTest', self)], self.result._events) + + def test_stopTestRun_py26(self): + self.make_26_result() + self.converter.stopTestRun() + self.assertEqual([], self.result._events) + + def test_stopTestRun_py27(self): + self.make_27_result() + self.converter.stopTestRun() + self.assertEqual([('stopTestRun',)], self.result._events) + + def test_stopTestRun_pyextended(self): + self.make_extended_result() + self.converter.stopTestRun() + self.assertEqual([('stopTestRun',)], self.result._events) + + def test_tags_py26(self): + self.make_26_result() + self.converter.tags(1, 2) + + def test_tags_py27(self): + self.make_27_result() + self.converter.tags(1, 2) + + def test_tags_pyextended(self): + self.make_extended_result() + self.converter.tags(1, 2) + self.assertEqual([('tags', 1, 2)], self.result._events) + + def test_time_py26(self): + self.make_26_result() + self.converter.time(1) + + def test_time_py27(self): + self.make_27_result() + self.converter.time(1) + + def test_time_pyextended(self): + self.make_extended_result() + self.converter.time(1) + self.assertEqual([('time', 1)], self.result._events) + + 
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addError' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_exc_info(self.outcome) + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_exc_info(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_exc_info(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_details_to_exec_info(self.outcome) + + def test_outcome_Extended_py27(self): + self.make_27_result() + self.check_outcome_details_to_exec_info(self.outcome) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + def test_outcome__no_details(self): + self.make_extended_result() + self.assertThat( + lambda: getattr(self.converter, self.outcome)(self), + Raises(MatchesException(ValueError))) + + +class TestExtendedToOriginalAddFailure( + TestExtendedToOriginalAddError): + + outcome = 'addFailure' + + +class TestExtendedToOriginalAddExpectedFailure( + TestExtendedToOriginalAddError): + + outcome = 'addExpectedFailure' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess') + + def test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_details_to_nothing(self.outcome, 'addSuccess') + + + +class TestExtendedToOriginalAddSkip( + TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addSkip' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_string_nothing(self.outcome, 'addSuccess') + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_string(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_string(self.outcome) + + def 
test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_string_nothing(self.outcome, 'addSuccess') + + def test_outcome_Extended_py27_no_reason(self): + self.make_27_result() + self.check_outcome_details_to_string(self.outcome) + + def test_outcome_Extended_py27_reason(self): + self.make_27_result() + self.check_outcome_details_to_arg(self.outcome, 'foo', + {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])}) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + def test_outcome__no_details(self): + self.make_extended_result() + self.assertThat( + lambda: getattr(self.converter, self.outcome)(self), + Raises(MatchesException(ValueError))) + + +class TestExtendedToOriginalAddSuccess( + TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addSuccess' + expected = 'addSuccess' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_nothing(self.outcome, self.expected) + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_details_to_nothing(self.outcome, self.expected) + + def test_outcome_Extended_py27(self): + self.make_27_result() + self.check_outcome_details_to_nothing(self.outcome) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + +class TestExtendedToOriginalAddUnexpectedSuccess( + TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addUnexpectedSuccess' + expected = 'addFailure' + + def test_outcome_Original_py26(self): + self.make_26_result() + getattr(self.converter, self.outcome)(self) + [event] = self.result._events + self.assertEqual((self.expected, self), event[:2]) + + def 
test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + getattr(self.converter, self.outcome)(self) + [event] = self.result._events + self.assertEqual((self.expected, self), event[:2]) + + def test_outcome_Extended_py27(self): + self.make_27_result() + self.check_outcome_details_to_nothing(self.outcome) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + +class TestExtendedToOriginalResultOtherAttributes( + TestExtendedToOriginalResultDecoratorBase): + + def test_other_attribute(self): + class OtherExtendedResult: + def foo(self): + return 2 + bar = 1 + self.result = OtherExtendedResult() + self.make_converter() + self.assertEqual(1, self.converter.bar) + self.assertEqual(2, self.converter.foo()) + + +class TestNonAsciiResults(TestCase): + """Test all kinds of tracebacks are cleanly interpreted as unicode + + Currently only uses weak "contains" assertions, would be good to be much + stricter about the expected output. This would add a few failures for the + current release of IronPython for instance, which gets some traceback + lines muddled. 
+ """ + + _sample_texts = ( + _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only + _u("\u5357\u7121"), # In ISO 2022 encodings + _u("\xa7\xa7\xa7"), # In ISO 8859 encodings + ) + # Everything but Jython shows syntax errors on the current character + _error_on_character = os.name != "java" + + def _run(self, stream, test): + """Run the test, the same as in testtools.run but not to stdout""" + result = TextTestResult(stream) + result.startTestRun() + try: + return test.run(result) + finally: + result.stopTestRun() + + def _write_module(self, name, encoding, contents): + """Create Python module on disk with contents in given encoding""" + try: + # Need to pre-check that the coding is valid or codecs.open drops + # the file without closing it which breaks non-refcounted pythons + codecs.lookup(encoding) + except LookupError: + self.skip("Encoding unsupported by implementation: %r" % encoding) + f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding) + try: + f.write(contents) + finally: + f.close() + + def _test_external_case(self, testline, coding="ascii", modulelevel="", + suffix=""): + """Create and run a test case in a seperate module""" + self._setup_external_case(testline, coding, modulelevel, suffix) + return self._run_external_case() + + def _setup_external_case(self, testline, coding="ascii", modulelevel="", + suffix=""): + """Create a test case in a seperate module""" + _, prefix, self.modname = self.id().rsplit(".", 2) + self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix) + self.addCleanup(shutil.rmtree, self.dir) + self._write_module(self.modname, coding, + # Older Python 2 versions don't see a coding declaration in a + # docstring so it has to be in a comment, but then we can't + # workaround bug: <http://ironpython.codeplex.com/workitem/26940> + "# coding: %s\n" + "import testtools\n" + "%s\n" + "class Test(testtools.TestCase):\n" + " def runTest(self):\n" + " %s\n" % (coding, modulelevel, testline)) + + def _run_external_case(self): 
+ """Run the prepared test case in a seperate module""" + sys.path.insert(0, self.dir) + self.addCleanup(sys.path.remove, self.dir) + module = __import__(self.modname) + self.addCleanup(sys.modules.pop, self.modname) + stream = StringIO() + self._run(stream, module.Test()) + return stream.getvalue() + + def _silence_deprecation_warnings(self): + """Shut up DeprecationWarning for this test only""" + warnings.simplefilter("ignore", DeprecationWarning) + self.addCleanup(warnings.filters.remove, warnings.filters[0]) + + def _get_sample_text(self, encoding="unicode_internal"): + if encoding is None and str_is_unicode: + encoding = "unicode_internal" + for u in self._sample_texts: + try: + b = u.encode(encoding) + if u == b.decode(encoding): + if str_is_unicode: + return u, u + return u, b + except (LookupError, UnicodeError): + pass + self.skip("Could not find a sample text for encoding: %r" % encoding) + + def _as_output(self, text): + return text + + def test_non_ascii_failure_string(self): + """Assertion contents can be non-ascii and should get decoded""" + text, raw = self._get_sample_text(_get_exception_encoding()) + textoutput = self._test_external_case("self.fail(%s)" % _r(raw)) + self.assertIn(self._as_output(text), textoutput) + + def test_non_ascii_failure_string_via_exec(self): + """Assertion via exec can be non-ascii and still gets decoded""" + text, raw = self._get_sample_text(_get_exception_encoding()) + textoutput = self._test_external_case( + testline='exec ("self.fail(%s)")' % _r(raw)) + self.assertIn(self._as_output(text), textoutput) + + def test_control_characters_in_failure_string(self): + """Control characters in assertions should be escaped""" + textoutput = self._test_external_case("self.fail('\\a\\a\\a')") + self.expectFailure("Defense against the beeping horror unimplemented", + self.assertNotIn, self._as_output("\a\a\a"), textoutput) + self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput) + + def test_os_error(self): + """Locale 
error messages from the OS shouldn't break anything""" + textoutput = self._test_external_case( + modulelevel="import os", + testline="os.mkdir('/')") + if os.name != "nt" or sys.version_info < (2, 5): + self.assertIn(self._as_output("OSError: "), textoutput) + else: + self.assertIn(self._as_output("WindowsError: "), textoutput) + + def test_assertion_text_shift_jis(self): + """A terminal raw backslash in an encoded string is weird but fine""" + example_text = _u("\u5341") + textoutput = self._test_external_case( + coding="shift_jis", + testline="self.fail('%s')" % example_text) + if str_is_unicode: + output_text = example_text + else: + output_text = example_text.encode("shift_jis").decode( + _get_exception_encoding(), "replace") + self.assertIn(self._as_output("AssertionError: %s" % output_text), + textoutput) + + def test_file_comment_iso2022_jp(self): + """Control character escapes must be preserved if valid encoding""" + example_text, _ = self._get_sample_text("iso2022_jp") + textoutput = self._test_external_case( + coding="iso2022_jp", + testline="self.fail('Simple') # %s" % example_text) + self.assertIn(self._as_output(example_text), textoutput) + + def test_unicode_exception(self): + """Exceptions that can be formatted losslessly as unicode should be""" + example_text, _ = self._get_sample_text() + exception_class = ( + "class FancyError(Exception):\n" + # A __unicode__ method does nothing on py3k but the default works + " def __unicode__(self):\n" + " return self.args[0]\n") + textoutput = self._test_external_case( + modulelevel=exception_class, + testline="raise FancyError(%s)" % _r(example_text)) + self.assertIn(self._as_output(example_text), textoutput) + + def test_unprintable_exception(self): + """A totally useless exception instance still prints something""" + exception_class = ( + "class UnprintableError(Exception):\n" + " def __str__(self):\n" + " raise RuntimeError\n" + " def __unicode__(self):\n" + " raise RuntimeError\n" + " def 
__repr__(self):\n" + " raise RuntimeError\n") + textoutput = self._test_external_case( + modulelevel=exception_class, + testline="raise UnprintableError") + self.assertIn(self._as_output( + "UnprintableError: <unprintable UnprintableError object>\n"), + textoutput) + + def test_string_exception(self): + """Raise a string rather than an exception instance if supported""" + if sys.version_info > (2, 6): + self.skip("No string exceptions in Python 2.6 or later") + elif sys.version_info > (2, 5): + self._silence_deprecation_warnings() + textoutput = self._test_external_case(testline="raise 'plain str'") + self.assertIn(self._as_output("\nplain str\n"), textoutput) + + def test_non_ascii_dirname(self): + """Script paths in the traceback can be non-ascii""" + text, raw = self._get_sample_text(sys.getfilesystemencoding()) + textoutput = self._test_external_case( + # Avoid bug in Python 3 by giving a unicode source encoding rather + # than just ascii which raises a SyntaxError with no other details + coding="utf-8", + testline="self.fail('Simple')", + suffix=raw) + self.assertIn(self._as_output(text), textoutput) + + def test_syntax_error(self): + """Syntax errors should still have fancy special-case formatting""" + textoutput = self._test_external_case("exec ('f(a, b c)')") + self.assertIn(self._as_output( + ' File "<string>", line 1\n' + ' f(a, b c)\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ' + ), textoutput) + + def test_syntax_error_malformed(self): + """Syntax errors with bogus parameters shouldn't break anything""" + textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)") + self.assertIn(self._as_output("\nSyntaxError: "), textoutput) + + def test_syntax_error_import_binary(self): + """Importing a binary file shouldn't break SyntaxError formatting""" + if sys.version_info < (2, 5): + # Python 2.4 assumes the file is latin-1 and tells you off + self._silence_deprecation_warnings() + self._setup_external_case("import bad") + f = 
open(os.path.join(self.dir, "bad.py"), "wb") + try: + f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9")) + finally: + f.close() + textoutput = self._run_external_case() + self.assertIn(self._as_output("\nSyntaxError: "), textoutput) + + def test_syntax_error_line_iso_8859_1(self): + """Syntax error on a latin-1 line shows the line decoded""" + text, raw = self._get_sample_text("iso-8859-1") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "iso-8859-1", + "# coding: iso-8859-1\n! = 0 # %s\n" % text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + #'bad.py", line 2\n' + ' ! = 0 # %s\n' + ' ^\n' + 'SyntaxError: ') % + (text,)), textoutput) + + def test_syntax_error_line_iso_8859_5(self): + """Syntax error on a iso-8859-5 line shows the line decoded""" + text, raw = self._get_sample_text("iso-8859-5") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "iso-8859-5", + "# coding: iso-8859-5\n%% = 0 # %s\n" % text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + #'bad.py", line 2\n' + ' %% = 0 # %s\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ') % + (text,)), textoutput) + + def test_syntax_error_line_euc_jp(self): + """Syntax error on a euc_jp line shows the line decoded""" + text, raw = self._get_sample_text("euc_jp") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "euc_jp", + "# coding: euc_jp\n$ = 0 # %s\n" % text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + #'bad.py", line 2\n' + ' $ = 0 # %s\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ') % + (text,)), textoutput) + + def test_syntax_error_line_utf_8(self): + """Syntax error on a utf-8 line shows the line decoded""" + text, raw = self._get_sample_text("utf-8") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % 
text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + 'bad.py", line 1\n' + ' ^ = 0 # %s\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ') % + text), textoutput) + + +class TestNonAsciiResultsWithUnittest(TestNonAsciiResults): + """Test that running under unittest produces clean ascii strings""" + + def _run(self, stream, test): + from unittest import TextTestRunner as _Runner + return _Runner(stream).run(test) + + def _as_output(self, text): + if str_is_unicode: + return text + return text.encode("utf-8") + + +class TestDetailsToStr(TestCase): + + def test_no_details(self): + string = _details_to_str({}) + self.assertThat(string, Equals('')) + + def test_binary_content(self): + content = content_from_stream( + StringIO('foo'), content_type=ContentType('image', 'jpeg')) + string = _details_to_str({'attachment': content}) + self.assertThat( + string, Equals("""\ +Binary content: + attachment (image/jpeg) +""")) + + def test_single_line_content(self): + content = text_content('foo') + string = _details_to_str({'attachment': content}) + self.assertThat(string, Equals('attachment: {{{foo}}}\n')) + + def test_multi_line_text_content(self): + content = text_content('foo\nbar\nbaz') + string = _details_to_str({'attachment': content}) + self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n')) + + def test_special_text_content(self): + content = text_content('foo') + string = _details_to_str({'attachment': content}, special='attachment') + self.assertThat(string, Equals('foo\n')) + + def test_multiple_text_content(self): + string = _details_to_str( + {'attachment': text_content('foo\nfoo'), + 'attachment-1': text_content('bar\nbar')}) + self.assertThat( + string, Equals('attachment: {{{\n' + 'foo\n' + 'foo\n' + '}}}\n' + '\n' + 'attachment-1: {{{\n' + 'bar\n' + 'bar\n' + '}}}\n')) + + def test_empty_attachment(self): + string = _details_to_str({'attachment': text_content('')}) + self.assertThat( + string, 
Equals("""\ +Empty attachments: + attachment +""")) + + def test_lots_of_different_attachments(self): + jpg = lambda x: content_from_stream( + StringIO(x), ContentType('image', 'jpeg')) + attachments = { + 'attachment': text_content('foo'), + 'attachment-1': text_content('traceback'), + 'attachment-2': jpg('pic1'), + 'attachment-3': text_content('bar'), + 'attachment-4': text_content(''), + 'attachment-5': jpg('pic2'), + } + string = _details_to_str(attachments, special='attachment-1') + self.assertThat( + string, Equals("""\ +Binary content: + attachment-2 (image/jpeg) + attachment-5 (image/jpeg) +Empty attachments: + attachment-4 + +attachment: {{{foo}}} +attachment-3: {{{bar}}} + +traceback +""")) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py new file mode 100644 index 00000000000..05647577cdb --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py @@ -0,0 +1,75 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. 
+ +"""Test ConcurrentTestSuite and related things.""" + +__metaclass__ = type + +import unittest + +from testtools import ( + ConcurrentTestSuite, + iterate_tests, + TestCase, + ) +from testtools.helpers import try_import +from testtools.testsuite import FixtureSuite +from testtools.tests.helpers import LoggingResult + +FunctionFixture = try_import('fixtures.FunctionFixture') + + +class TestConcurrentTestSuiteRun(TestCase): + + def test_trivial(self): + log = [] + result = LoggingResult(log) + class Sample(TestCase): + def __hash__(self): + return id(self) + + def test_method1(self): + pass + def test_method2(self): + pass + test1 = Sample('test_method1') + test2 = Sample('test_method2') + original_suite = unittest.TestSuite([test1, test2]) + suite = ConcurrentTestSuite(original_suite, self.split_suite) + suite.run(result) + # 0 is the timestamp for the first test starting. + test1 = log[1][1] + test2 = log[-1][1] + self.assertIsInstance(test1, Sample) + self.assertIsInstance(test2, Sample) + self.assertNotEqual(test1.id(), test2.id()) + + def split_suite(self, suite): + tests = list(iterate_tests(suite)) + return tests[0], tests[1] + + +class TestFixtureSuite(TestCase): + + def setUp(self): + super(TestFixtureSuite, self).setUp() + if FunctionFixture is None: + self.skip("Need fixtures") + + def test_fixture_suite(self): + log = [] + class Sample(TestCase): + def test_one(self): + log.append(1) + def test_two(self): + log.append(2) + fixture = FunctionFixture( + lambda: log.append('setUp'), + lambda fixture: log.append('tearDown')) + suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')]) + suite.run(LoggingResult([])) + self.assertEqual(['setUp', 1, 2, 'tearDown'], log) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_with_with.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_with_with.py new file mode 100644 index 
00000000000..e06adeb1816 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/tests/test_with_with.py @@ -0,0 +1,73 @@ +# Copyright (c) 2011 testtools developers. See LICENSE for details. + +from __future__ import with_statement + +import sys + +from testtools import ( + ExpectedException, + TestCase, + ) +from testtools.matchers import ( + AfterPreprocessing, + Equals, + ) + + +class TestExpectedException(TestCase): + """Test the ExpectedException context manager.""" + + def test_pass_on_raise(self): + with ExpectedException(ValueError, 'tes.'): + raise ValueError('test') + + def test_pass_on_raise_matcher(self): + with ExpectedException( + ValueError, AfterPreprocessing(str, Equals('test'))): + raise ValueError('test') + + def test_raise_on_text_mismatch(self): + try: + with ExpectedException(ValueError, 'tes.'): + raise ValueError('mismatch') + except AssertionError: + e = sys.exc_info()[1] + self.assertEqual("'mismatch' does not match /tes./", str(e)) + else: + self.fail('AssertionError not raised.') + + def test_raise_on_general_mismatch(self): + matcher = AfterPreprocessing(str, Equals('test')) + value_error = ValueError('mismatch') + try: + with ExpectedException(ValueError, matcher): + raise value_error + except AssertionError: + e = sys.exc_info()[1] + self.assertEqual(matcher.match(value_error).describe(), str(e)) + else: + self.fail('AssertionError not raised.') + + def test_raise_on_error_mismatch(self): + try: + with ExpectedException(TypeError, 'tes.'): + raise ValueError('mismatch') + except ValueError: + e = sys.exc_info()[1] + self.assertEqual('mismatch', str(e)) + else: + self.fail('ValueError not raised.') + + def test_raise_if_no_exception(self): + try: + with ExpectedException(TypeError, 'tes.'): + pass + except AssertionError: + e = sys.exc_info()[1] + self.assertEqual('TypeError not raised.', str(e)) + else: + self.fail('AssertionError not raised.') + + def test_pass_on_raise_any_message(self): + with ExpectedException(ValueError): + 
raise ValueError('whatever') diff --git a/test/3rdparty/testtools-0.9.12/testtools/testsuite.py b/test/3rdparty/testtools-0.9.12/testtools/testsuite.py new file mode 100644 index 00000000000..18de8b89e12 --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/testsuite.py @@ -0,0 +1,101 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. + +"""Test suites and related things.""" + +__metaclass__ = type +__all__ = [ + 'ConcurrentTestSuite', + 'iterate_tests', + ] + +from testtools.helpers import try_imports + +Queue = try_imports(['Queue.Queue', 'queue.Queue']) + +import threading +import unittest + +import testtools + + +def iterate_tests(test_suite_or_case): + """Iterate through all of the test cases in 'test_suite_or_case'.""" + try: + suite = iter(test_suite_or_case) + except TypeError: + yield test_suite_or_case + else: + for test in suite: + for subtest in iterate_tests(test): + yield subtest + + +class ConcurrentTestSuite(unittest.TestSuite): + """A TestSuite whose run() calls out to a concurrency strategy.""" + + def __init__(self, suite, make_tests): + """Create a ConcurrentTestSuite to execute suite. + + :param suite: A suite to run concurrently. + :param make_tests: A helper function to split the tests in the + ConcurrentTestSuite into some number of concurrently executing + sub-suites. make_tests must take a suite, and return an iterable + of TestCase-like object, each of which must have a run(result) + method. + """ + super(ConcurrentTestSuite, self).__init__([suite]) + self.make_tests = make_tests + + def run(self, result): + """Run the tests concurrently. + + This calls out to the provided make_tests helper, and then serialises + the results so that result only sees activity from one TestCase at + a time. 
+ + ConcurrentTestSuite provides no special mechanism to stop the tests + returned by make_tests, it is up to the make_tests to honour the + shouldStop attribute on the result object they are run with, which will + be set if an exception is raised in the thread which + ConcurrentTestSuite.run is called in. + """ + tests = self.make_tests(self) + try: + threads = {} + queue = Queue() + result_semaphore = threading.Semaphore(1) + for test in tests: + process_result = testtools.ThreadsafeForwardingResult(result, + result_semaphore) + reader_thread = threading.Thread( + target=self._run_test, args=(test, process_result, queue)) + threads[test] = reader_thread, process_result + reader_thread.start() + while threads: + finished_test = queue.get() + threads[finished_test][0].join() + del threads[finished_test] + except: + for thread, process_result in threads.values(): + process_result.stop() + raise + + def _run_test(self, test, process_result, queue): + try: + test.run(process_result) + finally: + queue.put(test) + + +class FixtureSuite(unittest.TestSuite): + + def __init__(self, fixture, tests): + super(FixtureSuite, self).__init__(tests) + self._fixture = fixture + + def run(self, result): + self._fixture.setUp() + try: + super(FixtureSuite, self).run(result) + finally: + self._fixture.cleanUp() diff --git a/test/3rdparty/testtools-0.9.12/testtools/utils.py b/test/3rdparty/testtools-0.9.12/testtools/utils.py new file mode 100644 index 00000000000..0f39d8f5b6e --- /dev/null +++ b/test/3rdparty/testtools-0.9.12/testtools/utils.py @@ -0,0 +1,13 @@ +# Copyright (c) 2008-2010 testtools developers. See LICENSE for details. + +"""Utilities for dealing with stuff in unittest. + +Legacy - deprecated - use testtools.testsuite.iterate_tests +""" + +import warnings +warnings.warn("Please import iterate_tests from testtools.testsuite - " + "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2) + +from testtools.testsuite import iterate_tests + |