diff options
Diffstat (limited to 'src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools')
57 files changed, 17637 insertions, 0 deletions
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py new file mode 100644 index 00000000000..62caae8545a --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py @@ -0,0 +1,125 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. + +"""Extensions to the standard Python unittest library.""" + +__all__ = [ + 'clone_test_with_new_id', + 'CopyStreamResult', + 'ConcurrentTestSuite', + 'ConcurrentStreamTestSuite', + 'DecorateTestCaseResult', + 'ErrorHolder', + 'ExpectedException', + 'ExtendedToOriginalDecorator', + 'ExtendedToStreamDecorator', + 'FixtureSuite', + 'iterate_tests', + 'MultipleExceptions', + 'MultiTestResult', + 'PlaceHolder', + 'run_test_with', + 'Tagger', + 'TestCase', + 'TestCommand', + 'TestByTestResult', + 'TestResult', + 'TestResultDecorator', + 'TextTestResult', + 'RunTest', + 'skip', + 'skipIf', + 'skipUnless', + 'StreamFailFast', + 'StreamResult', + 'StreamResultRouter', + 'StreamSummary', + 'StreamTagger', + 'StreamToDict', + 'StreamToExtendedDecorator', + 'StreamToQueue', + 'TestControl', + 'ThreadsafeForwardingResult', + 'TimestampingStreamResult', + 'try_import', + 'try_imports', + ] + +# Compat - removal announced in 0.9.25. +try: + from extras import ( + try_import, + try_imports, + ) +except ImportError: + # Support reading __init__ for __version__ without extras, because pip does + # not support setup_requires. + pass +else: + + from testtools.matchers._impl import ( + Matcher, + ) +# Shut up, pyflakes. We are importing for documentation, not for namespacing. 
+ Matcher + + from testtools.runtest import ( + MultipleExceptions, + RunTest, + ) + from testtools.testcase import ( + DecorateTestCaseResult, + ErrorHolder, + ExpectedException, + PlaceHolder, + TestCase, + clone_test_with_new_id, + run_test_with, + skip, + skipIf, + skipUnless, + ) + from testtools.testresult import ( + CopyStreamResult, + ExtendedToOriginalDecorator, + ExtendedToStreamDecorator, + MultiTestResult, + StreamFailFast, + StreamResult, + StreamResultRouter, + StreamSummary, + StreamTagger, + StreamToDict, + StreamToExtendedDecorator, + StreamToQueue, + Tagger, + TestByTestResult, + TestControl, + TestResult, + TestResultDecorator, + TextTestResult, + ThreadsafeForwardingResult, + TimestampingStreamResult, + ) + from testtools.testsuite import ( + ConcurrentTestSuite, + ConcurrentStreamTestSuite, + FixtureSuite, + iterate_tests, + ) + from testtools.distutilscmd import ( + TestCommand, + ) + +# same format as sys.version_info: "A tuple containing the five components of +# the version number: major, minor, micro, releaselevel, and serial. All +# values except releaselevel are integers; the release level is 'alpha', +# 'beta', 'candidate', or 'final'. The version_info value corresponding to the +# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a +# releaselevel of 'dev' for unreleased under-development code. +# +# If the releaselevel is 'alpha' then the major/minor/micro components are not +# established at this point, and setup.py will use a version of next-$(revno). +# If the releaselevel is 'final', then the tarball will be major.minor.micro. +# Otherwise it is major.minor.micro~$(revno). 
+
+__version__ = (0, 9, 34, 'final', 0)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
new file mode 100644
index 00000000000..2b25c13e081
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+"""Compatibility helpers that are valid syntax in Python 2.x.
+
+Only add things here if they *only* work in Python 2.x or are Python 2
+alternatives to things that *only* work in Python 3.x.
+"""
+
+__all__ = [
+    'reraise',
+    ]
+
+
+def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
+    """Re-raise an exception received from sys.exc_info() or similar."""
+    # NOTE(review): three-expression raise is Python 2-only syntax by design;
+    # the Python 3 twin lives in _compat3x.py.  Do not "modernize" this file.
+    raise exc_class, exc_obj, exc_tb
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py
new file mode 100644
index 00000000000..7a482c14b43
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+"""Compatibility helpers that are valid syntax in Python 3.x.
+
+Only add things here if they *only* work in Python 3.x or are Python 3
+alternatives to things that *only* work in Python 2.x.
+""" + +__all__ = [ + 'reraise', + ] + + +def reraise(exc_class, exc_obj, exc_tb, _marker=object()): + """Re-raise an exception received from sys.exc_info() or similar.""" + raise exc_obj.with_traceback(exc_tb) + diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py new file mode 100644 index 00000000000..baf455a5f94 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py @@ -0,0 +1,316 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Evil reactor-spinning logic for running Twisted tests. + +This code is highly experimental, liable to change and not to be trusted. If +you couldn't write this yourself, you should not be using it. +""" + +__all__ = [ + 'DeferredNotFired', + 'extract_result', + 'NoResultError', + 'not_reentrant', + 'ReentryError', + 'Spinner', + 'StaleJunkError', + 'TimeoutError', + 'trap_unhandled_errors', + ] + +import signal + +from testtools.monkey import MonkeyPatcher + +from twisted.internet import defer +from twisted.internet.base import DelayedCall +from twisted.internet.interfaces import IReactorThreads +from twisted.python.failure import Failure +from twisted.python.util import mergeFunctionMetadata + + +class ReentryError(Exception): + """Raised when we try to re-enter a function that forbids it.""" + + def __init__(self, function): + Exception.__init__(self, + "%r in not re-entrant but was called within a call to itself." + % (function,)) + + +def not_reentrant(function, _calls={}): + """Decorates a function as not being re-entrant. + + The decorated function will raise an error if called from within itself. 
+ """ + def decorated(*args, **kwargs): + if _calls.get(function, False): + raise ReentryError(function) + _calls[function] = True + try: + return function(*args, **kwargs) + finally: + _calls[function] = False + return mergeFunctionMetadata(function, decorated) + + +class DeferredNotFired(Exception): + """Raised when we extract a result from a Deferred that's not fired yet.""" + + +def extract_result(deferred): + """Extract the result from a fired deferred. + + It can happen that you have an API that returns Deferreds for + compatibility with Twisted code, but is in fact synchronous, i.e. the + Deferreds it returns have always fired by the time it returns. In this + case, you can use this function to convert the result back into the usual + form for a synchronous API, i.e. the result itself or a raised exception. + + It would be very bad form to use this as some way of checking if a + Deferred has fired. + """ + failures = [] + successes = [] + deferred.addCallbacks(successes.append, failures.append) + if len(failures) == 1: + failures[0].raiseException() + elif len(successes) == 1: + return successes[0] + else: + raise DeferredNotFired("%r has not fired yet." % (deferred,)) + + +def trap_unhandled_errors(function, *args, **kwargs): + """Run a function, trapping any unhandled errors in Deferreds. + + Assumes that 'function' will have handled any errors in Deferreds by the + time it is complete. This is almost never true of any Twisted code, since + you can never tell when someone has added an errback to a Deferred. + + If 'function' raises, then don't bother doing any unhandled error + jiggery-pokery, since something horrible has probably happened anyway. + + :return: A tuple of '(result, error)', where 'result' is the value + returned by 'function' and 'error' is a list of 'defer.DebugInfo' + objects that have unhandled errors in Deferreds. 
+ """ + real_DebugInfo = defer.DebugInfo + debug_infos = [] + def DebugInfo(): + info = real_DebugInfo() + debug_infos.append(info) + return info + defer.DebugInfo = DebugInfo + try: + result = function(*args, **kwargs) + finally: + defer.DebugInfo = real_DebugInfo + errors = [] + for info in debug_infos: + if info.failResult is not None: + errors.append(info) + # Disable the destructor that logs to error. We are already + # catching the error here. + info.__del__ = lambda: None + return result, errors + + +class TimeoutError(Exception): + """Raised when run_in_reactor takes too long to run a function.""" + + def __init__(self, function, timeout): + Exception.__init__(self, + "%r took longer than %s seconds" % (function, timeout)) + + +class NoResultError(Exception): + """Raised when the reactor has stopped but we don't have any result.""" + + def __init__(self): + Exception.__init__(self, + "Tried to get test's result from Deferred when no result is " + "available. Probably means we received SIGINT or similar.") + + +class StaleJunkError(Exception): + """Raised when there's junk in the spinner from a previous run.""" + + def __init__(self, junk): + Exception.__init__(self, + "There was junk in the spinner from a previous run. " + "Use clear_junk() to clear it out: %r" % (junk,)) + + +class Spinner(object): + """Spin the reactor until a function is done. + + This class emulates the behaviour of twisted.trial in that it grotesquely + and horribly spins the Twisted reactor while a function is running, and + then kills the reactor when that function is complete and all the + callbacks in its chains are done. + """ + + _UNSET = object() + + # Signals that we save and restore for each spin. + _PRESERVED_SIGNALS = [ + 'SIGINT', + 'SIGTERM', + 'SIGCHLD', + ] + + # There are many APIs within Twisted itself where a Deferred fires but + # leaves cleanup work scheduled for the reactor to do. Arguably, many of + # these are bugs. 
As such, we provide a facility to iterate the reactor + # event loop a number of times after every call, in order to shake out + # these buggy-but-commonplace events. The default is 0, because that is + # the ideal, and it actually works for many cases. + _OBLIGATORY_REACTOR_ITERATIONS = 0 + + def __init__(self, reactor, debug=False): + """Construct a Spinner. + + :param reactor: A Twisted reactor. + :param debug: Whether or not to enable Twisted's debugging. Defaults + to False. + """ + self._reactor = reactor + self._timeout_call = None + self._success = self._UNSET + self._failure = self._UNSET + self._saved_signals = [] + self._junk = [] + self._debug = debug + + def _cancel_timeout(self): + if self._timeout_call: + self._timeout_call.cancel() + + def _get_result(self): + if self._failure is not self._UNSET: + self._failure.raiseException() + if self._success is not self._UNSET: + return self._success + raise NoResultError() + + def _got_failure(self, result): + self._cancel_timeout() + self._failure = result + + def _got_success(self, result): + self._cancel_timeout() + self._success = result + + def _stop_reactor(self, ignored=None): + """Stop the reactor!""" + self._reactor.crash() + + def _timed_out(self, function, timeout): + e = TimeoutError(function, timeout) + self._failure = Failure(e) + self._stop_reactor() + + def _clean(self): + """Clean up any junk in the reactor. + + Will always iterate the reactor a number of times equal to + ``Spinner._OBLIGATORY_REACTOR_ITERATIONS``. This is to work around + bugs in various Twisted APIs where a Deferred fires but still leaves + work (e.g. cancelling a call, actually closing a connection) for the + reactor to do. 
+ """ + for i in range(self._OBLIGATORY_REACTOR_ITERATIONS): + self._reactor.iterate(0) + junk = [] + for delayed_call in self._reactor.getDelayedCalls(): + delayed_call.cancel() + junk.append(delayed_call) + for selectable in self._reactor.removeAll(): + # Twisted sends a 'KILL' signal to selectables that provide + # IProcessTransport. Since only _dumbwin32proc processes do this, + # we aren't going to bother. + junk.append(selectable) + if IReactorThreads.providedBy(self._reactor): + if self._reactor.threadpool is not None: + self._reactor._stopThreadPool() + self._junk.extend(junk) + return junk + + def clear_junk(self): + """Clear out our recorded junk. + + :return: Whatever junk was there before. + """ + junk = self._junk + self._junk = [] + return junk + + def get_junk(self): + """Return any junk that has been found on the reactor.""" + return self._junk + + def _save_signals(self): + available_signals = [ + getattr(signal, name, None) for name in self._PRESERVED_SIGNALS] + self._saved_signals = [ + (sig, signal.getsignal(sig)) for sig in available_signals if sig] + + def _restore_signals(self): + for sig, hdlr in self._saved_signals: + signal.signal(sig, hdlr) + self._saved_signals = [] + + @not_reentrant + def run(self, timeout, function, *args, **kwargs): + """Run 'function' in a reactor. + + If 'function' returns a Deferred, the reactor will keep spinning until + the Deferred fires and its chain completes or until the timeout is + reached -- whichever comes first. + + :raise TimeoutError: If 'timeout' is reached before the Deferred + returned by 'function' has completed its callback chain. + :raise NoResultError: If the reactor is somehow interrupted before + the Deferred returned by 'function' has completed its callback + chain. + :raise StaleJunkError: If there's junk in the spinner from a previous + run. + :return: Whatever is at the end of the function's callback chain. If + it's an error, then raise that. 
+ """ + debug = MonkeyPatcher() + if self._debug: + debug.add_patch(defer.Deferred, 'debug', True) + debug.add_patch(DelayedCall, 'debug', True) + debug.patch() + try: + junk = self.get_junk() + if junk: + raise StaleJunkError(junk) + self._save_signals() + self._timeout_call = self._reactor.callLater( + timeout, self._timed_out, function, timeout) + # Calling 'stop' on the reactor will make it impossible to + # re-start the reactor. Since the default signal handlers for + # TERM, BREAK and INT all call reactor.stop(), we'll patch it over + # with crash. XXX: It might be a better idea to either install + # custom signal handlers or to override the methods that are + # Twisted's signal handlers. + stop, self._reactor.stop = self._reactor.stop, self._reactor.crash + def run_function(): + d = defer.maybeDeferred(function, *args, **kwargs) + d.addCallbacks(self._got_success, self._got_failure) + d.addBoth(self._stop_reactor) + try: + self._reactor.callWhenRunning(run_function) + self._reactor.run() + finally: + self._reactor.stop = stop + self._restore_signals() + try: + return self._get_result() + finally: + self._clean() + finally: + debug.restore() diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py new file mode 100644 index 00000000000..5502e0c2161 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py @@ -0,0 +1,415 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. 
+ +"""Compatibility support for python 2 and 3.""" + +__metaclass__ = type +__all__ = [ + '_b', + '_u', + 'advance_iterator', + 'all', + 'BytesIO', + 'classtypes', + 'isbaseexception', + 'istext', + 'str_is_unicode', + 'StringIO', + 'reraise', + 'unicode_output_stream', + ] + +import codecs +import io +import linecache +import locale +import os +import re +import sys +import traceback +import unicodedata + +from extras import try_imports + +BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO']) +StringIO = try_imports(['StringIO.StringIO', 'io.StringIO']) + +try: + from testtools import _compat2x as _compat +except (SyntaxError, ImportError): + from testtools import _compat3x as _compat + +reraise = _compat.reraise + + +__u_doc = """A function version of the 'u' prefix. + +This is needed becayse the u prefix is not usable in Python 3 but is required +in Python 2 to get a unicode object. + +To migrate code that was written as u'\u1234' in Python 2 to 2+3 change +it to be _u('\u1234'). The Python 3 interpreter will decode it +appropriately and the no-op _u for Python 3 lets it through, in Python +2 we then call unicode-escape in the _u function. +""" + +if sys.version_info > (3, 0): + import builtins + def _u(s): + return s + _r = ascii + def _b(s): + """A byte literal.""" + return s.encode("latin-1") + advance_iterator = next + # GZ 2011-08-24: Seems istext() is easy to misuse and makes for bad code. + def istext(x): + return isinstance(x, str) + def classtypes(): + return (type,) + str_is_unicode = True +else: + import __builtin__ as builtins + def _u(s): + # The double replace mangling going on prepares the string for + # unicode-escape - \foo is preserved, \u and \U are decoded. 
+ return (s.replace("\\", "\\\\").replace("\\\\u", "\\u") + .replace("\\\\U", "\\U").decode("unicode-escape")) + _r = repr + def _b(s): + return s + advance_iterator = lambda it: it.next() + def istext(x): + return isinstance(x, basestring) + def classtypes(): + import types + return (type, types.ClassType) + str_is_unicode = sys.platform == "cli" + +_u.__doc__ = __u_doc + + +if sys.version_info > (2, 5): + all = all + _error_repr = BaseException.__repr__ + def isbaseexception(exception): + """Return whether exception inherits from BaseException only""" + return (isinstance(exception, BaseException) + and not isinstance(exception, Exception)) +else: + def all(iterable): + """If contents of iterable all evaluate as boolean True""" + for obj in iterable: + if not obj: + return False + return True + def _error_repr(exception): + """Format an exception instance as Python 2.5 and later do""" + return exception.__class__.__name__ + repr(exception.args) + def isbaseexception(exception): + """Return whether exception would inherit from BaseException only + + This approximates the hierarchy in Python 2.5 and later, compare the + difference between the diagrams at the bottom of the pages: + <http://docs.python.org/release/2.4.4/lib/module-exceptions.html> + <http://docs.python.org/release/2.5.4/lib/module-exceptions.html> + """ + return isinstance(exception, (KeyboardInterrupt, SystemExit)) + + +# GZ 2011-08-24: Using isinstance checks like this encourages bad interfaces, +# there should be better ways to write code needing this. +if not issubclass(getattr(builtins, "bytes", str), str): + def _isbytes(x): + return isinstance(x, bytes) +else: + # Never return True on Pythons that provide the name but not the real type + def _isbytes(x): + return False + + +def _slow_escape(text): + """Escape unicode ``text`` leaving printable characters unmodified + + The behaviour emulates the Python 3 implementation of repr, see + unicode_repr in unicodeobject.c and isprintable definition. 
+ + Because this iterates over the input a codepoint at a time, it's slow, and + does not handle astral characters correctly on Python builds with 16 bit + rather than 32 bit unicode type. + """ + output = [] + for c in text: + o = ord(c) + if o < 256: + if o < 32 or 126 < o < 161: + output.append(c.encode("unicode-escape")) + elif o == 92: + # Separate due to bug in unicode-escape codec in Python 2.4 + output.append("\\\\") + else: + output.append(c) + else: + # To get correct behaviour would need to pair up surrogates here + if unicodedata.category(c)[0] in "CZ": + output.append(c.encode("unicode-escape")) + else: + output.append(c) + return "".join(output) + + +def text_repr(text, multiline=None): + """Rich repr for ``text`` returning unicode, triple quoted if ``multiline``. + """ + is_py3k = sys.version_info > (3, 0) + nl = _isbytes(text) and bytes((0xA,)) or "\n" + if multiline is None: + multiline = nl in text + if not multiline and (is_py3k or not str_is_unicode and type(text) is str): + # Use normal repr for single line of unicode on Python 3 or bytes + return repr(text) + prefix = repr(text[:0])[:-2] + if multiline: + # To escape multiline strings, split and process each line in turn, + # making sure that quotes are not escaped. + if is_py3k: + offset = len(prefix) + 1 + lines = [] + for l in text.split(nl): + r = repr(l) + q = r[-1] + lines.append(r[offset:-1].replace("\\" + q, q)) + elif not str_is_unicode and isinstance(text, str): + lines = [l.encode("string-escape").replace("\\'", "'") + for l in text.split("\n")] + else: + lines = [_slow_escape(l) for l in text.split("\n")] + # Combine the escaped lines and append two of the closing quotes, + # then iterate over the result to escape triple quotes correctly. 
+ _semi_done = "\n".join(lines) + "''" + p = 0 + while True: + p = _semi_done.find("'''", p) + if p == -1: + break + _semi_done = "\\".join([_semi_done[:p], _semi_done[p:]]) + p += 2 + return "".join([prefix, "'''\\\n", _semi_done, "'"]) + escaped_text = _slow_escape(text) + # Determine which quote character to use and if one gets prefixed with a + # backslash following the same logic Python uses for repr() on strings + quote = "'" + if "'" in text: + if '"' in text: + escaped_text = escaped_text.replace("'", "\\'") + else: + quote = '"' + return "".join([prefix, quote, escaped_text, quote]) + + +def unicode_output_stream(stream): + """Get wrapper for given stream that writes any unicode without exception + + Characters that can't be coerced to the encoding of the stream, or 'ascii' + if valid encoding is not found, will be replaced. The original stream may + be returned in situations where a wrapper is determined unneeded. + + The wrapper only allows unicode to be written, not non-ascii bytestrings, + which is a good thing to ensure sanity and sanitation. + """ + if (sys.platform == "cli" or + isinstance(stream, (io.TextIOWrapper, io.StringIO))): + # Best to never encode before writing in IronPython, or if it is + # already a TextIO [which in the io library has no encoding + # attribute). 
+ return stream + try: + writer = codecs.getwriter(stream.encoding or "") + except (AttributeError, LookupError): + return codecs.getwriter("ascii")(stream, "replace") + if writer.__module__.rsplit(".", 1)[1].startswith("utf"): + # The current stream has a unicode encoding so no error handler is needed + if sys.version_info > (3, 0): + return stream + return writer(stream) + if sys.version_info > (3, 0): + # Python 3 doesn't seem to make this easy, handle a common case + try: + return stream.__class__(stream.buffer, stream.encoding, "replace", + stream.newlines, stream.line_buffering) + except AttributeError: + pass + return writer(stream, "replace") + + +# The default source encoding is actually "iso-8859-1" until Python 2.5 but +# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to +# treat all versions the same way +_default_source_encoding = "ascii" + +# Pattern specified in <http://www.python.org/dev/peps/pep-0263/> +_cookie_search=re.compile("coding[:=]\s*([-\w.]+)").search + +def _detect_encoding(lines): + """Get the encoding of a Python source file from a list of lines as bytes + + This function does less than tokenize.detect_encoding added in Python 3 as + it does not attempt to raise a SyntaxError when the interpreter would, it + just wants the encoding of a source file Python has already compiled and + determined is valid. 
+ """ + if not lines: + return _default_source_encoding + if lines[0].startswith("\xef\xbb\xbf"): + # Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError + return "utf-8" + # Only the first two lines of the source file are examined + magic = _cookie_search("".join(lines[:2])) + if magic is None: + return _default_source_encoding + encoding = magic.group(1) + try: + codecs.lookup(encoding) + except LookupError: + # Some codecs raise something other than LookupError if they don't + # support the given error handler, but not the text ones that could + # actually be used for Python source code + return _default_source_encoding + return encoding + + +class _EncodingTuple(tuple): + """A tuple type that can have an encoding attribute smuggled on""" + + +def _get_source_encoding(filename): + """Detect, cache and return the encoding of Python source at filename""" + try: + return linecache.cache[filename].encoding + except (AttributeError, KeyError): + encoding = _detect_encoding(linecache.getlines(filename)) + if filename in linecache.cache: + newtuple = _EncodingTuple(linecache.cache[filename]) + newtuple.encoding = encoding + linecache.cache[filename] = newtuple + return encoding + + +def _get_exception_encoding(): + """Return the encoding we expect messages from the OS to be encoded in""" + if os.name == "nt": + # GZ 2010-05-24: Really want the codepage number instead, the error + # handling of standard codecs is more deterministic + return "mbcs" + # GZ 2010-05-23: We need this call to be after initialisation, but there's + # no benefit in asking more than once as it's a global + # setting that can change after the message is formatted. + return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii" + + +def _exception_to_text(evalue): + """Try hard to get a sensible text value out of an exception instance""" + try: + return unicode(evalue) + except KeyboardInterrupt: + raise + except: + # Apparently this is what traceback._some_str does. 
Sigh - RBC 20100623 + pass + try: + return str(evalue).decode(_get_exception_encoding(), "replace") + except KeyboardInterrupt: + raise + except: + # Apparently this is what traceback._some_str does. Sigh - RBC 20100623 + pass + # Okay, out of ideas, let higher level handle it + return None + + +def _format_stack_list(stack_lines): + """Format 'stack_lines' and return a list of unicode strings. + + :param stack_lines: A list of filename, lineno, name, and line variables, + probably obtained by calling traceback.extract_tb or + traceback.extract_stack. + """ + fs_enc = sys.getfilesystemencoding() + extracted_list = [] + for filename, lineno, name, line in stack_lines: + extracted_list.append(( + filename.decode(fs_enc, "replace"), + lineno, + name.decode("ascii", "replace"), + line and line.decode( + _get_source_encoding(filename), "replace"))) + return traceback.format_list(extracted_list) + + +def _format_exception_only(eclass, evalue): + """Format the excption part of a traceback. + + :param eclass: The type of the exception being formatted. + :param evalue: The exception instance. + :returns: A list of unicode strings. 
+ """ + list = [] + if evalue is None: + # Is a (deprecated) string exception + list.append((eclass + "\n").decode("ascii", "replace")) + return list + if isinstance(evalue, SyntaxError): + # Avoid duplicating the special formatting for SyntaxError here, + # instead create a new instance with unicode filename and line + # Potentially gives duff spacing, but that's a pre-existing issue + try: + msg, (filename, lineno, offset, line) = evalue + except (TypeError, ValueError): + pass # Strange exception instance, fall through to generic code + else: + # Errors during parsing give the line from buffer encoded as + # latin-1 or utf-8 or the encoding of the file depending on the + # coding and whether the patch for issue #1031213 is applied, so + # give up on trying to decode it and just read the file again + if line: + bytestr = linecache.getline(filename, lineno) + if bytestr: + if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"): + bytestr = bytestr[3:] + line = bytestr.decode( + _get_source_encoding(filename), "replace") + del linecache.cache[filename] + else: + line = line.decode("ascii", "replace") + if filename: + fs_enc = sys.getfilesystemencoding() + filename = filename.decode(fs_enc, "replace") + evalue = eclass(msg, (filename, lineno, offset, line)) + list.extend(traceback.format_exception_only(eclass, evalue)) + return list + sclass = eclass.__name__ + svalue = _exception_to_text(evalue) + if svalue: + list.append("%s: %s\n" % (sclass, svalue)) + elif svalue is None: + # GZ 2010-05-24: Not a great fallback message, but keep for the moment + list.append(_u("%s: <unprintable %s object>\n" % (sclass, sclass))) + else: + list.append(_u("%s\n" % sclass)) + return list + + +_TB_HEADER = _u('Traceback (most recent call last):\n') + + +def _format_exc_info(eclass, evalue, tb, limit=None): + """Format a stack trace and the exception information as unicode + + Compatibility function for Python 2 which ensures each component of a + traceback is correctly decoded 
according to its origins. + + Based on traceback.format_exception and related functions. + """ + return [_TB_HEADER] \ + + _format_stack_list(traceback.extract_tb(tb, limit)) \ + + _format_exception_only(eclass, evalue) + diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py new file mode 100644 index 00000000000..09f44844524 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py @@ -0,0 +1,385 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +"""Content - a MIME-like Content object.""" + +__all__ = [ + 'attach_file', + 'Content', + 'content_from_file', + 'content_from_stream', + 'json_content', + 'text_content', + 'TracebackContent', + ] + +import codecs +import inspect +import json +import os +import sys +import traceback + +from extras import try_import + +from testtools.compat import ( + _b, + _format_exception_only, + _format_stack_list, + _TB_HEADER, + _u, + str_is_unicode, +) +from testtools.content_type import ContentType, JSON, UTF8_TEXT + + +functools = try_import('functools') + +_join_b = _b("").join + + +DEFAULT_CHUNK_SIZE = 4096 + +STDOUT_LINE = '\nStdout:\n%s' +STDERR_LINE = '\nStderr:\n%s' + + +def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0): + """Read 'stream' in chunks of 'chunk_size'. + + :param stream: A file-like object to read from. + :param chunk_size: The size of each read from 'stream'. + :param seek_offset: If non-None, seek before iterating. + :param seek_whence: Pass through to the seek call, if seeking. + """ + if seek_offset is not None: + stream.seek(seek_offset, seek_whence) + chunk = stream.read(chunk_size) + while chunk: + yield chunk + chunk = stream.read(chunk_size) + + +class Content(object): + """A MIME-like Content object. + + Content objects can be serialised to bytes using the iter_bytes method. 
+ If the Content-Type is recognised by other code, they are welcome to + look for richer contents that mere byte serialisation - for example in + memory object graphs etc. However, such code MUST be prepared to receive + a generic Content object that has been reconstructed from a byte stream. + + :ivar content_type: The content type of this Content. + """ + + def __init__(self, content_type, get_bytes): + """Create a ContentType.""" + if None in (content_type, get_bytes): + raise ValueError("None not permitted in %r, %r" % ( + content_type, get_bytes)) + self.content_type = content_type + self._get_bytes = get_bytes + + def __eq__(self, other): + return (self.content_type == other.content_type and + _join_b(self.iter_bytes()) == _join_b(other.iter_bytes())) + + def as_text(self): + """Return all of the content as text. + + This is only valid where ``iter_text`` is. It will load all of the + content into memory. Where this is a concern, use ``iter_text`` + instead. + """ + return _u('').join(self.iter_text()) + + def iter_bytes(self): + """Iterate over bytestrings of the serialised content.""" + return self._get_bytes() + + def iter_text(self): + """Iterate over the text of the serialised content. + + This is only valid for text MIME types, and will use ISO-8859-1 if + no charset parameter is present in the MIME type. (This is somewhat + arbitrary, but consistent with RFC2617 3.7.1). + + :raises ValueError: If the content type is not text/\*. 
+ """ + if self.content_type.type != "text": + raise ValueError("Not a text type %r" % self.content_type) + return self._iter_text() + + def _iter_text(self): + """Worker for iter_text - does the decoding.""" + encoding = self.content_type.parameters.get('charset', 'ISO-8859-1') + try: + # 2.5+ + decoder = codecs.getincrementaldecoder(encoding)() + for bytes in self.iter_bytes(): + yield decoder.decode(bytes) + final = decoder.decode(_b(''), True) + if final: + yield final + except AttributeError: + # < 2.5 + bytes = ''.join(self.iter_bytes()) + yield bytes.decode(encoding) + + def __repr__(self): + return "<Content type=%r, value=%r>" % ( + self.content_type, _join_b(self.iter_bytes())) + + +class StackLinesContent(Content): + """Content object for stack lines. + + This adapts a list of "preprocessed" stack lines into a content object. + The stack lines are most likely produced from ``traceback.extract_stack`` + or ``traceback.extract_tb``. + + text/x-traceback;language=python is used for the mime type, in order to + provide room for other languages to format their tracebacks differently. + """ + + # Whether or not to hide layers of the stack trace that are + # unittest/testtools internal code. Defaults to True since the + # system-under-test is rarely unittest or testtools. + HIDE_INTERNAL_STACK = True + + def __init__(self, stack_lines, prefix_content="", postfix_content=""): + """Create a StackLinesContent for ``stack_lines``. + + :param stack_lines: A list of preprocessed stack lines, probably + obtained by calling ``traceback.extract_stack`` or + ``traceback.extract_tb``. + :param prefix_content: If specified, a unicode string to prepend to the + text content. + :param postfix_content: If specified, a unicode string to append to the + text content. 
+ """ + content_type = ContentType('text', 'x-traceback', + {"language": "python", "charset": "utf8"}) + value = prefix_content + \ + self._stack_lines_to_unicode(stack_lines) + \ + postfix_content + super(StackLinesContent, self).__init__( + content_type, lambda: [value.encode("utf8")]) + + def _stack_lines_to_unicode(self, stack_lines): + """Converts a list of pre-processed stack lines into a unicode string. + """ + + # testtools customization. When str is unicode (e.g. IronPython, + # Python 3), traceback.format_exception returns unicode. For Python 2, + # it returns bytes. We need to guarantee unicode. + if str_is_unicode: + format_stack_lines = traceback.format_list + else: + format_stack_lines = _format_stack_list + + msg_lines = format_stack_lines(stack_lines) + + return ''.join(msg_lines) + + +def TracebackContent(err, test): + """Content object for tracebacks. + + This adapts an exc_info tuple to the Content interface. + text/x-traceback;language=python is used for the mime type, in order to + provide room for other languages to format their tracebacks differently. + """ + if err is None: + raise ValueError("err may not be None") + + exctype, value, tb = err + # Skip test runner traceback levels + if StackLinesContent.HIDE_INTERNAL_STACK: + while tb and '__unittest' in tb.tb_frame.f_globals: + tb = tb.tb_next + + # testtools customization. When str is unicode (e.g. IronPython, + # Python 3), traceback.format_exception_only returns unicode. For Python 2, + # it returns bytes. We need to guarantee unicode. 
+ if str_is_unicode: + format_exception_only = traceback.format_exception_only + else: + format_exception_only = _format_exception_only + + limit = None + # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420 + if (False + and StackLinesContent.HIDE_INTERNAL_STACK + and test.failureException + and isinstance(value, test.failureException)): + # Skip assert*() traceback levels + limit = 0 + while tb and not self._is_relevant_tb_level(tb): + limit += 1 + tb = tb.tb_next + + prefix = _TB_HEADER + stack_lines = traceback.extract_tb(tb, limit) + postfix = ''.join(format_exception_only(exctype, value)) + + return StackLinesContent(stack_lines, prefix, postfix) + + +def StacktraceContent(prefix_content="", postfix_content=""): + """Content object for stack traces. + + This function will create and return a content object that contains a + stack trace. + + The mime type is set to 'text/x-traceback;language=python', so other + languages can format their stack traces differently. + + :param prefix_content: A unicode string to add before the stack lines. + :param postfix_content: A unicode string to add after the stack lines. 
+ """ + stack = inspect.stack()[1:] + + if StackLinesContent.HIDE_INTERNAL_STACK: + limit = 1 + while limit < len(stack) and '__unittest' not in stack[limit][0].f_globals: + limit += 1 + else: + limit = -1 + + frames_only = [line[0] for line in stack[:limit]] + processed_stack = [ ] + for frame in reversed(frames_only): + filename, line, function, context, _ = inspect.getframeinfo(frame) + context = ''.join(context) + processed_stack.append((filename, line, function, context)) + return StackLinesContent(processed_stack, prefix_content, postfix_content) + + +def json_content(json_data): + """Create a JSON `Content` object from JSON-encodeable data.""" + data = json.dumps(json_data) + if str_is_unicode: + # The json module perversely returns native str not bytes + data = data.encode('utf8') + return Content(JSON, lambda: [data]) + + +def text_content(text): + """Create a `Content` object from some text. + + This is useful for adding details which are short strings. + """ + return Content(UTF8_TEXT, lambda: [text.encode('utf8')]) + + +def maybe_wrap(wrapper, func): + """Merge metadata for func into wrapper if functools is present.""" + if functools is not None: + wrapper = functools.update_wrapper(wrapper, func) + return wrapper + + +def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE, + buffer_now=False, seek_offset=None, seek_whence=0): + """Create a `Content` object from a file on disk. + + Note that unless 'read_now' is explicitly passed in as True, the file + will only be read from when ``iter_bytes`` is called. + + :param path: The path to the file to be used as content. + :param content_type: The type of content. If not specified, defaults + to UTF8-encoded text/plain. + :param chunk_size: The size of chunks to read from the file. + Defaults to ``DEFAULT_CHUNK_SIZE``. + :param buffer_now: If True, read the file from disk now and keep it in + memory. Otherwise, only read when the content is serialized. 
+ :param seek_offset: If non-None, seek within the stream before reading it. + :param seek_whence: If supplied, pass to stream.seek() when seeking. + """ + if content_type is None: + content_type = UTF8_TEXT + def reader(): + # This should be try:finally:, but python2.4 makes that hard. When + # We drop older python support we can make this use a context manager + # for maximum simplicity. + stream = open(path, 'rb') + for chunk in _iter_chunks(stream, chunk_size, seek_offset, seek_whence): + yield chunk + stream.close() + return content_from_reader(reader, content_type, buffer_now) + + +def content_from_stream(stream, content_type=None, + chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False, + seek_offset=None, seek_whence=0): + """Create a `Content` object from a file-like stream. + + Note that the stream will only be read from when ``iter_bytes`` is + called. + + :param stream: A file-like object to read the content from. The stream + is not closed by this function or the content object it returns. + :param content_type: The type of content. If not specified, defaults + to UTF8-encoded text/plain. + :param chunk_size: The size of chunks to read from the file. + Defaults to ``DEFAULT_CHUNK_SIZE``. + :param buffer_now: If True, reads from the stream right now. Otherwise, + only reads when the content is serialized. Defaults to False. + :param seek_offset: If non-None, seek within the stream before reading it. + :param seek_whence: If supplied, pass to stream.seek() when seeking. + """ + if content_type is None: + content_type = UTF8_TEXT + reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence) + return content_from_reader(reader, content_type, buffer_now) + + +def content_from_reader(reader, content_type, buffer_now): + """Create a Content object that will obtain the content from reader. + + :param reader: A callback to read the content. Should return an iterable of + bytestrings. + :param content_type: The content type to create. 
+ :param buffer_now: If True the reader is evaluated immediately and + buffered. + """ + if content_type is None: + content_type = UTF8_TEXT + if buffer_now: + contents = list(reader()) + reader = lambda: contents + return Content(content_type, reader) + + +def attach_file(detailed, path, name=None, content_type=None, + chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True): + """Attach a file to this test as a detail. + + This is a convenience method wrapping around ``addDetail``. + + Note that unless 'read_now' is explicitly passed in as True, the file + *must* exist when the test result is called with the results of this + test, after the test has been torn down. + + :param detailed: An object with details + :param path: The path to the file to attach. + :param name: The name to give to the detail for the attached file. + :param content_type: The content type of the file. If not provided, + defaults to UTF8-encoded text/plain. + :param chunk_size: The size of chunks to read from the file. Defaults + to something sensible. + :param buffer_now: If False the file content is read when the content + object is evaluated rather than when attach_file is called. + Note that this may be after any cleanups that obj_with_details has, so + if the file is a temporary file disabling buffer_now may cause the file + to be read after it is deleted. 
To handle those cases, using + attach_file as a cleanup is recommended because it guarantees a + sequence for when the attach_file call is made:: + + detailed.addCleanup(attach_file, 'foo.txt', detailed) + """ + if name is None: + name = os.path.basename(path) + content_object = content_from_file( + path, content_type, chunk_size, buffer_now) + detailed.addDetail(name, content_object) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py new file mode 100644 index 00000000000..bbf314b492e --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py @@ -0,0 +1,41 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +"""ContentType - a MIME Content Type.""" + + +class ContentType(object): + """A content type from http://www.iana.org/assignments/media-types/ + + :ivar type: The primary type, e.g. "text" or "application" + :ivar subtype: The subtype, e.g. "plain" or "octet-stream" + :ivar parameters: A dict of additional parameters specific to the + content type. 
+ """ + + def __init__(self, primary_type, sub_type, parameters=None): + """Create a ContentType.""" + if None in (primary_type, sub_type): + raise ValueError("None not permitted in %r, %r" % ( + primary_type, sub_type)) + self.type = primary_type + self.subtype = sub_type + self.parameters = parameters or {} + + def __eq__(self, other): + if type(other) != ContentType: + return False + return self.__dict__ == other.__dict__ + + def __repr__(self): + if self.parameters: + params = '; ' + params += '; '.join( + sorted('%s="%s"' % (k, v) for k, v in self.parameters.items())) + else: + params = '' + return "%s/%s%s" % (self.type, self.subtype, params) + + +JSON = ContentType('application', 'json') + +UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'}) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py new file mode 100644 index 00000000000..cf33c06e277 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py @@ -0,0 +1,336 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Individual test case execution for tests that return Deferreds. + +This module is highly experimental and is liable to change in ways that cause +subtle failures in tests. Use at your own peril. 
+""" + +__all__ = [ + 'assert_fails_with', + 'AsynchronousDeferredRunTest', + 'AsynchronousDeferredRunTestForBrokenTwisted', + 'SynchronousDeferredRunTest', + ] + +import sys + +from testtools.compat import StringIO +from testtools.content import ( + Content, + text_content, + ) +from testtools.content_type import UTF8_TEXT +from testtools.runtest import RunTest +from testtools._spinner import ( + extract_result, + NoResultError, + Spinner, + TimeoutError, + trap_unhandled_errors, + ) + +from twisted.internet import defer +from twisted.python import log +from twisted.trial.unittest import _LogObserver + + +class _DeferredRunTest(RunTest): + """Base for tests that return Deferreds.""" + + def _got_user_failure(self, failure, tb_label='traceback'): + """We got a failure from user code.""" + return self._got_user_exception( + (failure.type, failure.value, failure.getTracebackObject()), + tb_label=tb_label) + + +class SynchronousDeferredRunTest(_DeferredRunTest): + """Runner for tests that return synchronous Deferreds.""" + + def _run_user(self, function, *args): + d = defer.maybeDeferred(function, *args) + d.addErrback(self._got_user_failure) + result = extract_result(d) + return result + + +def run_with_log_observers(observers, function, *args, **kwargs): + """Run 'function' with the given Twisted log observers.""" + real_observers = list(log.theLogPublisher.observers) + for observer in real_observers: + log.theLogPublisher.removeObserver(observer) + for observer in observers: + log.theLogPublisher.addObserver(observer) + try: + return function(*args, **kwargs) + finally: + for observer in observers: + log.theLogPublisher.removeObserver(observer) + for observer in real_observers: + log.theLogPublisher.addObserver(observer) + + +# Observer of the Twisted log that we install during tests. +_log_observer = _LogObserver() + + + +class AsynchronousDeferredRunTest(_DeferredRunTest): + """Runner for tests that return Deferreds that fire asynchronously. 
+ + That is, this test runner assumes that the Deferreds will only fire if the + reactor is left to spin for a while. + + Do not rely too heavily on the nuances of the behaviour of this class. + What it does to the reactor is black magic, and if we can find nicer ways + of doing it we will gladly break backwards compatibility. + + This is highly experimental code. Use at your own risk. + """ + + def __init__(self, case, handlers=None, reactor=None, timeout=0.005, + debug=False): + """Construct an `AsynchronousDeferredRunTest`. + + :param case: The `TestCase` to run. + :param handlers: A list of exception handlers (ExceptionType, handler) + where 'handler' is a callable that takes a `TestCase`, a + ``testtools.TestResult`` and the exception raised. + :param reactor: The Twisted reactor to use. If not given, we use the + default reactor. + :param timeout: The maximum time allowed for running a test. The + default is 0.005s. + :param debug: Whether or not to enable Twisted's debugging. Use this + to get information about unhandled Deferreds and left-over + DelayedCalls. Defaults to False. + """ + super(AsynchronousDeferredRunTest, self).__init__(case, handlers) + if reactor is None: + from twisted.internet import reactor + self._reactor = reactor + self._timeout = timeout + self._debug = debug + + @classmethod + def make_factory(cls, reactor=None, timeout=0.005, debug=False): + """Make a factory that conforms to the RunTest factory interface.""" + # This is horrible, but it means that the return value of the method + # will be able to be assigned to a class variable *and* also be + # invoked directly. + class AsynchronousDeferredRunTestFactory: + def __call__(self, case, handlers=None): + return cls(case, handlers, reactor, timeout, debug) + return AsynchronousDeferredRunTestFactory() + + @defer.deferredGenerator + def _run_cleanups(self): + """Run the cleanups on the test case. + + We expect that the cleanups on the test case can also return + asynchronous Deferreds. 
As such, we take the responsibility for + running the cleanups, rather than letting TestCase do it. + """ + while self.case._cleanups: + f, args, kwargs = self.case._cleanups.pop() + d = defer.maybeDeferred(f, *args, **kwargs) + thing = defer.waitForDeferred(d) + yield thing + try: + thing.getResult() + except Exception: + exc_info = sys.exc_info() + self.case._report_traceback(exc_info) + last_exception = exc_info[1] + yield last_exception + + def _make_spinner(self): + """Make the `Spinner` to be used to run the tests.""" + return Spinner(self._reactor, debug=self._debug) + + def _run_deferred(self): + """Run the test, assuming everything in it is Deferred-returning. + + This should return a Deferred that fires with True if the test was + successful and False if the test was not successful. It should *not* + call addSuccess on the result, because there's reactor clean up that + we needs to be done afterwards. + """ + fails = [] + + def fail_if_exception_caught(exception_caught): + if self.exception_caught == exception_caught: + fails.append(None) + + def clean_up(ignored=None): + """Run the cleanups.""" + d = self._run_cleanups() + def clean_up_done(result): + if result is not None: + self._exceptions.append(result) + fails.append(None) + return d.addCallback(clean_up_done) + + def set_up_done(exception_caught): + """Set up is done, either clean up or run the test.""" + if self.exception_caught == exception_caught: + fails.append(None) + return clean_up() + else: + d = self._run_user(self.case._run_test_method, self.result) + d.addCallback(fail_if_exception_caught) + d.addBoth(tear_down) + return d + + def tear_down(ignored): + d = self._run_user(self.case._run_teardown, self.result) + d.addCallback(fail_if_exception_caught) + d.addBoth(clean_up) + return d + + d = self._run_user(self.case._run_setup, self.result) + d.addCallback(set_up_done) + d.addBoth(lambda ignored: len(fails) == 0) + return d + + def _log_user_exception(self, e): + """Raise 'e' and report it 
as a user exception.""" + try: + raise e + except e.__class__: + self._got_user_exception(sys.exc_info()) + + def _blocking_run_deferred(self, spinner): + try: + return trap_unhandled_errors( + spinner.run, self._timeout, self._run_deferred) + except NoResultError: + # We didn't get a result at all! This could be for any number of + # reasons, but most likely someone hit Ctrl-C during the test. + raise KeyboardInterrupt + except TimeoutError: + # The function took too long to run. + self._log_user_exception(TimeoutError(self.case, self._timeout)) + return False, [] + + def _run_core(self): + # Add an observer to trap all logged errors. + self.case.reactor = self._reactor + error_observer = _log_observer + full_log = StringIO() + full_observer = log.FileLogObserver(full_log) + spinner = self._make_spinner() + successful, unhandled = run_with_log_observers( + [error_observer.gotEvent, full_observer.emit], + self._blocking_run_deferred, spinner) + + self.case.addDetail( + 'twisted-log', Content(UTF8_TEXT, full_log.readlines)) + + logged_errors = error_observer.flushErrors() + for logged_error in logged_errors: + successful = False + self._got_user_failure(logged_error, tb_label='logged-error') + + if unhandled: + successful = False + for debug_info in unhandled: + f = debug_info.failResult + info = debug_info._getDebugTracebacks() + if info: + self.case.addDetail( + 'unhandled-error-in-deferred-debug', + text_content(info)) + self._got_user_failure(f, 'unhandled-error-in-deferred') + + junk = spinner.clear_junk() + if junk: + successful = False + self._log_user_exception(UncleanReactorError(junk)) + + if successful: + self.result.addSuccess(self.case, details=self.case.getDetails()) + + def _run_user(self, function, *args): + """Run a user-supplied function. + + This just makes sure that it returns a Deferred, regardless of how the + user wrote it. 
+ """ + d = defer.maybeDeferred(function, *args) + return d.addErrback(self._got_user_failure) + + +class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest): + """Test runner that works around Twisted brokenness re reactor junk. + + There are many APIs within Twisted itself where a Deferred fires but + leaves cleanup work scheduled for the reactor to do. Arguably, many of + these are bugs. This runner iterates the reactor event loop a number of + times after every test, in order to shake out these buggy-but-commonplace + events. + """ + + def _make_spinner(self): + spinner = super( + AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner() + spinner._OBLIGATORY_REACTOR_ITERATIONS = 2 + return spinner + + +def assert_fails_with(d, *exc_types, **kwargs): + """Assert that 'd' will fail with one of 'exc_types'. + + The normal way to use this is to return the result of 'assert_fails_with' + from your unit test. + + Note that this function is experimental and unstable. Use at your own + peril; expect the API to change. + + :param d: A Deferred that is expected to fail. + :param exc_types: The exception types that the Deferred is expected to + fail with. + :param failureException: An optional keyword argument. If provided, will + raise that exception instead of + ``testtools.TestCase.failureException``. + :return: A Deferred that will fail with an ``AssertionError`` if 'd' does + not fail with one of the exception types. + """ + failureException = kwargs.pop('failureException', None) + if failureException is None: + # Avoid circular imports. 
+ from testtools import TestCase + failureException = TestCase.failureException + expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types) + def got_success(result): + raise failureException( + "%s not raised (%r returned)" % (expected_names, result)) + def got_failure(failure): + if failure.check(*exc_types): + return failure.value + raise failureException("%s raised instead of %s:\n %s" % ( + failure.type.__name__, expected_names, failure.getTraceback())) + return d.addCallbacks(got_success, got_failure) + + +def flush_logged_errors(*error_types): + return _log_observer.flushErrors(*error_types) + + +class UncleanReactorError(Exception): + """Raised when the reactor has junk in it.""" + + def __init__(self, junk): + Exception.__init__(self, + "The reactor still thinks it needs to do things. Close all " + "connections, kill all processes and make sure all delayed " + "calls have either fired or been cancelled:\n%s" + % ''.join(map(self._get_junk_info, junk))) + + def _get_junk_info(self, junk): + from twisted.internet.base import DelayedCall + if isinstance(junk, DelayedCall): + ret = str(junk) + else: + ret = repr(junk) + return ' %s\n' % (ret,) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py new file mode 100644 index 00000000000..91e14ca504f --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py @@ -0,0 +1,62 @@ +# Copyright (c) 2010-2011 testtools developers . See LICENSE for details. 
+ +"""Extensions to the standard Python unittest library.""" + +import sys + +from distutils.core import Command +from distutils.errors import DistutilsOptionError + +from testtools.run import TestProgram, TestToolsTestRunner + + +class TestCommand(Command): + """Command to run unit tests with testtools""" + + description = "run unit tests with testtools" + + user_options = [ + ('catch', 'c', "Catch ctrl-C and display results so far"), + ('buffer', 'b', "Buffer stdout and stderr during tests"), + ('failfast', 'f', "Stop on first fail or error"), + ('test-module=','m', "Run 'test_suite' in specified module"), + ('test-suite=','s', + "Test suite to run (e.g. 'some_module.test_suite')") + ] + + def __init__(self, dist): + Command.__init__(self, dist) + self.runner = TestToolsTestRunner(sys.stdout) + + + def initialize_options(self): + self.test_suite = None + self.test_module = None + self.catch = None + self.buffer = None + self.failfast = None + + def finalize_options(self): + if self.test_suite is None: + if self.test_module is None: + raise DistutilsOptionError( + "You must specify a module or a suite to run tests from") + else: + self.test_suite = self.test_module+".test_suite" + elif self.test_module: + raise DistutilsOptionError( + "You may specify a module or a suite, but not both") + self.test_args = [self.test_suite] + if self.verbose: + self.test_args.insert(0, '--verbose') + if self.buffer: + self.test_args.insert(0, '--buffer') + if self.catch: + self.test_args.insert(0, '--catch') + if self.failfast: + self.test_args.insert(0, '--failfast') + + def run(self): + self.program = TestProgram( + argv=self.test_args, testRunner=self.runner, stdout=sys.stdout, + exit=False) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py new file mode 100644 index 00000000000..401d2cc10ed --- /dev/null +++ 
b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py @@ -0,0 +1,48 @@ +# Copyright (c) 2010-2012 testtools developers. See LICENSE for details. + +__all__ = [ + 'safe_hasattr', + 'try_import', + 'try_imports', + ] + +import sys + +# Compat - removal announced in 0.9.25. +from extras import ( + safe_hasattr, + try_import, + try_imports, + ) + + +def map_values(function, dictionary): + """Map ``function`` across the values of ``dictionary``. + + :return: A dict with the same keys as ``dictionary``, where the value + of each key ``k`` is ``function(dictionary[k])``. + """ + return dict((k, function(dictionary[k])) for k in dictionary) + + +def filter_values(function, dictionary): + """Filter ``dictionary`` by its values using ``function``.""" + return dict((k, v) for k, v in dictionary.items() if function(v)) + + +def dict_subtract(a, b): + """Return the part of ``a`` that's not in ``b``.""" + return dict((k, a[k]) for k in set(a) - set(b)) + + +def list_subtract(a, b): + """Return a list ``a`` without the elements of ``b``. + + If a particular value is in ``a`` twice and ``b`` once then the returned + list then that value will appear once in the returned list. + """ + a_only = list(a) + for x in b: + if x in a_only: + a_only.remove(x) + return a_only diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py new file mode 100644 index 00000000000..771d8142b32 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py @@ -0,0 +1,119 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. + +"""All the matchers. + +Matchers, a way to express complex assertions outside the testcase. + +Inspired by 'hamcrest'. + +Matcher provides the abstract API that all matchers need to implement. 
+ +Bundled matchers are listed in __all__: a list can be obtained by running +$ python -c 'import testtools.matchers; print testtools.matchers.__all__' +""" + +__all__ = [ + 'AfterPreprocessing', + 'AllMatch', + 'Annotate', + 'AnyMatch', + 'Contains', + 'ContainsAll', + 'ContainedByDict', + 'ContainsDict', + 'DirContains', + 'DirExists', + 'DocTestMatches', + 'EndsWith', + 'Equals', + 'FileContains', + 'FileExists', + 'GreaterThan', + 'HasLength', + 'HasPermissions', + 'Is', + 'IsInstance', + 'KeysEqual', + 'LessThan', + 'MatchesAll', + 'MatchesAny', + 'MatchesDict', + 'MatchesException', + 'MatchesListwise', + 'MatchesPredicate', + 'MatchesPredicateWithParams', + 'MatchesRegex', + 'MatchesSetwise', + 'MatchesStructure', + 'NotEquals', + 'Not', + 'PathExists', + 'Raises', + 'raises', + 'SamePath', + 'StartsWith', + 'TarballContains', + ] + +from ._basic import ( + Contains, + EndsWith, + Equals, + GreaterThan, + HasLength, + Is, + IsInstance, + LessThan, + MatchesRegex, + NotEquals, + StartsWith, + ) +from ._datastructures import ( + ContainsAll, + MatchesListwise, + MatchesSetwise, + MatchesStructure, + ) +from ._dict import ( + ContainedByDict, + ContainsDict, + KeysEqual, + MatchesDict, + ) +from ._doctest import ( + DocTestMatches, + ) +from ._exception import ( + MatchesException, + Raises, + raises, + ) +from ._filesystem import ( + DirContains, + DirExists, + FileContains, + FileExists, + HasPermissions, + PathExists, + SamePath, + TarballContains, + ) +from ._higherorder import ( + AfterPreprocessing, + AllMatch, + Annotate, + AnyMatch, + MatchesAll, + MatchesAny, + MatchesPredicate, + MatchesPredicateWithParams, + Not, + ) + +# XXX: These are not explicitly included in __all__. It's unclear how much of +# the public interface they really are. 
+from ._impl import ( + Matcher, + Mismatch, + MismatchError, + ) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py new file mode 100644 index 00000000000..2d9f143f10e --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py @@ -0,0 +1,326 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +__all__ = [ + 'Contains', + 'EndsWith', + 'Equals', + 'GreaterThan', + 'HasLength', + 'Is', + 'IsInstance', + 'LessThan', + 'MatchesRegex', + 'NotEquals', + 'StartsWith', + ] + +import operator +from pprint import pformat +import re + +from ..compat import ( + _isbytes, + istext, + str_is_unicode, + text_repr, + ) +from ..helpers import list_subtract +from ._higherorder import ( + MatchesPredicateWithParams, + PostfixedMismatch, + ) +from ._impl import ( + Matcher, + Mismatch, + ) + + +def _format(thing): + """ + Blocks of text with newlines are formatted as triple-quote + strings. Everything else is pretty-printed. 
+ """ + if istext(thing) or _isbytes(thing): + return text_repr(thing) + return pformat(thing) + + +class _BinaryComparison(object): + """Matcher that compares an object to another object.""" + + def __init__(self, expected): + self.expected = expected + + def __str__(self): + return "%s(%r)" % (self.__class__.__name__, self.expected) + + def match(self, other): + if self.comparator(other, self.expected): + return None + return _BinaryMismatch(self.expected, self.mismatch_string, other) + + def comparator(self, expected, other): + raise NotImplementedError(self.comparator) + + +class _BinaryMismatch(Mismatch): + """Two things did not match.""" + + def __init__(self, expected, mismatch_string, other): + self.expected = expected + self._mismatch_string = mismatch_string + self.other = other + + def describe(self): + left = repr(self.expected) + right = repr(self.other) + if len(left) + len(right) > 70: + return "%s:\nreference = %s\nactual = %s\n" % ( + self._mismatch_string, _format(self.expected), + _format(self.other)) + else: + return "%s %s %s" % (left, self._mismatch_string, right) + + +class Equals(_BinaryComparison): + """Matches if the items are equal.""" + + comparator = operator.eq + mismatch_string = '!=' + + +class NotEquals(_BinaryComparison): + """Matches if the items are not equal. + + In most cases, this is equivalent to ``Not(Equals(foo))``. The difference + only matters when testing ``__ne__`` implementations. 
+ """ + + comparator = operator.ne + mismatch_string = '==' + + +class Is(_BinaryComparison): + """Matches if the items are identical.""" + + comparator = operator.is_ + mismatch_string = 'is not' + + +class LessThan(_BinaryComparison): + """Matches if the item is less than the matchers reference object.""" + + comparator = operator.__lt__ + mismatch_string = 'is not >' + + +class GreaterThan(_BinaryComparison): + """Matches if the item is greater than the matchers reference object.""" + + comparator = operator.__gt__ + mismatch_string = 'is not <' + + +class SameMembers(Matcher): + """Matches if two iterators have the same members. + + This is not the same as set equivalence. The two iterators must be of the + same length and have the same repetitions. + """ + + def __init__(self, expected): + super(SameMembers, self).__init__() + self.expected = expected + + def __str__(self): + return '%s(%r)' % (self.__class__.__name__, self.expected) + + def match(self, observed): + expected_only = list_subtract(self.expected, observed) + observed_only = list_subtract(observed, self.expected) + if expected_only == observed_only == []: + return + return PostfixedMismatch( + "\nmissing: %s\nextra: %s" % ( + _format(expected_only), _format(observed_only)), + _BinaryMismatch(self.expected, 'elements differ', observed)) + + +class DoesNotStartWith(Mismatch): + + def __init__(self, matchee, expected): + """Create a DoesNotStartWith Mismatch. + + :param matchee: the string that did not match. + :param expected: the string that 'matchee' was expected to start with. + """ + self.matchee = matchee + self.expected = expected + + def describe(self): + return "%s does not start with %s." % ( + text_repr(self.matchee), text_repr(self.expected)) + + +class StartsWith(Matcher): + """Checks whether one string starts with another.""" + + def __init__(self, expected): + """Create a StartsWith Matcher. + + :param expected: the string that matchees should start with. 
+ """ + self.expected = expected + + def __str__(self): + return "StartsWith(%r)" % (self.expected,) + + def match(self, matchee): + if not matchee.startswith(self.expected): + return DoesNotStartWith(matchee, self.expected) + return None + + +class DoesNotEndWith(Mismatch): + + def __init__(self, matchee, expected): + """Create a DoesNotEndWith Mismatch. + + :param matchee: the string that did not match. + :param expected: the string that 'matchee' was expected to end with. + """ + self.matchee = matchee + self.expected = expected + + def describe(self): + return "%s does not end with %s." % ( + text_repr(self.matchee), text_repr(self.expected)) + + +class EndsWith(Matcher): + """Checks whether one string ends with another.""" + + def __init__(self, expected): + """Create a EndsWith Matcher. + + :param expected: the string that matchees should end with. + """ + self.expected = expected + + def __str__(self): + return "EndsWith(%r)" % (self.expected,) + + def match(self, matchee): + if not matchee.endswith(self.expected): + return DoesNotEndWith(matchee, self.expected) + return None + + +class IsInstance(object): + """Matcher that wraps isinstance.""" + + def __init__(self, *types): + self.types = tuple(types) + + def __str__(self): + return "%s(%s)" % (self.__class__.__name__, + ', '.join(type.__name__ for type in self.types)) + + def match(self, other): + if isinstance(other, self.types): + return None + return NotAnInstance(other, self.types) + + +class NotAnInstance(Mismatch): + + def __init__(self, matchee, types): + """Create a NotAnInstance Mismatch. + + :param matchee: the thing which is not an instance of any of types. + :param types: A tuple of the types which were expected. 
+ """ + self.matchee = matchee + self.types = types + + def describe(self): + if len(self.types) == 1: + typestr = self.types[0].__name__ + else: + typestr = 'any of (%s)' % ', '.join(type.__name__ for type in + self.types) + return "'%s' is not an instance of %s" % (self.matchee, typestr) + + +class DoesNotContain(Mismatch): + + def __init__(self, matchee, needle): + """Create a DoesNotContain Mismatch. + + :param matchee: the object that did not contain needle. + :param needle: the needle that 'matchee' was expected to contain. + """ + self.matchee = matchee + self.needle = needle + + def describe(self): + return "%r not in %r" % (self.needle, self.matchee) + + +class Contains(Matcher): + """Checks whether something is contained in another thing.""" + + def __init__(self, needle): + """Create a Contains Matcher. + + :param needle: the thing that needs to be contained by matchees. + """ + self.needle = needle + + def __str__(self): + return "Contains(%r)" % (self.needle,) + + def match(self, matchee): + try: + if self.needle not in matchee: + return DoesNotContain(matchee, self.needle) + except TypeError: + # e.g. 1 in 2 will raise TypeError + return DoesNotContain(matchee, self.needle) + return None + + +class MatchesRegex(object): + """Matches if the matchee is matched by a regular expression.""" + + def __init__(self, pattern, flags=0): + self.pattern = pattern + self.flags = flags + + def __str__(self): + args = ['%r' % self.pattern] + flag_arg = [] + # dir() sorts the attributes for us, so we don't need to do it again. 
+ for flag in dir(re): + if len(flag) == 1: + if self.flags & getattr(re, flag): + flag_arg.append('re.%s' % flag) + if flag_arg: + args.append('|'.join(flag_arg)) + return '%s(%s)' % (self.__class__.__name__, ', '.join(args)) + + def match(self, value): + if not re.match(self.pattern, value, self.flags): + pattern = self.pattern + if not isinstance(pattern, str_is_unicode and str or unicode): + pattern = pattern.decode("latin1") + pattern = pattern.encode("unicode_escape").decode("ascii") + return Mismatch("%r does not match /%s/" % ( + value, pattern.replace("\\\\", "\\"))) + + +def has_len(x, y): + return len(x) == y + + +HasLength = MatchesPredicateWithParams(has_len, "len({0}) != {1}", "HasLength") diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py new file mode 100644 index 00000000000..70de790738a --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py @@ -0,0 +1,228 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +__all__ = [ + 'ContainsAll', + 'MatchesListwise', + 'MatchesSetwise', + 'MatchesStructure', + ] + +"""Matchers that operate with knowledge of Python data structures.""" + +from ..helpers import map_values +from ._higherorder import ( + Annotate, + MatchesAll, + MismatchesAll, + ) +from ._impl import Mismatch + + +def ContainsAll(items): + """Make a matcher that checks whether a list of things is contained + in another thing. + + The matcher effectively checks that the provided sequence is a subset of + the matchee. + """ + from ._basic import Contains + return MatchesAll(*map(Contains, items), first_only=False) + + +class MatchesListwise(object): + """Matches if each matcher matches the corresponding value. 
+ + More easily explained by example than in words: + + >>> from ._basic import Equals + >>> MatchesListwise([Equals(1)]).match([1]) + >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2]) + >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe()) + Differences: [ + 1 != 2 + 2 != 1 + ] + >>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True) + >>> print (matcher.match([3, 4]).describe()) + 1 != 3 + """ + + def __init__(self, matchers, first_only=False): + """Construct a MatchesListwise matcher. + + :param matchers: A list of matcher that the matched values must match. + :param first_only: If True, then only report the first mismatch, + otherwise report all of them. Defaults to False. + """ + self.matchers = matchers + self.first_only = first_only + + def match(self, values): + from ._basic import Equals + mismatches = [] + length_mismatch = Annotate( + "Length mismatch", Equals(len(self.matchers))).match(len(values)) + if length_mismatch: + mismatches.append(length_mismatch) + for matcher, value in zip(self.matchers, values): + mismatch = matcher.match(value) + if mismatch: + if self.first_only: + return mismatch + mismatches.append(mismatch) + if mismatches: + return MismatchesAll(mismatches) + + +class MatchesStructure(object): + """Matcher that matches an object structurally. + + 'Structurally' here means that attributes of the object being matched are + compared against given matchers. + + `fromExample` allows the creation of a matcher from a prototype object and + then modified versions can be created with `update`. + + `byEquality` creates a matcher in much the same way as the constructor, + except that the matcher for each of the attributes is assumed to be + `Equals`. + + `byMatcher` creates a similar matcher to `byEquality`, but you get to pick + the matcher, rather than just using `Equals`. + """ + + def __init__(self, **kwargs): + """Construct a `MatchesStructure`. 
+ + :param kwargs: A mapping of attributes to matchers. + """ + self.kws = kwargs + + @classmethod + def byEquality(cls, **kwargs): + """Matches an object where the attributes equal the keyword values. + + Similar to the constructor, except that the matcher is assumed to be + Equals. + """ + from ._basic import Equals + return cls.byMatcher(Equals, **kwargs) + + @classmethod + def byMatcher(cls, matcher, **kwargs): + """Matches an object where the attributes match the keyword values. + + Similar to the constructor, except that the provided matcher is used + to match all of the values. + """ + return cls(**map_values(matcher, kwargs)) + + @classmethod + def fromExample(cls, example, *attributes): + from ._basic import Equals + kwargs = {} + for attr in attributes: + kwargs[attr] = Equals(getattr(example, attr)) + return cls(**kwargs) + + def update(self, **kws): + new_kws = self.kws.copy() + for attr, matcher in kws.items(): + if matcher is None: + new_kws.pop(attr, None) + else: + new_kws[attr] = matcher + return type(self)(**new_kws) + + def __str__(self): + kws = [] + for attr, matcher in sorted(self.kws.items()): + kws.append("%s=%s" % (attr, matcher)) + return "%s(%s)" % (self.__class__.__name__, ', '.join(kws)) + + def match(self, value): + matchers = [] + values = [] + for attr, matcher in sorted(self.kws.items()): + matchers.append(Annotate(attr, matcher)) + values.append(getattr(value, attr)) + return MatchesListwise(matchers).match(values) + + +class MatchesSetwise(object): + """Matches if all the matchers match elements of the value being matched. + + That is, each element in the 'observed' set must match exactly one matcher + from the set of matchers, with no matchers left over. + + The difference compared to `MatchesListwise` is that the order of the + matchings does not matter. 
+ """ + + def __init__(self, *matchers): + self.matchers = matchers + + def match(self, observed): + remaining_matchers = set(self.matchers) + not_matched = [] + for value in observed: + for matcher in remaining_matchers: + if matcher.match(value) is None: + remaining_matchers.remove(matcher) + break + else: + not_matched.append(value) + if not_matched or remaining_matchers: + remaining_matchers = list(remaining_matchers) + # There are various cases that all should be reported somewhat + # differently. + + # There are two trivial cases: + # 1) There are just some matchers left over. + # 2) There are just some values left over. + + # Then there are three more interesting cases: + # 3) There are the same number of matchers and values left over. + # 4) There are more matchers left over than values. + # 5) There are more values left over than matchers. + + if len(not_matched) == 0: + if len(remaining_matchers) > 1: + msg = "There were %s matchers left over: " % ( + len(remaining_matchers),) + else: + msg = "There was 1 matcher left over: " + msg += ', '.join(map(str, remaining_matchers)) + return Mismatch(msg) + elif len(remaining_matchers) == 0: + if len(not_matched) > 1: + return Mismatch( + "There were %s values left over: %s" % ( + len(not_matched), not_matched)) + else: + return Mismatch( + "There was 1 value left over: %s" % ( + not_matched, )) + else: + common_length = min(len(remaining_matchers), len(not_matched)) + if common_length == 0: + raise AssertionError("common_length can't be 0 here") + if common_length > 1: + msg = "There were %s mismatches" % (common_length,) + else: + msg = "There was 1 mismatch" + if len(remaining_matchers) > len(not_matched): + extra_matchers = remaining_matchers[common_length:] + msg += " and %s extra matcher" % (len(extra_matchers), ) + if len(extra_matchers) > 1: + msg += "s" + msg += ': ' + ', '.join(map(str, extra_matchers)) + elif len(not_matched) > len(remaining_matchers): + extra_values = not_matched[common_length:] + msg 
+= " and %s extra value" % (len(extra_values), ) + if len(extra_values) > 1: + msg += "s" + msg += ': ' + str(extra_values) + return Annotate( + msg, MatchesListwise(remaining_matchers[:common_length]) + ).match(not_matched[:common_length]) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py new file mode 100644 index 00000000000..b1ec9151b24 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py @@ -0,0 +1,259 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +__all__ = [ + 'KeysEqual', + ] + +from ..helpers import ( + dict_subtract, + filter_values, + map_values, + ) +from ._higherorder import ( + AnnotatedMismatch, + PrefixedMismatch, + MismatchesAll, + ) +from ._impl import Matcher, Mismatch + + +def LabelledMismatches(mismatches, details=None): + """A collection of mismatches, each labelled.""" + return MismatchesAll( + (PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())), + wrap=False) + + +class MatchesAllDict(Matcher): + """Matches if all of the matchers it is created with match. + + A lot like ``MatchesAll``, but takes a dict of Matchers and labels any + mismatches with the key of the dictionary. 
+ """ + + def __init__(self, matchers): + super(MatchesAllDict, self).__init__() + self.matchers = matchers + + def __str__(self): + return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),) + + def match(self, observed): + mismatches = {} + for label in self.matchers: + mismatches[label] = self.matchers[label].match(observed) + return _dict_to_mismatch( + mismatches, result_mismatch=LabelledMismatches) + + +class DictMismatches(Mismatch): + """A mismatch with a dict of child mismatches.""" + + def __init__(self, mismatches, details=None): + super(DictMismatches, self).__init__(None, details=details) + self.mismatches = mismatches + + def describe(self): + lines = ['{'] + lines.extend( + [' %r: %s,' % (key, mismatch.describe()) + for (key, mismatch) in sorted(self.mismatches.items())]) + lines.append('}') + return '\n'.join(lines) + + +def _dict_to_mismatch(data, to_mismatch=None, + result_mismatch=DictMismatches): + if to_mismatch: + data = map_values(to_mismatch, data) + mismatches = filter_values(bool, data) + if mismatches: + return result_mismatch(mismatches) + + +class _MatchCommonKeys(Matcher): + """Match on keys in a dictionary. + + Given a dictionary where the values are matchers, this will look for + common keys in the matched dictionary and match if and only if all common + keys match the given matchers. 
+ + Thus:: + + >>> structure = {'a': Equals('x'), 'b': Equals('y')} + >>> _MatchCommonKeys(structure).match({'a': 'x', 'c': 'z'}) + None + """ + + def __init__(self, dict_of_matchers): + super(_MatchCommonKeys, self).__init__() + self._matchers = dict_of_matchers + + def _compare_dicts(self, expected, observed): + common_keys = set(expected.keys()) & set(observed.keys()) + mismatches = {} + for key in common_keys: + mismatch = expected[key].match(observed[key]) + if mismatch: + mismatches[key] = mismatch + return mismatches + + def match(self, observed): + mismatches = self._compare_dicts(self._matchers, observed) + if mismatches: + return DictMismatches(mismatches) + + +class _SubDictOf(Matcher): + """Matches if the matched dict only has keys that are in given dict.""" + + def __init__(self, super_dict, format_value=repr): + super(_SubDictOf, self).__init__() + self.super_dict = super_dict + self.format_value = format_value + + def match(self, observed): + excess = dict_subtract(observed, self.super_dict) + return _dict_to_mismatch( + excess, lambda v: Mismatch(self.format_value(v))) + + +class _SuperDictOf(Matcher): + """Matches if all of the keys in the given dict are in the matched dict. + """ + + def __init__(self, sub_dict, format_value=repr): + super(_SuperDictOf, self).__init__() + self.sub_dict = sub_dict + self.format_value = format_value + + def match(self, super_dict): + return _SubDictOf(super_dict, self.format_value).match(self.sub_dict) + + +def _format_matcher_dict(matchers): + return '{%s}' % ( + ', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items()))) + + +class _CombinedMatcher(Matcher): + """Many matchers labelled and combined into one uber-matcher. + + Subclass this and then specify a dict of matcher factories that take a + single 'expected' value and return a matcher. The subclass will match + only if all of the matchers made from factories match. + + Not **entirely** dissimilar from ``MatchesAll``. 
+ """ + + matcher_factories = {} + + def __init__(self, expected): + super(_CombinedMatcher, self).__init__() + self._expected = expected + + def format_expected(self, expected): + return repr(expected) + + def __str__(self): + return '%s(%s)' % ( + self.__class__.__name__, self.format_expected(self._expected)) + + def match(self, observed): + matchers = dict( + (k, v(self._expected)) for k, v in self.matcher_factories.items()) + return MatchesAllDict(matchers).match(observed) + + +class MatchesDict(_CombinedMatcher): + """Match a dictionary exactly, by its keys. + + Specify a dictionary mapping keys (often strings) to matchers. This is + the 'expected' dict. Any dictionary that matches this must have exactly + the same keys, and the values must match the corresponding matchers in the + expected dict. + """ + + matcher_factories = { + 'Extra': _SubDictOf, + 'Missing': lambda m: _SuperDictOf(m, format_value=str), + 'Differences': _MatchCommonKeys, + } + + format_expected = lambda self, expected: _format_matcher_dict(expected) + + +class ContainsDict(_CombinedMatcher): + """Match a dictionary for that contains a specified sub-dictionary. + + Specify a dictionary mapping keys (often strings) to matchers. This is + the 'expected' dict. Any dictionary that matches this must have **at + least** these keys, and the values must match the corresponding matchers + in the expected dict. Dictionaries that have more keys will also match. + + In other words, any matching dictionary must contain the dictionary given + to the constructor. + + Does not check for strict sub-dictionary. That is, equal dictionaries + match. + """ + + matcher_factories = { + 'Missing': lambda m: _SuperDictOf(m, format_value=str), + 'Differences': _MatchCommonKeys, + } + + format_expected = lambda self, expected: _format_matcher_dict(expected) + + +class ContainedByDict(_CombinedMatcher): + """Match a dictionary for which this is a super-dictionary. 
+ + Specify a dictionary mapping keys (often strings) to matchers. This is + the 'expected' dict. Any dictionary that matches this must have **only** + these keys, and the values must match the corresponding matchers in the + expected dict. Dictionaries that have fewer keys can also match. + + In other words, any matching dictionary must be contained by the + dictionary given to the constructor. + + Does not check for strict super-dictionary. That is, equal dictionaries + match. + """ + + matcher_factories = { + 'Extra': _SubDictOf, + 'Differences': _MatchCommonKeys, + } + + format_expected = lambda self, expected: _format_matcher_dict(expected) + + +class KeysEqual(Matcher): + """Checks whether a dict has particular keys.""" + + def __init__(self, *expected): + """Create a `KeysEqual` Matcher. + + :param expected: The keys the dict is expected to have. If a dict, + then we use the keys of that dict, if a collection, we assume it + is a collection of expected keys. + """ + super(KeysEqual, self).__init__() + try: + self.expected = expected[0].keys() + except AttributeError: + self.expected = list(expected) + + def __str__(self): + return "KeysEqual(%s)" % ', '.join(map(repr, self.expected)) + + def match(self, matchee): + from ._basic import _BinaryMismatch, Equals + expected = sorted(self.expected) + matched = Equals(expected).match(sorted(matchee.keys())) + if matched: + return AnnotatedMismatch( + 'Keys not equal', + _BinaryMismatch(expected, 'does not match', matchee)) + return None diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py new file mode 100644 index 00000000000..41f3c003e53 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py @@ -0,0 +1,104 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. 
+ +__all__ = [ + 'DocTestMatches', + ] + +import doctest +import re + +from ..compat import str_is_unicode +from ._impl import Mismatch + + +class _NonManglingOutputChecker(doctest.OutputChecker): + """Doctest checker that works with unicode rather than mangling strings + + This is needed because current Python versions have tried to fix string + encoding related problems, but regressed the default behaviour with + unicode inputs in the process. + + In Python 2.6 and 2.7 ``OutputChecker.output_difference`` is was changed + to return a bytestring encoded as per ``sys.stdout.encoding``, or utf-8 if + that can't be determined. Worse, that encoding process happens in the + innocent looking `_indent` global function. Because the + `DocTestMismatch.describe` result may well not be destined for printing to + stdout, this is no good for us. To get a unicode return as before, the + method is monkey patched if ``doctest._encoding`` exists. + + Python 3 has a different problem. For some reason both inputs are encoded + to ascii with 'backslashreplace', making an escaped string matches its + unescaped form. Overriding the offending ``OutputChecker._toAscii`` method + is sufficient to revert this. 
+ """ + + def _toAscii(self, s): + """Return ``s`` unchanged rather than mangling it to ascii""" + return s + + # Only do this overriding hackery if doctest has a broken _input function + if getattr(doctest, "_encoding", None) is not None: + from types import FunctionType as __F + __f = doctest.OutputChecker.output_difference.im_func + __g = dict(__f.func_globals) + def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)): + """Prepend non-empty lines in ``s`` with ``indent`` number of spaces""" + return _pattern.sub(indent*" ", s) + __g["_indent"] = _indent + output_difference = __F(__f.func_code, __g, "output_difference") + del __F, __f, __g, _indent + + +class DocTestMatches(object): + """See if a string matches a doctest example.""" + + def __init__(self, example, flags=0): + """Create a DocTestMatches to match example. + + :param example: The example to match e.g. 'foo bar baz' + :param flags: doctest comparison flags to match on. e.g. + doctest.ELLIPSIS. + """ + if not example.endswith('\n'): + example += '\n' + self.want = example # required variable name by doctest. 
+ self.flags = flags + self._checker = _NonManglingOutputChecker() + + def __str__(self): + if self.flags: + flagstr = ", flags=%d" % self.flags + else: + flagstr = "" + return 'DocTestMatches(%r%s)' % (self.want, flagstr) + + def _with_nl(self, actual): + result = self.want.__class__(actual) + if not result.endswith('\n'): + result += '\n' + return result + + def match(self, actual): + with_nl = self._with_nl(actual) + if self._checker.check_output(self.want, with_nl, self.flags): + return None + return DocTestMismatch(self, with_nl) + + def _describe_difference(self, with_nl): + return self._checker.output_difference(self, with_nl, self.flags) + + +class DocTestMismatch(Mismatch): + """Mismatch object for DocTestMatches.""" + + def __init__(self, matcher, with_nl): + self.matcher = matcher + self.with_nl = with_nl + + def describe(self): + s = self.matcher._describe_difference(self.with_nl) + if str_is_unicode or isinstance(s, unicode): + return s + # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should + # be escaped, in addition to non-ascii bytes. + return s.decode("latin1").encode("ascii", "backslashreplace") diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py new file mode 100644 index 00000000000..1938f152b78 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py @@ -0,0 +1,126 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. 
+ +__all__ = [ + 'MatchesException', + 'Raises', + 'raises', + ] + +import sys + +from testtools.compat import ( + classtypes, + _error_repr, + isbaseexception, + istext, + ) +from ._basic import MatchesRegex +from ._higherorder import AfterPreproccessing +from ._impl import ( + Matcher, + Mismatch, + ) + + +class MatchesException(Matcher): + """Match an exc_info tuple against an exception instance or type.""" + + def __init__(self, exception, value_re=None): + """Create a MatchesException that will match exc_info's for exception. + + :param exception: Either an exception instance or type. + If an instance is given, the type and arguments of the exception + are checked. If a type is given only the type of the exception is + checked. If a tuple is given, then as with isinstance, any of the + types in the tuple matching is sufficient to match. + :param value_re: If 'exception' is a type, and the matchee exception + is of the right type, then match against this. If value_re is a + string, then assume value_re is a regular expression and match + the str() of the exception against it. Otherwise, assume value_re + is a matcher, and match the exception against it. + """ + Matcher.__init__(self) + self.expected = exception + if istext(value_re): + value_re = AfterPreproccessing(str, MatchesRegex(value_re), False) + self.value_re = value_re + expected_type = type(self.expected) + self._is_instance = not any(issubclass(expected_type, class_type) + for class_type in classtypes() + (tuple,)) + + def match(self, other): + if type(other) != tuple: + return Mismatch('%r is not an exc_info tuple' % other) + expected_class = self.expected + if self._is_instance: + expected_class = expected_class.__class__ + if not issubclass(other[0], expected_class): + return Mismatch('%r is not a %r' % (other[0], expected_class)) + if self._is_instance: + if other[1].args != self.expected.args: + return Mismatch('%s has different arguments to %s.' 
% ( + _error_repr(other[1]), _error_repr(self.expected))) + elif self.value_re is not None: + return self.value_re.match(other[1]) + + def __str__(self): + if self._is_instance: + return "MatchesException(%s)" % _error_repr(self.expected) + return "MatchesException(%s)" % repr(self.expected) + + +class Raises(Matcher): + """Match if the matchee raises an exception when called. + + Exceptions which are not subclasses of Exception propogate out of the + Raises.match call unless they are explicitly matched. + """ + + def __init__(self, exception_matcher=None): + """Create a Raises matcher. + + :param exception_matcher: Optional validator for the exception raised + by matchee. If supplied the exc_info tuple for the exception raised + is passed into that matcher. If no exception_matcher is supplied + then the simple fact of raising an exception is considered enough + to match on. + """ + self.exception_matcher = exception_matcher + + def match(self, matchee): + try: + result = matchee() + return Mismatch('%r returned %r' % (matchee, result)) + # Catch all exceptions: Raises() should be able to match a + # KeyboardInterrupt or SystemExit. + except: + exc_info = sys.exc_info() + if self.exception_matcher: + mismatch = self.exception_matcher.match(exc_info) + if not mismatch: + del exc_info + return + else: + mismatch = None + # The exception did not match, or no explicit matching logic was + # performed. If the exception is a non-user exception (that is, not + # a subclass of Exception on Python 2.5+) then propogate it. + if isbaseexception(exc_info[1]): + del exc_info + raise + return mismatch + + def __str__(self): + return 'Raises()' + + +def raises(exception): + """Make a matcher that checks that a callable raises an exception. + + This is a convenience function, exactly equivalent to:: + + return Raises(MatchesException(exception)) + + See `Raises` and `MatchesException` for more information. 
+ """ + return Raises(MatchesException(exception)) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py new file mode 100644 index 00000000000..54f749b1359 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py @@ -0,0 +1,192 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +"""Matchers for things related to the filesystem.""" + +__all__ = [ + 'FileContains', + 'DirExists', + 'FileExists', + 'HasPermissions', + 'PathExists', + 'SamePath', + 'TarballContains', + ] + +import os +import tarfile + +from ._basic import Equals +from ._higherorder import ( + MatchesAll, + MatchesPredicate, + ) +from ._impl import ( + Matcher, + ) + + +def PathExists(): + """Matches if the given path exists. + + Use like this:: + + assertThat('/some/path', PathExists()) + """ + return MatchesPredicate(os.path.exists, "%s does not exist.") + + +def DirExists(): + """Matches if the path exists and is a directory.""" + return MatchesAll( + PathExists(), + MatchesPredicate(os.path.isdir, "%s is not a directory."), + first_only=True) + + +def FileExists(): + """Matches if the given path exists and is a file.""" + return MatchesAll( + PathExists(), + MatchesPredicate(os.path.isfile, "%s is not a file."), + first_only=True) + + +class DirContains(Matcher): + """Matches if the given directory contains files with the given names. + + That is, is the directory listing exactly equal to the given files? + """ + + def __init__(self, filenames=None, matcher=None): + """Construct a ``DirContains`` matcher. + + Can be used in a basic mode where the whole directory listing is + matched against an expected directory listing (by passing + ``filenames``). 
Can also be used in a more advanced way where the + whole directory listing is matched against an arbitrary matcher (by + passing ``matcher`` instead). + + :param filenames: If specified, match the sorted directory listing + against this list of filenames, sorted. + :param matcher: If specified, match the sorted directory listing + against this matcher. + """ + if filenames == matcher == None: + raise AssertionError( + "Must provide one of `filenames` or `matcher`.") + if None not in (filenames, matcher): + raise AssertionError( + "Must provide either `filenames` or `matcher`, not both.") + if filenames is None: + self.matcher = matcher + else: + self.matcher = Equals(sorted(filenames)) + + def match(self, path): + mismatch = DirExists().match(path) + if mismatch is not None: + return mismatch + return self.matcher.match(sorted(os.listdir(path))) + + +class FileContains(Matcher): + """Matches if the given file has the specified contents.""" + + def __init__(self, contents=None, matcher=None): + """Construct a ``FileContains`` matcher. + + Can be used in a basic mode where the file contents are compared for + equality against the expected file contents (by passing ``contents``). + Can also be used in a more advanced way where the file contents are + matched against an arbitrary matcher (by passing ``matcher`` instead). + + :param contents: If specified, match the contents of the file with + these contents. + :param matcher: If specified, match the contents of the file against + this matcher. 
+ """ + if contents == matcher == None: + raise AssertionError( + "Must provide one of `contents` or `matcher`.") + if None not in (contents, matcher): + raise AssertionError( + "Must provide either `contents` or `matcher`, not both.") + if matcher is None: + self.matcher = Equals(contents) + else: + self.matcher = matcher + + def match(self, path): + mismatch = PathExists().match(path) + if mismatch is not None: + return mismatch + f = open(path) + try: + actual_contents = f.read() + return self.matcher.match(actual_contents) + finally: + f.close() + + def __str__(self): + return "File at path exists and contains %s" % self.contents + + +class HasPermissions(Matcher): + """Matches if a file has the given permissions. + + Permissions are specified and matched as a four-digit octal string. + """ + + def __init__(self, octal_permissions): + """Construct a HasPermissions matcher. + + :param octal_permissions: A four digit octal string, representing the + intended access permissions. e.g. '0775' for rwxrwxr-x. + """ + super(HasPermissions, self).__init__() + self.octal_permissions = octal_permissions + + def match(self, filename): + permissions = oct(os.stat(filename).st_mode)[-4:] + return Equals(self.octal_permissions).match(permissions) + + +class SamePath(Matcher): + """Matches if two paths are the same. + + That is, the paths are equal, or they point to the same file but in + different ways. The paths do not have to exist. + """ + + def __init__(self, path): + super(SamePath, self).__init__() + self.path = path + + def match(self, other_path): + f = lambda x: os.path.abspath(os.path.realpath(x)) + return Equals(f(self.path)).match(f(other_path)) + + +class TarballContains(Matcher): + """Matches if the given tarball contains the given paths. + + Uses TarFile.getnames() to get the paths out of the tarball. 
+ """ + + def __init__(self, paths): + super(TarballContains, self).__init__() + self.paths = paths + self.path_matcher = Equals(sorted(self.paths)) + + def match(self, tarball_path): + # Open underlying file first to ensure it's always closed: + # <http://bugs.python.org/issue10233> + f = open(tarball_path, "rb") + try: + tarball = tarfile.open(tarball_path, fileobj=f) + try: + return self.path_matcher.match(sorted(tarball.getnames())) + finally: + tarball.close() + finally: + f.close() diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py new file mode 100644 index 00000000000..3570f573747 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py @@ -0,0 +1,368 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +__all__ = [ + 'AfterPreprocessing', + 'AllMatch', + 'Annotate', + 'AnyMatch', + 'MatchesAny', + 'MatchesAll', + 'Not', + ] + +import types + +from ._impl import ( + Matcher, + Mismatch, + MismatchDecorator, + ) + + +class MatchesAny(object): + """Matches if any of the matchers it is created with match.""" + + def __init__(self, *matchers): + self.matchers = matchers + + def match(self, matchee): + results = [] + for matcher in self.matchers: + mismatch = matcher.match(matchee) + if mismatch is None: + return None + results.append(mismatch) + return MismatchesAll(results) + + def __str__(self): + return "MatchesAny(%s)" % ', '.join([ + str(matcher) for matcher in self.matchers]) + + +class MatchesAll(object): + """Matches if all of the matchers it is created with match.""" + + def __init__(self, *matchers, **options): + """Construct a MatchesAll matcher. + + Just list the component matchers as arguments in the ``*args`` + style. If you want only the first mismatch to be reported, past in + first_only=True as a keyword argument. 
By default, all mismatches are + reported. + """ + self.matchers = matchers + self.first_only = options.get('first_only', False) + + def __str__(self): + return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers)) + + def match(self, matchee): + results = [] + for matcher in self.matchers: + mismatch = matcher.match(matchee) + if mismatch is not None: + if self.first_only: + return mismatch + results.append(mismatch) + if results: + return MismatchesAll(results) + else: + return None + + +class MismatchesAll(Mismatch): + """A mismatch with many child mismatches.""" + + def __init__(self, mismatches, wrap=True): + self.mismatches = mismatches + self._wrap = wrap + + def describe(self): + descriptions = [] + if self._wrap: + descriptions = ["Differences: ["] + for mismatch in self.mismatches: + descriptions.append(mismatch.describe()) + if self._wrap: + descriptions.append("]") + return '\n'.join(descriptions) + + +class Not(object): + """Inverts a matcher.""" + + def __init__(self, matcher): + self.matcher = matcher + + def __str__(self): + return 'Not(%s)' % (self.matcher,) + + def match(self, other): + mismatch = self.matcher.match(other) + if mismatch is None: + return MatchedUnexpectedly(self.matcher, other) + else: + return None + + +class MatchedUnexpectedly(Mismatch): + """A thing matched when it wasn't supposed to.""" + + def __init__(self, matcher, other): + self.matcher = matcher + self.other = other + + def describe(self): + return "%r matches %s" % (self.other, self.matcher) + + +class Annotate(object): + """Annotates a matcher with a descriptive string. + + Mismatches are then described as '<mismatch>: <annotation>'. 
+ """ + + def __init__(self, annotation, matcher): + self.annotation = annotation + self.matcher = matcher + + @classmethod + def if_message(cls, annotation, matcher): + """Annotate ``matcher`` only if ``annotation`` is non-empty.""" + if not annotation: + return matcher + return cls(annotation, matcher) + + def __str__(self): + return 'Annotate(%r, %s)' % (self.annotation, self.matcher) + + def match(self, other): + mismatch = self.matcher.match(other) + if mismatch is not None: + return AnnotatedMismatch(self.annotation, mismatch) + + +class PostfixedMismatch(MismatchDecorator): + """A mismatch annotated with a descriptive string.""" + + def __init__(self, annotation, mismatch): + super(PostfixedMismatch, self).__init__(mismatch) + self.annotation = annotation + self.mismatch = mismatch + + def describe(self): + return '%s: %s' % (self.original.describe(), self.annotation) + + +AnnotatedMismatch = PostfixedMismatch + + +class PrefixedMismatch(MismatchDecorator): + + def __init__(self, prefix, mismatch): + super(PrefixedMismatch, self).__init__(mismatch) + self.prefix = prefix + + def describe(self): + return '%s: %s' % (self.prefix, self.original.describe()) + + +class AfterPreprocessing(object): + """Matches if the value matches after passing through a function. + + This can be used to aid in creating trivial matchers as functions, for + example:: + + def PathHasFileContent(content): + def _read(path): + return open(path).read() + return AfterPreprocessing(_read, Equals(content)) + """ + + def __init__(self, preprocessor, matcher, annotate=True): + """Create an AfterPreprocessing matcher. + + :param preprocessor: A function called with the matchee before + matching. + :param matcher: What to match the preprocessed matchee against. + :param annotate: Whether or not to annotate the matcher with + something explaining how we transformed the matchee. Defaults + to True. 
+ """ + self.preprocessor = preprocessor + self.matcher = matcher + self.annotate = annotate + + def _str_preprocessor(self): + if isinstance(self.preprocessor, types.FunctionType): + return '<function %s>' % self.preprocessor.__name__ + return str(self.preprocessor) + + def __str__(self): + return "AfterPreprocessing(%s, %s)" % ( + self._str_preprocessor(), self.matcher) + + def match(self, value): + after = self.preprocessor(value) + if self.annotate: + matcher = Annotate( + "after %s on %r" % (self._str_preprocessor(), value), + self.matcher) + else: + matcher = self.matcher + return matcher.match(after) + + +# This is the old, deprecated. spelling of the name, kept for backwards +# compatibility. +AfterPreproccessing = AfterPreprocessing + + +class AllMatch(object): + """Matches if all provided values match the given matcher.""" + + def __init__(self, matcher): + self.matcher = matcher + + def __str__(self): + return 'AllMatch(%s)' % (self.matcher,) + + def match(self, values): + mismatches = [] + for value in values: + mismatch = self.matcher.match(value) + if mismatch: + mismatches.append(mismatch) + if mismatches: + return MismatchesAll(mismatches) + + +class AnyMatch(object): + """Matches if any of the provided values match the given matcher.""" + + def __init__(self, matcher): + self.matcher = matcher + + def __str__(self): + return 'AnyMatch(%s)' % (self.matcher,) + + def match(self, values): + mismatches = [] + for value in values: + mismatch = self.matcher.match(value) + if mismatch: + mismatches.append(mismatch) + else: + return None + return MismatchesAll(mismatches) + + +class MatchesPredicate(Matcher): + """Match if a given function returns True. + + It is reasonably common to want to make a very simple matcher based on a + function that you already have that returns True or False given a single + argument (i.e. a predicate function). This matcher makes it very easy to + do so. 
e.g.:: + + IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even') + self.assertThat(4, IsEven) + """ + + def __init__(self, predicate, message): + """Create a ``MatchesPredicate`` matcher. + + :param predicate: A function that takes a single argument and returns + a value that will be interpreted as a boolean. + :param message: A message to describe a mismatch. It will be formatted + with '%' and be given whatever was passed to ``match()``. Thus, it + needs to contain exactly one thing like '%s', '%d' or '%f'. + """ + self.predicate = predicate + self.message = message + + def __str__(self): + return '%s(%r, %r)' % ( + self.__class__.__name__, self.predicate, self.message) + + def match(self, x): + if not self.predicate(x): + return Mismatch(self.message % x) + + +def MatchesPredicateWithParams(predicate, message, name=None): + """Match if a given parameterised function returns True. + + It is reasonably common to want to make a very simple matcher based on a + function that you already have that returns True or False given some + arguments. This matcher makes it very easy to do so. e.g.:: + + HasLength = MatchesPredicate( + lambda x, y: len(x) == y, 'len({0}) is not {1}') + # This assertion will fail, as 'len([1, 2]) == 3' is False. + self.assertThat([1, 2], HasLength(3)) + + Note that unlike MatchesPredicate MatchesPredicateWithParams returns a + factory which you then customise to use by constructing an actual matcher + from it. + + The predicate function should take the object to match as its first + parameter. Any additional parameters supplied when constructing a matcher + are supplied to the predicate as additional parameters when checking for a + match. + + :param predicate: The predicate function. + :param message: A format string for describing mis-matches. + :param name: Optional replacement name for the matcher. 
+ """ + def construct_matcher(*args, **kwargs): + return _MatchesPredicateWithParams( + predicate, message, name, *args, **kwargs) + return construct_matcher + + +class _MatchesPredicateWithParams(Matcher): + + def __init__(self, predicate, message, name, *args, **kwargs): + """Create a ``MatchesPredicateWithParams`` matcher. + + :param predicate: A function that takes an object to match and + additional params as given in ``*args`` and ``**kwargs``. The + result of the function will be interpreted as a boolean to + determine a match. + :param message: A message to describe a mismatch. It will be formatted + with .format() and be given a tuple containing whatever was passed + to ``match()`` + ``*args`` in ``*args``, and whatever was passed to + ``**kwargs`` as its ``**kwargs``. + + For instance, to format a single parameter:: + + "{0} is not a {1}" + + To format a keyword arg:: + + "{0} is not a {type_to_check}" + :param name: What name to use for the matcher class. Pass None to use + the default. 
+ """ + self.predicate = predicate + self.message = message + self.name = name + self.args = args + self.kwargs = kwargs + + def __str__(self): + args = [str(arg) for arg in self.args] + kwargs = ["%s=%s" % item for item in self.kwargs.items()] + args = ", ".join(args + kwargs) + if self.name is None: + name = 'MatchesPredicateWithParams(%r, %r)' % ( + self.predicate, self.message) + else: + name = self.name + return '%s(%s)' % (name, args) + + def match(self, x): + if not self.predicate(x, *self.args, **self.kwargs): + return Mismatch( + self.message.format(*((x,) + self.args), **self.kwargs)) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py new file mode 100644 index 00000000000..36e5ee02218 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py @@ -0,0 +1,175 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + +"""Matchers, a way to express complex assertions outside the testcase. + +Inspired by 'hamcrest'. + +Matcher provides the abstract API that all matchers need to implement. + +Bundled matchers are listed in __all__: a list can be obtained by running +$ python -c 'import testtools.matchers; print testtools.matchers.__all__' +""" + +__all__ = [ + 'Matcher', + 'Mismatch', + 'MismatchDecorator', + 'MismatchError', + ] + +from testtools.compat import ( + _isbytes, + istext, + str_is_unicode, + text_repr + ) + + +class Matcher(object): + """A pattern matcher. + + A Matcher must implement match and __str__ to be used by + testtools.TestCase.assertThat. Matcher.match(thing) returns None when + thing is completely matched, and a Mismatch object otherwise. + + Matchers can be useful outside of test cases, as they are simply a + pattern matching language expressed as objects. 
+ + testtools.matchers is inspired by hamcrest, but is pythonic rather than + a Java transcription. + """ + + def match(self, something): + """Return None if this matcher matches something, a Mismatch otherwise. + """ + raise NotImplementedError(self.match) + + def __str__(self): + """Get a sensible human representation of the matcher. + + This should include the parameters given to the matcher and any + state that would affect the matches operation. + """ + raise NotImplementedError(self.__str__) + + +class Mismatch(object): + """An object describing a mismatch detected by a Matcher.""" + + def __init__(self, description=None, details=None): + """Construct a `Mismatch`. + + :param description: A description to use. If not provided, + `Mismatch.describe` must be implemented. + :param details: Extra details about the mismatch. Defaults + to the empty dict. + """ + if description: + self._description = description + if details is None: + details = {} + self._details = details + + def describe(self): + """Describe the mismatch. + + This should be either a human-readable string or castable to a string. + In particular, is should either be plain ascii or unicode on Python 2, + and care should be taken to escape control characters. + """ + try: + return self._description + except AttributeError: + raise NotImplementedError(self.describe) + + def get_details(self): + """Get extra details about the mismatch. + + This allows the mismatch to provide extra information beyond the basic + description, including large text or binary files, or debugging internals + without having to force it to fit in the output of 'describe'. + + The testtools assertion assertThat will query get_details and attach + all its values to the test, permitting them to be reported in whatever + manner the test environment chooses. + + :return: a dict mapping names to Content objects. name is a string to + name the detail, and the Content object is the detail to add + to the result. 
For more information see the API to which items from + this dict are passed testtools.TestCase.addDetail. + """ + return getattr(self, '_details', {}) + + def __repr__(self): + return "<testtools.matchers.Mismatch object at %x attributes=%r>" % ( + id(self), self.__dict__) + + +class MismatchError(AssertionError): + """Raised when a mismatch occurs.""" + + # This class exists to work around + # <https://bugs.launchpad.net/testtools/+bug/804127>. It provides a + # guaranteed way of getting a readable exception, no matter what crazy + # characters are in the matchee, matcher or mismatch. + + def __init__(self, matchee, matcher, mismatch, verbose=False): + # Have to use old-style upcalling for Python 2.4 and 2.5 + # compatibility. + AssertionError.__init__(self) + self.matchee = matchee + self.matcher = matcher + self.mismatch = mismatch + self.verbose = verbose + + def __str__(self): + difference = self.mismatch.describe() + if self.verbose: + # GZ 2011-08-24: Smelly API? Better to take any object and special + # case text inside? + if istext(self.matchee) or _isbytes(self.matchee): + matchee = text_repr(self.matchee, multiline=False) + else: + matchee = repr(self.matchee) + return ( + 'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n' + % (matchee, self.matcher, difference)) + else: + return difference + + if not str_is_unicode: + + __unicode__ = __str__ + + def __str__(self): + return self.__unicode__().encode("ascii", "backslashreplace") + + +class MismatchDecorator(object): + """Decorate a ``Mismatch``. + + Forwards all messages to the original mismatch object. Probably the best + way to use this is inherit from this class and then provide your own + custom decoration logic. + """ + + def __init__(self, original): + """Construct a `MismatchDecorator`. + + :param original: A `Mismatch` object to decorate. 
+ """ + self.original = original + + def __repr__(self): + return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,) + + def describe(self): + return self.original.describe() + + def get_details(self): + return self.original.get_details() + + +# Signal that this is part of the testing framework, and that code from this +# should not normally appear in tracebacks. +__unittest = True diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py new file mode 100644 index 00000000000..ba0ac8fd8bf --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py @@ -0,0 +1,97 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Helpers for monkey-patching Python code.""" + +__all__ = [ + 'MonkeyPatcher', + 'patch', + ] + + +class MonkeyPatcher(object): + """A set of monkey-patches that can be applied and removed all together. + + Use this to cover up attributes with new objects. Particularly useful for + testing difficult code. + """ + + # Marker used to indicate that the patched attribute did not exist on the + # object before we patched it. + _NO_SUCH_ATTRIBUTE = object() + + def __init__(self, *patches): + """Construct a `MonkeyPatcher`. + + :param patches: The patches to apply, each should be (obj, name, + new_value). Providing patches here is equivalent to calling + `add_patch`. + """ + # List of patches to apply in (obj, name, value). + self._patches_to_apply = [] + # List of the original values for things that have been patched. + # (obj, name, value) format. + self._originals = [] + for patch in patches: + self.add_patch(*patch) + + def add_patch(self, obj, name, value): + """Add a patch to overwrite 'name' on 'obj' with 'value'. + + The attribute C{name} on C{obj} will be assigned to C{value} when + C{patch} is called or during C{run_with_patches}. 
+ + You can restore the original values with a call to restore(). + """ + self._patches_to_apply.append((obj, name, value)) + + def patch(self): + """Apply all of the patches that have been specified with `add_patch`. + + Reverse this operation using L{restore}. + """ + for obj, name, value in self._patches_to_apply: + original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE) + self._originals.append((obj, name, original_value)) + setattr(obj, name, value) + + def restore(self): + """Restore all original values to any patched objects. + + If the patched attribute did not exist on an object before it was + patched, `restore` will delete the attribute so as to return the + object to its original state. + """ + while self._originals: + obj, name, value = self._originals.pop() + if value is self._NO_SUCH_ATTRIBUTE: + delattr(obj, name) + else: + setattr(obj, name, value) + + def run_with_patches(self, f, *args, **kw): + """Run 'f' with the given args and kwargs with all patches applied. + + Restores all objects to their original state when finished. + """ + self.patch() + try: + return f(*args, **kw) + finally: + self.restore() + + +def patch(obj, attribute, value): + """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'. + + If 'attribute' is not set on 'obj' already, then the returned callable + will delete the attribute when called. + + :param obj: An object to monkey-patch. + :param attribute: The name of the attribute to patch. + :param value: The value to set 'obj.attribute' to. + :return: A nullary callable that, when run, will restore 'obj' to its + original state. 
+ """ + patcher = MonkeyPatcher((obj, attribute, value)) + patcher.patch() + return patcher.restore diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py new file mode 100755 index 00000000000..466da76a7d4 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py @@ -0,0 +1,399 @@ +# Copyright (c) 2009 testtools developers. See LICENSE for details. + +"""python -m testtools.run testspec [testspec...] + +Run some tests with the testtools extended API. + +For instance, to run the testtools test suite. + $ python -m testtools.run testtools.tests.test_suite +""" + +from functools import partial +import os +import unittest +import sys + +from extras import safe_hasattr + +from testtools import TextTestResult +from testtools.compat import classtypes, istext, unicode_output_stream +from testtools.testsuite import filter_by_ids, iterate_tests, sorted_tests + + +defaultTestLoader = unittest.defaultTestLoader +defaultTestLoaderCls = unittest.TestLoader + +if getattr(defaultTestLoader, 'discover', None) is None: + try: + import discover + defaultTestLoader = discover.DiscoveringTestLoader() + defaultTestLoaderCls = discover.DiscoveringTestLoader + have_discover = True + except ImportError: + have_discover = False +else: + have_discover = True + + +def list_test(test): + """Return the test ids that would be run if test() was run. + + When things fail to import they can be represented as well, though + we use an ugly hack (see http://bugs.python.org/issue19746 for details) + to determine that. The difference matters because if a user is + filtering tests to run on the returned ids, a failed import can reduce + the visible tests but it can be impossible to tell that the selected + test would have been one of the imported ones. + + :return: A tuple of test ids that would run and error strings + describing things that failed to import. 
+ """ + unittest_import_str = 'unittest.loader.ModuleImportFailure.' + test_ids = [] + errors = [] + for test in iterate_tests(test): + # to this ugly. + if test.id().startswith(unittest_import_str): + errors.append(test.id()[len(unittest_import_str):]) + else: + test_ids.append(test.id()) + return test_ids, errors + + +class TestToolsTestRunner(object): + """ A thunk object to support unittest.TestProgram.""" + + def __init__(self, verbosity=None, failfast=None, buffer=None, + stdout=None): + """Create a TestToolsTestRunner. + + :param verbosity: Ignored. + :param failfast: Stop running tests at the first failure. + :param buffer: Ignored. + :param stdout: Stream to use for stdout. + """ + self.failfast = failfast + self.stdout = stdout + + def list(self, test): + """List the tests that would be run if test() was run.""" + test_ids, errors = list_test(test) + for test_id in test_ids: + self.stdout.write('%s\n' % test_id) + if errors: + self.stdout.write('Failed to import\n') + for test_id in errors: + self.stdout.write('%s\n' % test_id) + sys.exit(2) + + def run(self, test): + "Run the given test case or test suite." + result = TextTestResult( + unicode_output_stream(sys.stdout), failfast=self.failfast) + result.startTestRun() + try: + return test.run(result) + finally: + result.stopTestRun() + + +#################### +# Taken from python 2.7 and slightly modified for compatibility with +# older versions. Delete when 2.7 is the oldest supported version. +# Modifications: +# - Use have_discover to raise an error if the user tries to use +# discovery on an old version and doesn't have discover installed. +# - If --catch is given check that installHandler is available, as +# it won't be on old python versions. +# - print calls have been been made single-source python3 compatibile. +# - exception handling likewise. +# - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE +# removed. 
+# - A tweak has been added to detect 'python -m *.run' and use a +# better progName in that case. +# - self.module is more comprehensively set to None when being invoked from +# the commandline - __name__ is used as a sentinel value. +# - --list has been added which can list tests (should be upstreamed). +# - --load-list has been added which can reduce the tests used (should be +# upstreamed). +# - The limitation of using getopt is declared to the user. +# - http://bugs.python.org/issue16709 is worked around, by sorting tests when +# discover is used. + +FAILFAST = " -f, --failfast Stop on first failure\n" +CATCHBREAK = " -c, --catch Catch control-C and display results\n" +BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n" + +USAGE_AS_MAIN = """\ +Usage: %(progName)s [options] [tests] + +Options: + -h, --help Show this message + -v, --verbose Verbose output + -q, --quiet Minimal output + -l, --list List tests rather than executing them. + --load-list Specifies a file containing test ids, only tests matching + those ids are executed. +%(failfast)s%(catchbreak)s%(buffer)s +Examples: + %(progName)s test_module - run tests from test_module + %(progName)s module.TestClass - run tests from module.TestClass + %(progName)s module.Class.test_method - run specified test method + +All options must come before [tests]. [tests] can be a list of any number of +test modules, classes and test methods. + +Alternative Usage: %(progName)s discover [options] + +Options: + -v, --verbose Verbose output +%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default) + -p pattern Pattern to match test files ('test*.py' default) + -t directory Top level directory of project (default to + start directory) + -l, --list List tests rather than executing them. + --load-list Specifies a file containing test ids, only tests matching + those ids are executed. 
+ +For test discovery all test modules must be importable from the top +level directory of the project. +""" + + +class TestProgram(object): + """A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. + """ + USAGE = USAGE_AS_MAIN + + # defaults for testing + failfast = catchbreak = buffer = progName = None + + def __init__(self, module=__name__, defaultTest=None, argv=None, + testRunner=None, testLoader=defaultTestLoader, + exit=True, verbosity=1, failfast=None, catchbreak=None, + buffer=None, stdout=None): + if module == __name__: + self.module = None + elif istext(module): + self.module = __import__(module) + for part in module.split('.')[1:]: + self.module = getattr(self.module, part) + else: + self.module = module + if argv is None: + argv = sys.argv + if stdout is None: + stdout = sys.stdout + + self.exit = exit + self.failfast = failfast + self.catchbreak = catchbreak + self.verbosity = verbosity + self.buffer = buffer + self.defaultTest = defaultTest + self.listtests = False + self.load_list = None + self.testRunner = testRunner + self.testLoader = testLoader + progName = argv[0] + if progName.endswith('%srun.py' % os.path.sep): + elements = progName.split(os.path.sep) + progName = '%s.run' % elements[-2] + else: + progName = os.path.basename(argv[0]) + self.progName = progName + self.parseArgs(argv) + if self.load_list: + # TODO: preserve existing suites (like testresources does in + # OptimisingTestSuite.add, but with a standard protocol). + # This is needed because the load_tests hook allows arbitrary + # suites, even if that is rarely used. 
+ source = open(self.load_list, 'rb') + try: + lines = source.readlines() + finally: + source.close() + test_ids = set(line.strip().decode('utf-8') for line in lines) + self.test = filter_by_ids(self.test, test_ids) + if not self.listtests: + self.runTests() + else: + runner = self._get_runner() + if safe_hasattr(runner, 'list'): + runner.list(self.test) + else: + for test in iterate_tests(self.test): + stdout.write('%s\n' % test.id()) + + def usageExit(self, msg=None): + if msg: + print(msg) + usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '', + 'buffer': ''} + if self.failfast != False: + usage['failfast'] = FAILFAST + if self.catchbreak != False: + usage['catchbreak'] = CATCHBREAK + if self.buffer != False: + usage['buffer'] = BUFFEROUTPUT + print(self.USAGE % usage) + sys.exit(2) + + def parseArgs(self, argv): + if len(argv) > 1 and argv[1].lower() == 'discover': + self._do_discovery(argv[2:]) + return + + import getopt + long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer', + 'list', 'load-list='] + try: + options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts) + for opt, value in options: + if opt in ('-h','-H','--help'): + self.usageExit() + if opt in ('-q','--quiet'): + self.verbosity = 0 + if opt in ('-v','--verbose'): + self.verbosity = 2 + if opt in ('-f','--failfast'): + if self.failfast is None: + self.failfast = True + # Should this raise an exception if -f is not valid? + if opt in ('-c','--catch'): + if self.catchbreak is None: + self.catchbreak = True + # Should this raise an exception if -c is not valid? + if opt in ('-b','--buffer'): + if self.buffer is None: + self.buffer = True + # Should this raise an exception if -b is not valid? 
                # -- tail of TestProgram.parseArgs (its "def" is above this view) --
                if opt in ('-l', '--list'):
                    self.listtests = True
                if opt == '--load-list':
                    self.load_list = value
            if len(args) == 0 and self.defaultTest is None:
                # createTests will load tests from self.module
                self.testNames = None
            elif len(args) > 0:
                self.testNames = args
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error:
            # Bad command line: report usage and exit.
            self.usageExit(sys.exc_info()[1])

    def createTests(self):
        # Build self.test either from the configured module (no explicit
        # names were given) or from the dotted names collected by parseArgs.
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                self.module)

    def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
        # handle command line args for test discovery
        if not have_discover:
            raise AssertionError("Unable to use discovery, must use python 2.7 "
                    "or greater, or install the discover package.")
        self.progName = '%s discover' % self.progName
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                help='Verbose output', action='store_true')
        # These three flags are only offered when the constructor left them
        # settable (None) or enabled; a constructor value of False means the
        # feature is explicitly unsupported for this program.
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                help='Stop on first fail or error',
                action='store_true')
        if self.catchbreak != False:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                help='Catch ctrl-C and display results so far',
                action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                help='Buffer stdout and stderr during tests',
                action='store_true')
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                help='Top level directory of project (defaults to start directory)')
        parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
                help='List tests rather than running them.')
        parser.add_option('--load-list', dest='load_list', default=None,
                help='Specify a filename containing the test ids to use.')

        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()

        # Positional arguments override -s / -p / -t, in that order.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)

        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer
        self.listtests = options.listtests
        self.load_list = options.load_list

        if options.verbose:
            self.verbosity = 2

        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top

        loader = Loader()
        # See http://bugs.python.org/issue16709
        # While sorting here is intrusive, its better than being random.
        # Rules for the sort:
        # - standard suites are flattened, and the resulting tests sorted by
        #   id.
        # - non-standard suites are preserved as-is, and sorted into position
        #   by the first test found by iterating the suite.
        # We do this by a DSU process: flatten and grab a key, sort, strip the
        # keys.
        loaded = loader.discover(start_dir, pattern, top_level_dir)
        self.test = sorted_tests(loaded)

    def runTests(self):
        # Install the SIGINT handler only when requested AND available
        # (unittest.installHandler exists on 2.7+ / unittest2).
        if (self.catchbreak
            and getattr(unittest, 'installHandler', None) is not None):
            unittest.installHandler()
        testRunner = self._get_runner()
        self.result = testRunner.run(self.test)
        if self.exit:
            sys.exit(not self.result.wasSuccessful())

    def _get_runner(self):
        # Resolve self.testRunner -- which may be a class, a factory, or an
        # already-constructed runner instance -- degrading gracefully for
        # runners whose constructors predate the newer keyword arguments.
        if self.testRunner is None:
            self.testRunner = TestToolsTestRunner
        try:
            testRunner = self.testRunner(verbosity=self.verbosity,
                failfast=self.failfast,
                buffer=self.buffer)
        except TypeError:
            # didn't accept the verbosity, buffer or failfast arguments
            try:
                testRunner = self.testRunner()
            except TypeError:
                # it is assumed to be a TestRunner instance
                testRunner = self.testRunner
        return testRunner


################

def main(argv, stdout):
    # Module entry point: TestProgram exits via runTests() (exit defaults
    # to true), so no explicit return value is needed here.
    program = TestProgram(argv=argv, testRunner=partial(TestToolsTestRunner, stdout=stdout),
        stdout=stdout)

if __name__ == '__main__':
    main(sys.argv, sys.stdout)

# ===== file boundary (diff header): testtools/runtest.py, new file =====
# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.

"""Individual test case execution."""

__all__ = [
    'MultipleExceptions',
    'RunTest',
    ]

import sys

from testtools.testresult import ExtendedToOriginalDecorator


class MultipleExceptions(Exception):
    """Represents many exceptions raised from some operation.

    :ivar args: The sys.exc_info() tuples for each exception.
    """


class RunTest(object):
    """An object to run a test.

    RunTest objects are used to implement the internal logic involved in
    running a test.
    # (class docstring and body continue on the next source line)
    TestCase.__init__ stores _RunTest as the class of RunTest
    to execute.  Passing the runTest= parameter to TestCase.__init__ allows a
    different RunTest class to be used to execute the test.

    Subclassing or replacing RunTest can be useful to add functionality to the
    way that tests are run in a given project.

    :ivar case: The test case that is to be run.
    :ivar result: The result object a case is reporting to.
    :ivar handlers: A list of (ExceptionClass, handler_function) for
        exceptions that should be caught if raised from the user
        code. Exceptions that are caught are checked against this list in
        first to last order.  There is a catch-all of 'Exception' at the end
        of the list, so to add a new exception to the list, insert it at the
        front (which ensures that it will be checked before any existing base
        classes in the list. If you add multiple exceptions some of which are
        subclasses of each other, add the most specific exceptions last (so
        they come before their parent classes in the list).
    :ivar exception_caught: An object returned when _run_user catches an
        exception.
    :ivar _exceptions: A list of caught exceptions, used to do the single
        reporting of error/failure/skip etc.
    """

    def __init__(self, case, handlers=None):
        """Create a RunTest to run a case.

        :param case: A testtools.TestCase test case object.
        :param handlers: Exception handlers for this RunTest. These are stored
            in self.handlers and can be modified later if needed.
        """
        self.case = case
        self.handlers = handlers or []
        # Unique sentinel: _run_user returns it to mean "an exception was
        # caught" without colliding with any real return value.
        self.exception_caught = object()
        self._exceptions = []

    def run(self, result=None):
        """Run self.case reporting activity to result.

        :param result: Optional testtools.TestResult to report activity to.
        :return: The result object the test was run against.
        """
        if result is None:
            # No result supplied: we own the result's run lifecycle too.
            actual_result = self.case.defaultTestResult()
            actual_result.startTestRun()
        else:
            actual_result = result
        try:
            return self._run_one(actual_result)
        finally:
            if result is None:
                actual_result.stopTestRun()

    def _run_one(self, result):
        """Run one test reporting to result.

        :param result: A testtools.TestResult to report activity to.
            This result object is decorated with an ExtendedToOriginalDecorator
            to ensure that the latest TestResult API can be used with
            confidence by client code.
        :return: The result object the test was run against.
        """
        return self._run_prepared_result(ExtendedToOriginalDecorator(result))

    def _run_prepared_result(self, result):
        """Run one test reporting to result.

        :param result: A testtools.TestResult to report activity to.
        :return: The result object the test was run against.
        """
        result.startTest(self.case)
        self.result = result
        try:
            self._exceptions = []
            self._run_core()
            if self._exceptions:
                # One or more caught exceptions, now trigger the test's
                # reporting method for just one.
                e = self._exceptions.pop()
                for exc_class, handler in self.handlers:
                    if isinstance(e, exc_class):
                        handler(self.case, self.result, e)
                        break
        finally:
            result.stopTest(self.case)
        return result

    def _run_core(self):
        """Run the user supplied test code."""
        if self.exception_caught == self._run_user(self.case._run_setup,
            self.result):
            # Don't run the test method if we failed getting here.
            self._run_cleanups(self.result)
            return
        # Run everything from here on in. If any of the methods raise an
        # exception we'll have failed.
        failed = False
        try:
            if self.exception_caught == self._run_user(
                self.case._run_test_method, self.result):
                failed = True
        finally:
            try:
                if self.exception_caught == self._run_user(
                    self.case._run_teardown, self.result):
                    failed = True
            finally:
                try:
                    if self.exception_caught == self._run_user(
                        self._run_cleanups, self.result):
                        failed = True
                finally:
                    # force_failure lets tests flag themselves as failed even
                    # when no exception escaped (set on the case, if at all).
                    if getattr(self.case, 'force_failure', None):
                        self._run_user(_raise_force_fail_error)
                        failed = True
                    if not failed:
                        self.result.addSuccess(self.case,
                            details=self.case.getDetails())

    def _run_cleanups(self, result):
        """Run the cleanups that have been added with addCleanup.

        See the docstring for addCleanup for more information.

        :return: None if all cleanups ran without error,
            ``exception_caught`` if there was an error.
        """
        failing = False
        # LIFO: cleanups run in reverse order of registration; one failing
        # cleanup does not stop the rest from running.
        while self.case._cleanups:
            function, arguments, keywordArguments = self.case._cleanups.pop()
            got_exception = self._run_user(
                function, *arguments, **keywordArguments)
            if got_exception == self.exception_caught:
                failing = True
        if failing:
            return self.exception_caught

    def _run_user(self, fn, *args, **kwargs):
        """Run a user supplied function.

        Exceptions are processed by `_got_user_exception`.

        :return: Either whatever 'fn' returns or ``exception_caught`` if
            'fn' raised an exception.
        """
        try:
            return fn(*args, **kwargs)
        except KeyboardInterrupt:
            # Never swallow a user interrupt.
            raise
        except:
            return self._got_user_exception(sys.exc_info())

    def _got_user_exception(self, exc_info, tb_label='traceback'):
        """Called when user code raises an exception.

        If 'exc_info' is a `MultipleExceptions`, then we recurse into it
        unpacking the errors that it's made up from.

        :param exc_info: A sys.exc_info() tuple for the user error.
        :param tb_label: An optional string label for the error. If
            not specified, will default to 'traceback'.
        # (docstring and body continue on the next source line)
class TagContext(object):
    """A scope holding the set of tags currently in force.

    Contexts nest: a child context starts out with whatever tags its parent
    carries at construction time, and can then diverge independently.
    """

    def __init__(self, parent=None):
        """Create a new TagContext.

        :param parent: Optional parent context.  Any tags current on the
            parent when this context is built become current here too.
        """
        self.parent = parent
        if parent is None:
            self._tags = set()
        else:
            self._tags = set(parent.get_current_tags())

    def get_current_tags(self):
        """Return a copy of the tags currently active in this context."""
        return set(self._tags)

    def change_tags(self, new_tags, gone_tags):
        """Add ``new_tags`` and remove ``gone_tags`` from this context.

        :param new_tags: A set of tags to add to this context.
        :param gone_tags: A set of tags to remove from this context.
        :return: The tags now current on this context.
        """
        self._tags = (self._tags | set(new_tags)) - set(gone_tags)
        return self.get_current_tags()
def run_test_with(test_runner, **kwargs):
    """Decorate a test as using a specific ``RunTest``.

    e.g.::

        @run_test_with(CustomRunner, timeout=42)
        def test_foo(self):
            self.assertTrue(True)

    The decorator works by stamping an attribute onto the decorated
    function; `TestCase.__init__` consults that attribute when choosing a
    ``RunTest`` factory.  When stacking decorators, either keep this one
    outermost or make the others copy wrapped-function attributes
    (``functools.wraps`` et al. do this).

    :param test_runner: A ``RunTest`` factory that takes a test case and an
        optional list of exception handlers.  See ``RunTest``.
    :param kwargs: Extra keyword arguments forwarded to 'test_runner'.
    :return: A decorator marking a test as needing a special runner.
    """
    def decorator(function):
        def _make_runner(case, handlers=None):
            return test_runner(case, handlers=handlers, **kwargs)
        # TestCase.__init__ looks for this attribute to build the runner.
        function._run_test_with = _make_runner
        return function
    return decorator


def _copy_content(content_object):
    """Return a non-volatile copy of ``content_object``.

    The content is iterated and captured eagerly, which matters when the
    backing store is volatile (e.g. a log file in a temporary directory).

    :param content_object: A `content.Content` instance.
    :return: A `content.Content` with the same mime-type and a snapshot of
        the original bytes.
    """
    snapshot = list(content_object.iter_bytes())
    return content.Content(
        content_object.content_type, lambda: snapshot)


def gather_details(source_dict, target_dict):
    """Merge the details from ``source_dict`` into ``target_dict``.

    Colliding names are disambiguated with a ``-N`` suffix so nothing in
    ``target_dict`` is overwritten.

    :param source_dict: A dictionary of details to be gathered.
    :param target_dict: A dictionary into which details will be gathered.
    """
    for name, detail in source_dict.items():
        counter = itertools.count(1)
        unique_name = name
        while unique_name in target_dict:
            unique_name = '%s-%d' % (name, advance_iterator(counter))
        target_dict[unique_name] = _copy_content(detail)
            The instance to be used is created
            when run() is invoked, so will be fresh each time. Overrides
            ``TestCase.run_tests_with`` if given.
        """
        runTest = kwargs.pop('runTest', None)
        super(TestCase, self).__init__(*args, **kwargs)
        self._cleanups = []
        self._unique_id_gen = itertools.count(1)
        # Generators to ensure unique traceback ids. Maps traceback label to
        # iterators.
        self._traceback_id_gens = {}
        self.__setup_called = False
        self.__teardown_called = False
        # __details is lazy-initialized so that a constructed-but-not-run
        # TestCase is safe to use with clone_test_with_new_id.
        self.__details = None
        test_method = self._get_test_method()
        if runTest is None:
            # A per-method override (set by @run_test_with) wins over the
            # class-level run_tests_with factory.
            runTest = getattr(
                test_method, '_run_test_with', self.run_tests_with)
        self.__RunTest = runTest
        self.__exception_handlers = []
        # Checked first-to-last by RunTest: most specific handler first,
        # Exception as the catch-all at the end.
        self.exception_handlers = [
            (self.skipException, self._report_skip),
            (self.failureException, self._report_failure),
            (_ExpectedFailure, self._report_expected_failure),
            (_UnexpectedSuccess, self._report_unexpected_success),
            (Exception, self._report_error),
            ]
        if sys.version_info < (2, 6):
            # Catch old-style string exceptions with None as the instance
            self.exception_handlers.append((type(None), self._report_error))

    def __eq__(self, other):
        # NOTE(review): Py2-era code. Defining __eq__ without __hash__ makes
        # instances unhashable on Python 3, and comparing against an object
        # with no __dict__ raises AttributeError rather than returning
        # NotImplemented -- confirm whether callers rely on this.
        eq = getattr(unittest.TestCase, '__eq__', None)
        if eq is not None and not unittest.TestCase.__eq__(self, other):
            return False
        return self.__dict__ == other.__dict__

    def __repr__(self):
        # We add id to the repr because it makes testing testtools easier.
        return "<%s id=0x%0x>" % (self.id(), id(self))

    def addDetail(self, name, content_object):
        """Add a detail to be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.

        :param name: The name to give this detail.
        :param content_object: The content object for this detail. See
            testtools.content for more detail.
        """
        if self.__details is None:
            self.__details = {}
        self.__details[name] = content_object

    def getDetails(self):
        """Get the details dict that will be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.
        """
        if self.__details is None:
            self.__details = {}
        return self.__details

    def patch(self, obj, attribute, value):
        """Monkey-patch 'obj.attribute' to 'value' while the test is running.

        If 'obj' has no attribute, then the monkey-patch will still go ahead,
        and the attribute will be deleted instead of restored to its original
        value.

        :param obj: The object to patch. Can be anything.
        :param attribute: The attribute on 'obj' to patch.
        :param value: The value to set 'obj.attribute' to.
        """
        # monkey.patch returns the undo callable; registering it as a cleanup
        # restores the original state after the test.
        self.addCleanup(patch(obj, attribute, value))

    def shortDescription(self):
        return self.id()

    def skipTest(self, reason):
        """Cause this test to be skipped.

        This raises self.skipException(reason). skipException is raised
        to permit a skip to be triggered at any point (during setUp or the
        testMethod itself). The run() method catches skipException and
        translates that into a call to the result objects addSkip method.

        :param reason: The reason why the test is being skipped. This must
            support being cast into a unicode string for reporting.
        """
        raise self.skipException(reason)

    # skipTest is how python2.7 spells this. Sometime in the future
    # This should be given a deprecation decorator - RBC 20100611.
    skip = skipTest

    def _formatTypes(self, classOrIterable):
        """Format a class or a bunch of classes for display in an error."""
        className = getattr(classOrIterable, '__name__', None)
        if className is None:
            className = ', '.join(klass.__name__ for klass in classOrIterable)
        return className

    def addCleanup(self, function, *arguments, **keywordArguments):
        """Add a cleanup function to be called after tearDown.
        # (docstring and body continue on the next source line)
        # -- tail of addCleanup's docstring (its "def" is above this view) --

        Functions added with addCleanup will be called in reverse order of
        adding after tearDown, or after setUp if setUp raises an exception.

        If a function added with addCleanup raises an exception, the error
        will be recorded as a test error, and the next cleanup will then be
        run.

        Cleanup functions are always called before a test finishes running,
        even if setUp is aborted by an exception.
        """
        self._cleanups.append((function, arguments, keywordArguments))

    def addOnException(self, handler):
        """Add a handler to be called when an exception occurs in test code.

        This handler cannot affect what result methods are called, and is
        called before any outcome is called on the result object. An example
        use for it is to add some diagnostic state to the test details dict
        which is expensive to calculate and not interesting for reporting in
        the success case.

        Handlers are called before the outcome (such as addFailure) that
        the exception has caused.

        Handlers are called in first-added, first-called order, and if they
        raise an exception, that will propogate out of the test running
        machinery, halting test processing. As a result, do not call code that
        may unreasonably fail.
        """
        self.__exception_handlers.append(handler)

    def _add_reason(self, reason):
        # Skip/expected-failure reasons travel as a 'reason' detail.
        self.addDetail('reason', content.text_content(reason))

    def assertEqual(self, expected, observed, message=''):
        """Assert that 'expected' is equal to 'observed'.

        :param expected: The expected value.
        :param observed: The observed value.
        :param message: An optional message to include in the error.
        """
        matcher = Equals(expected)
        self.assertThat(observed, matcher, message)

    # Legacy unittest spellings kept for compatibility.
    failUnlessEqual = assertEquals = assertEqual

    def assertIn(self, needle, haystack):
        """Assert that needle is in haystack."""
        self.assertThat(haystack, Contains(needle))

    def assertIsNone(self, observed, message=''):
        """Assert that 'observed' is equal to None.

        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        matcher = Is(None)
        self.assertThat(observed, matcher, message)

    def assertIsNotNone(self, observed, message=''):
        """Assert that 'observed' is not equal to None.

        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        matcher = Not(Is(None))
        self.assertThat(observed, matcher, message)

    def assertIs(self, expected, observed, message=''):
        """Assert that 'expected' is 'observed'.

        :param expected: The expected value.
        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        matcher = Is(expected)
        self.assertThat(observed, matcher, message)

    def assertIsNot(self, expected, observed, message=''):
        """Assert that 'expected' is not 'observed'."""
        matcher = Not(Is(expected))
        self.assertThat(observed, matcher, message)

    def assertNotIn(self, needle, haystack):
        """Assert that needle is not in haystack."""
        matcher = Not(Contains(needle))
        self.assertThat(haystack, matcher)

    def assertIsInstance(self, obj, klass, msg=None):
        # klass may be a single class or a tuple of acceptable classes,
        # mirroring isinstance's own signature.
        if isinstance(klass, tuple):
            matcher = IsInstance(*klass)
        else:
            matcher = IsInstance(klass)
        self.assertThat(obj, matcher, msg)

    def assertRaises(self, excClass, callableObj, *args, **kwargs):
        """Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.
        # (docstring and body continue on the next source line)
+ """ + class ReRaiseOtherTypes(object): + def match(self, matchee): + if not issubclass(matchee[0], excClass): + reraise(*matchee) + class CaptureMatchee(object): + def match(self, matchee): + self.matchee = matchee[1] + capture = CaptureMatchee() + matcher = Raises(MatchesAll(ReRaiseOtherTypes(), + MatchesException(excClass), capture)) + our_callable = Nullary(callableObj, *args, **kwargs) + self.assertThat(our_callable, matcher) + return capture.matchee + failUnlessRaises = assertRaises + + def assertThat(self, matchee, matcher, message='', verbose=False): + """Assert that matchee is matched by matcher. + + :param matchee: An object to match with matcher. + :param matcher: An object meeting the testtools.Matcher protocol. + :raises MismatchError: When matcher does not match thing. + """ + matcher = Annotate.if_message(message, matcher) + mismatch = matcher.match(matchee) + if not mismatch: + return + existing_details = self.getDetails() + for (name, content) in mismatch.get_details().items(): + self.addDetailUniqueName(name, content) + raise MismatchError(matchee, matcher, mismatch, verbose) + + def addDetailUniqueName(self, name, content_object): + """Add a detail to the test, but ensure it's name is unique. + + This method checks whether ``name`` conflicts with a detail that has + already been added to the test. If it does, it will modify ``name`` to + avoid the conflict. + + For more details see pydoc testtools.TestResult. + + :param name: The name to give this detail. + :param content_object: The content object for this detail. See + testtools.content for more detail. + """ + existing_details = self.getDetails() + full_name = name + suffix = 1 + while full_name in existing_details: + full_name = "%s-%d" % (name, suffix) + suffix += 1 + self.addDetail(full_name, content_object) + + def defaultTestResult(self): + return TestResult() + + def expectFailure(self, reason, predicate, *args, **kwargs): + """Check that a test fails in a particular way. 
+ + If the test fails in the expected way, a KnownFailure is caused. If it + succeeds an UnexpectedSuccess is caused. + + The expected use of expectFailure is as a barrier at the point in a + test where the test would fail. For example: + >>> def test_foo(self): + >>> self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0) + >>> self.assertEqual(1, 0) + + If in the future 1 were to equal 0, the expectFailure call can simply + be removed. This separation preserves the original intent of the test + while it is in the expectFailure mode. + """ + # TODO: implement with matchers. + self._add_reason(reason) + try: + predicate(*args, **kwargs) + except self.failureException: + # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new + # unittest _ExpectedFailure wants old traceback + exc_info = sys.exc_info() + try: + self._report_traceback(exc_info) + raise _ExpectedFailure(exc_info) + finally: + del exc_info + else: + raise _UnexpectedSuccess(reason) + + def getUniqueInteger(self): + """Get an integer unique to this test. + + Returns an integer that is guaranteed to be unique to this instance. + Use this when you need an arbitrary integer in your test, or as a + helper for custom anonymous factory methods. + """ + return advance_iterator(self._unique_id_gen) + + def getUniqueString(self, prefix=None): + """Get a string unique to this test. + + Returns a string that is guaranteed to be unique to this instance. Use + this when you need an arbitrary string in your test, or as a helper + for custom anonymous factory methods. + + :param prefix: The prefix of the string. If not provided, defaults + to the id of the tests. + :return: A bytestring of '<prefix>-<unique_int>'. + """ + if prefix is None: + prefix = self.id() + return '%s-%d' % (prefix, self.getUniqueInteger()) + + def onException(self, exc_info, tb_label='traceback'): + """Called when an exception propogates from test code. 
+ + :seealso addOnException: + """ + if exc_info[0] not in [ + TestSkipped, _UnexpectedSuccess, _ExpectedFailure]: + self._report_traceback(exc_info, tb_label=tb_label) + for handler in self.__exception_handlers: + handler(exc_info) + + @staticmethod + def _report_error(self, result, err): + result.addError(self, details=self.getDetails()) + + @staticmethod + def _report_expected_failure(self, result, err): + result.addExpectedFailure(self, details=self.getDetails()) + + @staticmethod + def _report_failure(self, result, err): + result.addFailure(self, details=self.getDetails()) + + @staticmethod + def _report_skip(self, result, err): + if err.args: + reason = err.args[0] + else: + reason = "no reason given." + self._add_reason(reason) + result.addSkip(self, details=self.getDetails()) + + def _report_traceback(self, exc_info, tb_label='traceback'): + id_gen = self._traceback_id_gens.setdefault( + tb_label, itertools.count(0)) + while True: + tb_id = advance_iterator(id_gen) + if tb_id: + tb_label = '%s-%d' % (tb_label, tb_id) + if tb_label not in self.getDetails(): + break + self.addDetail(tb_label, content.TracebackContent(exc_info, self)) + + @staticmethod + def _report_unexpected_success(self, result, err): + result.addUnexpectedSuccess(self, details=self.getDetails()) + + def run(self, result=None): + return self.__RunTest(self, self.exception_handlers).run(result) + + def _run_setup(self, result): + """Run the setUp function for this test. + + :param result: A testtools.TestResult to report activity to. + :raises ValueError: If the base class setUp is not called, a + ValueError is raised. + """ + ret = self.setUp() + if not self.__setup_called: + raise ValueError( + "In File: %s\n" + "TestCase.setUp was not called. Have you upcalled all the " + "way up the hierarchy from your setUp? e.g. Call " + "super(%s, self).setUp() from your setUp()." 
+ % (sys.modules[self.__class__.__module__].__file__, + self.__class__.__name__)) + return ret + + def _run_teardown(self, result): + """Run the tearDown function for this test. + + :param result: A testtools.TestResult to report activity to. + :raises ValueError: If the base class tearDown is not called, a + ValueError is raised. + """ + ret = self.tearDown() + if not self.__teardown_called: + raise ValueError( + "In File: %s\n" + "TestCase.tearDown was not called. Have you upcalled all the " + "way up the hierarchy from your tearDown? e.g. Call " + "super(%s, self).tearDown() from your tearDown()." + % (sys.modules[self.__class__.__module__].__file__, + self.__class__.__name__)) + return ret + + def _get_test_method(self): + absent_attr = object() + # Python 2.5+ + method_name = getattr(self, '_testMethodName', absent_attr) + if method_name is absent_attr: + # Python 2.4 + method_name = getattr(self, '_TestCase__testMethodName') + return getattr(self, method_name) + + def _run_test_method(self, result): + """Run the test method for this test. + + :param result: A testtools.TestResult to report activity to. + :return: None. + """ + return self._get_test_method()() + + def useFixture(self, fixture): + """Use fixture in a test case. + + The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called. + + :param fixture: The fixture to use. + :return: The fixture, after setting it up and scheduling a cleanup for + it. + """ + try: + fixture.setUp() + except: + gather_details(fixture.getDetails(), self.getDetails()) + raise + else: + self.addCleanup(fixture.cleanUp) + self.addCleanup( + gather_details, fixture.getDetails(), self.getDetails()) + return fixture + + def setUp(self): + super(TestCase, self).setUp() + self.__setup_called = True + + def tearDown(self): + super(TestCase, self).tearDown() + unittest.TestCase.tearDown(self) + self.__teardown_called = True + + +class PlaceHolder(object): + """A placeholder test. 
+ + `PlaceHolder` implements much of the same interface as TestCase and is + particularly suitable for being added to TestResults. + """ + + failureException = None + + def __init__(self, test_id, short_description=None, details=None, + outcome='addSuccess', error=None, tags=None, timestamps=(None, None)): + """Construct a `PlaceHolder`. + + :param test_id: The id of the placeholder test. + :param short_description: The short description of the place holder + test. If not provided, the id will be used instead. + :param details: Outcome details as accepted by addSuccess etc. + :param outcome: The outcome to call. Defaults to 'addSuccess'. + :param tags: Tags to report for the test. + :param timestamps: A two-tuple of timestamps for the test start and + finish. Each timestamp may be None to indicate it is not known. + """ + self._test_id = test_id + self._short_description = short_description + self._details = details or {} + self._outcome = outcome + if error is not None: + self._details['traceback'] = content.TracebackContent(error, self) + tags = tags or frozenset() + self._tags = frozenset(tags) + self._timestamps = timestamps + + def __call__(self, result=None): + return self.run(result=result) + + def __repr__(self): + internal = [self._outcome, self._test_id, self._details] + if self._short_description is not None: + internal.append(self._short_description) + return "<%s.%s(%s)>" % ( + self.__class__.__module__, + self.__class__.__name__, + ", ".join(map(repr, internal))) + + def __str__(self): + return self.id() + + def countTestCases(self): + return 1 + + def debug(self): + pass + + def id(self): + return self._test_id + + def _result(self, result): + if result is None: + return TestResult() + else: + return ExtendedToOriginalDecorator(result) + + def run(self, result=None): + result = self._result(result) + if self._timestamps[0] is not None: + result.time(self._timestamps[0]) + result.tags(self._tags, set()) + result.startTest(self) + if 
def ErrorHolder(test_id, error, short_description=None, details=None):
    """Construct an `ErrorHolder`.

    An ErrorHolder is a `PlaceHolder` whose outcome is 'addError'.

    :param test_id: The id of the test.
    :param error: The exc info tuple that will be used as the test's error.
        This is inserted into the details as 'traceback' - any existing key
        will be overridden.
    :param short_description: An optional short description of the test.
    :param details: Outcome details as accepted by addSuccess etc.
    :return: A PlaceHolder that reports ``error`` via 'addError'.
    """
    return PlaceHolder(test_id, short_description=short_description,
        details=details, outcome='addError', error=error)


def _clone_test_id_callback(test, callback):
    """Copy a `TestCase`, and make it call callback for its id().

    This is only expected to be used on tests that have been constructed but
    not executed.

    :param test: A TestCase instance.
    :param callback: A callable that takes no parameters and returns a string.
    :return: A copy.copy of the test with id=callback.
    """
    newTest = copy.copy(test)
    newTest.id = callback
    return newTest


def clone_test_with_new_id(test, new_id):
    """Copy a `TestCase`, and give the copied test a new id.

    This is only expected to be used on tests that have been constructed but
    not executed.

    :param test: A TestCase instance.
    :param new_id: The id the copy should report.
    :return: A copy of ``test`` whose id() returns ``new_id``.
    """
    return _clone_test_id_callback(test, lambda: new_id)


def attr(*args):
    """Decorator for adding attributes to WithAttributes.

    :param args: The name of attributes to add.
    :return: A callable that when applied to a WithAttributes will
        alter its id to enumerate the added attributes.
    """
    def decorate(fn):
        # Accumulate attributes across stacked @attr decorations.
        if not safe_hasattr(fn, '__testtools_attrs'):
            fn.__testtools_attrs = set()
        fn.__testtools_attrs.update(args)
        return fn
    return decorate


class WithAttributes(object):
    """A mix-in class for modifying test id by attributes.

    e.g.
    >>> class MyTest(WithAttributes, TestCase):
    ...    @attr('foo')
    ...    def test_bar(self):
    ...        pass
    >>> MyTest('test_bar').id()
    'testtools.testcase.MyTest.test_bar[foo]'
    """

    def id(self):
        orig = super(WithAttributes, self).id()
        # Depends on testtools.TestCase._get_test_method, be nice to support
        # plain unittest.
        fn = self._get_test_method()
        attributes = getattr(fn, '__testtools_attrs', None)
        if not attributes:
            return orig
        # Attributes are sorted so the id is stable regardless of the order
        # they were declared in.
        return orig + '[' + ','.join(sorted(attributes)) + ']'


def skip(reason):
    """A decorator to skip unit tests.

    This is just syntactic sugar so users don't have to change any of their
    unit tests in order to migrate to python 2.7, which provides the
    @unittest.skip decorator.
    """
    def decorator(test_item):
        if wraps is not None:
            # Preserve the wrapped test's metadata (name, docstring, attrs).
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise TestCase.skipException(reason)
        else:
            # Fallback for pythons without functools.wraps.
            def skip_wrapper(test_item):
                test_item.skip(reason)
        return skip_wrapper
    return decorator


def skipIf(condition, reason):
    """A decorator to skip a test if the condition is true."""
    if condition:
        return skip(reason)
    def _id(obj):
        # Condition is false: leave the test untouched.
        return obj
    return _id


def skipUnless(condition, reason):
    """A decorator to skip a test unless the condition is true."""
    if not condition:
        return skip(reason)
    def _id(obj):
        # Condition is true: leave the test untouched.
        return obj
    return _id
class ExpectedException:
    """A context manager to handle expected exceptions.

    In Python 2.5 or later::

        def test_foo(self):
            with ExpectedException(ValueError, 'fo.*'):
                raise ValueError('foo')

    will pass.  If the raised exception has a type other than the specified
    type, it will be re-raised.  If it has a 'str()' that does not match the
    given regular expression, an AssertionError will be raised.  If no
    exception is raised, an AssertionError will be raised.
    """

    def __init__(self, exc_type, value_re=None, msg=None):
        """Construct an `ExpectedException`.

        :param exc_type: The type of exception to expect.
        :param value_re: A regular expression to match against the
            'str()' of the raised exception.
        :param msg: An optional message explaining the failure.
        """
        self.exc_type = exc_type
        self.value_re = value_re
        self.msg = msg

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing raised at all: that violates the expectation.
        if exc_type is None:
            error_msg = '%s not raised.' % self.exc_type.__name__
            if self.msg:
                error_msg = error_msg + ' : ' + self.msg
            raise AssertionError(error_msg)
        # A different exception type: let it propagate to the caller.
        if exc_type != self.exc_type:
            return False
        # Expected type: optionally check str(exc) against value_re.
        if self.value_re:
            matcher = MatchesException(self.exc_type, self.value_re)
            if self.msg:
                matcher = Annotate(self.msg, matcher)
            mismatch = matcher.match((exc_type, exc_value, traceback))
            if mismatch:
                raise AssertionError(mismatch.describe())
        # Returning True suppresses the (matched) exception.
        return True


class Nullary(object):
    """Turn a callable into a nullary callable.

    The advantage of this over ``lambda: f(*args, **kwargs)`` is that it
    preserves the ``repr()`` of ``f``.
    """

    def __init__(self, callable_object, *args, **kwargs):
        self._callable_object = callable_object
        self._args = args
        self._kwargs = kwargs

    def __call__(self):
        return self._callable_object(*self._args, **self._kwargs)

    def __repr__(self):
        return repr(self._callable_object)


class DecorateTestCaseResult(object):
    """Decorate a TestCase and permit customisation of the result for runs."""

    def __init__(self, case, callout, before_run=None, after_run=None):
        """Construct a DecorateTestCaseResult.

        :param case: The case to decorate.
        :param callout: A callback to call when run/__call__/debug is called.
            Must take a result parameter and return a result object to be
            used. For instance: lambda result: result.
        :param before_run: If set, call this with the decorated result before
            calling into the decorated run/__call__ method.
        :param after_run: If set, call this with the decorated result after
            calling into the decorated run/__call__ method.
        """
        self.decorated = case
        self.callout = callout
        self.before_run = before_run
        self.after_run = after_run

    def _run(self, result, run_method):
        # Decorate the result, then bracket the run with the optional hooks.
        result = self.callout(result)
        if self.before_run:
            self.before_run(result)
        try:
            return run_method(result)
        finally:
            # after_run fires even if the run raises.
            if self.after_run:
                self.after_run(result)

    def run(self, result=None):
        self._run(result, self.decorated.run)

    def __call__(self, result=None):
        self._run(result, self.decorated)

    def __getattr__(self, name):
        return getattr(self.decorated, name)

    def __delattr__(self, name):
        delattr(self.decorated, name)

    def __setattr__(self, name, value):
        # Our own attributes live on self; everything else is forwarded to
        # the decorated case so it behaves as a transparent proxy.
        if name in ('decorated', 'callout', 'before_run', 'after_run'):
            self.__dict__[name] = value
            return
        setattr(self.decorated, name, value)


# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks.
__unittest = True
+ +"""Test result objects.""" + +__all__ = [ + 'CopyStreamResult', + 'ExtendedToOriginalDecorator', + 'ExtendedToStreamDecorator', + 'MultiTestResult', + 'StreamFailFast', + 'StreamResult', + 'StreamResultRouter', + 'StreamSummary', + 'StreamTagger', + 'StreamToDict', + 'StreamToExtendedDecorator', + 'StreamToQueue', + 'Tagger', + 'TestByTestResult', + 'TestControl', + 'TestResult', + 'TestResultDecorator', + 'TextTestResult', + 'ThreadsafeForwardingResult', + 'TimestampingStreamResult', + ] + +from testtools.testresult.real import ( + CopyStreamResult, + ExtendedToOriginalDecorator, + ExtendedToStreamDecorator, + MultiTestResult, + StreamFailFast, + StreamResult, + StreamResultRouter, + StreamSummary, + StreamTagger, + StreamToDict, + StreamToExtendedDecorator, + StreamToQueue, + Tagger, + TestByTestResult, + TestControl, + TestResult, + TestResultDecorator, + TextTestResult, + ThreadsafeForwardingResult, + TimestampingStreamResult, + ) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py new file mode 100644 index 00000000000..d86f7fae2c1 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py @@ -0,0 +1,174 @@ +# Copyright (c) 2009-2010 testtools developers. See LICENSE for details. 
"""Doubles of test result objects, useful for testing unittest code."""

__all__ = [
    'Python26TestResult',
    'Python27TestResult',
    'ExtendedTestResult',
    'StreamResult',
    ]


from testtools.tags import TagContext


class LoggingBase(object):
    """Basic support for logging of results.

    Accumulates every received event, in order, in ``_events``.
    """

    def __init__(self):
        # Chronological log of calls, as tuples.
        self._events = []
        self.shouldStop = False
        self._was_successful = True
        self.testsRun = 0


class Python26TestResult(LoggingBase):
    """A precisely python 2.6 like test result, that logs."""

    def _log(self, *event):
        # Record one event tuple exactly as received.
        self._events.append(event)

    def addError(self, test, err):
        self._was_successful = False
        self._log('addError', test, err)

    def addFailure(self, test, err):
        self._was_successful = False
        self._log('addFailure', test, err)

    def addSuccess(self, test):
        self._log('addSuccess', test)

    def startTest(self, test):
        self._log('startTest', test)
        self.testsRun += 1

    def stop(self):
        self.shouldStop = True

    def stopTest(self, test):
        self._log('stopTest', test)

    def wasSuccessful(self):
        return self._was_successful


class Python27TestResult(Python26TestResult):
    """A precisely python 2.7 like test result, that logs."""

    def __init__(self):
        super(Python27TestResult, self).__init__()
        self.failfast = False

    def addError(self, test, err):
        super(Python27TestResult, self).addError(test, err)
        if self.failfast:
            self.stop()

    def addFailure(self, test, err):
        super(Python27TestResult, self).addFailure(test, err)
        if self.failfast:
            self.stop()

    def addExpectedFailure(self, test, err):
        self._log('addExpectedFailure', test, err)

    def addSkip(self, test, reason):
        self._log('addSkip', test, reason)

    def addUnexpectedSuccess(self, test):
        self._log('addUnexpectedSuccess', test)
        if self.failfast:
            self.stop()

    def startTestRun(self):
        self._log('startTestRun')

    def stopTestRun(self):
        self._log('stopTestRun')


class ExtendedTestResult(Python27TestResult):
    """A test result like the proposed extended unittest result API."""

    def __init__(self):
        super(ExtendedTestResult, self).__init__()
        self._tags = TagContext()

    def addError(self, test, err=None, details=None):
        self._was_successful = False
        self._log('addError', test, err or details)

    def addFailure(self, test, err=None, details=None):
        self._was_successful = False
        self._log('addFailure', test, err or details)

    def addExpectedFailure(self, test, err=None, details=None):
        self._log('addExpectedFailure', test, err or details)

    def addSkip(self, test, reason=None, details=None):
        self._log('addSkip', test, reason or details)

    def addSuccess(self, test, details=None):
        # Only include details in the event when some were supplied.
        if details:
            self._log('addSuccess', test, details)
        else:
            self._log('addSuccess', test)

    def addUnexpectedSuccess(self, test, details=None):
        self._was_successful = False
        # NB: identity check here (is not None), unlike addSuccess above.
        if details is not None:
            self._log('addUnexpectedSuccess', test, details)
        else:
            self._log('addUnexpectedSuccess', test)

    def progress(self, offset, whence):
        self._log('progress', offset, whence)

    def startTestRun(self):
        super(ExtendedTestResult, self).startTestRun()
        self._was_successful = True
        self._tags = TagContext()

    def startTest(self, test):
        super(ExtendedTestResult, self).startTest(test)
        # Tags nest per-test; push a child context.
        self._tags = TagContext(self._tags)

    def stopTest(self, test):
        # Pop the per-test tag context.
        self._tags = self._tags.parent
        super(ExtendedTestResult, self).stopTest(test)

    @property
    def current_tags(self):
        return self._tags.get_current_tags()

    def tags(self, new_tags, gone_tags):
        self._tags.change_tags(new_tags, gone_tags)
        self._log('tags', new_tags, gone_tags)

    def time(self, time):
        self._log('time', time)

    def wasSuccessful(self):
        return self._was_successful
class StreamResult(object):
    """A StreamResult implementation for testing.

    All events are logged to _events.
    """

    def __init__(self):
        # Chronological log of every event received.
        self._events = []

    def startTestRun(self):
        self._events.append(('startTestRun',))

    def stopTestRun(self):
        self._events.append(('stopTestRun',))

    def status(self, test_id=None, test_status=None, test_tags=None,
               runnable=True, file_name=None, file_bytes=None, eof=False,
               mime_type=None, route_code=None, timestamp=None):
        # Log the full argument list so tests can assert on exact calls.
        event = ('status', test_id, test_status, test_tags, runnable,
                 file_name, file_bytes, eof, mime_type, route_code,
                 timestamp)
        self._events.append(event)
"""Test results and related things."""

__metaclass__ = type
# NOTE(review): testresult/__init__.py re-exports CopyStreamResult,
# StreamResultRouter, TestByTestResult and TextTestResult from this module;
# they were missing from __all__ and have been added below.
__all__ = [
    'CopyStreamResult',
    'ExtendedToOriginalDecorator',
    'ExtendedToStreamDecorator',
    'MultiTestResult',
    'StreamFailFast',
    'StreamResult',
    'StreamResultRouter',
    'StreamSummary',
    'StreamTagger',
    'StreamToDict',
    'StreamToExtendedDecorator',
    'StreamToQueue',
    'Tagger',
    'TestByTestResult',
    'TestControl',
    'TestResult',
    'TestResultDecorator',
    'TextTestResult',
    'ThreadsafeForwardingResult',
    'TimestampingStreamResult',
    ]

import datetime
from operator import methodcaller
import sys
import unittest

from extras import safe_hasattr, try_import, try_imports
# Optional / version-dependent names are imported lazily via extras.
parse_mime_type = try_import('mimeparse.parse_mime_type')
Queue = try_imports(['Queue.Queue', 'queue.Queue'])

from testtools.compat import all, str_is_unicode, _u, _b
from testtools.content import (
    Content,
    text_content,
    TracebackContent,
    )
from testtools.content_type import ContentType
from testtools.tags import TagContext
# circular import
# from testtools.testcase import PlaceHolder
PlaceHolder = None

# From http://docs.python.org/library/datetime.html
_ZERO = datetime.timedelta(0)

# A UTC class.

class UTC(datetime.tzinfo):
    """UTC"""

    def utcoffset(self, dt):
        return _ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return _ZERO

utc = UTC()


class TestResult(unittest.TestResult):
    """Subclass of unittest.TestResult extending the protocol for flexability.

    This test result supports an experimental protocol for providing
    additional data to in test outcomes. All the outcome methods take an
    optional dict 'details'. If supplied any other detail parameters like
    'err' or 'reason' should not be provided. The details dict is a mapping
    from names to MIME content objects (see testtools.content). This permits
    attaching tracebacks, log files, or even large objects like databases
    that were part of the test fixture. Until this API is accepted into
    upstream Python it is considered experimental: it may be replaced at any
    point by a newer version more in line with upstream Python. Compatibility
    would be aimed for in this case, but may not be possible.

    :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
    """

    def __init__(self, failfast=False):
        # startTestRun resets all attributes, and older clients don't know to
        # call startTestRun, so it is called once here.
        # Because subclasses may reasonably not expect this, we call the
        # specific version we want to run.
        self.failfast = failfast
        TestResult.startTestRun(self)

    def addExpectedFailure(self, test, err=None, details=None):
        """Called when a test has failed in an expected manner.

        Like with addSuccess and addError, testStopped should still be called.

        :param test: The test that has been skipped.
        :param err: The exc_info of the error that was raised.
        :return: None
        """
        # This is the python 2.7 implementation
        self.expectedFailures.append(
            (test, self._err_details_to_string(test, err, details)))

    def addError(self, test, err=None, details=None):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().

        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        """
        self.errors.append((test,
            self._err_details_to_string(test, err, details)))
        if self.failfast:
            self.stop()

    def addFailure(self, test, err=None, details=None):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().

        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        """
        self.failures.append((test,
            self._err_details_to_string(test, err, details)))
        if self.failfast:
            self.stop()

    def addSkip(self, test, reason=None, details=None):
        """Called when a test has been skipped rather than running.

        Like with addSuccess and addError, testStopped should still be called.

        This must be called by the TestCase. 'addError' and 'addFailure' will
        not call addSkip, since they have no assumptions about the kind of
        errors that a test can raise.

        :param test: The test that has been skipped.
        :param reason: The reason for the test being skipped. For instance,
            u"pyGL is not available".
        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        :return: None
        """
        if reason is None:
            # Fall back to the details dict; its 'reason' value is a content
            # object, so render it to text.
            reason = details.get('reason')
            if reason is None:
                reason = 'No reason given'
            else:
                reason = reason.as_text()
        skip_list = self.skip_reasons.setdefault(reason, [])
        skip_list.append(test)

    def addSuccess(self, test, details=None):
        """Called when a test succeeded."""

    def addUnexpectedSuccess(self, test, details=None):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
        if self.failfast:
            self.stop()

    def wasSuccessful(self):
        """Has this result been successful so far?

        If there have been any errors, failures or unexpected successes,
        return False. Otherwise, return True.

        Note: This differs from standard unittest in that we consider
        unexpected successes to be equivalent to failures, rather than
        successes.
        """
        return not (self.errors or self.failures or self.unexpectedSuccesses)

    def _err_details_to_string(self, test, err=None, details=None):
        """Convert an error in exc_info form or a contents dict to a string."""
        if err is not None:
            return TracebackContent(err, test).as_text()
        return _details_to_str(details, special='traceback')

    def _exc_info_to_unicode(self, err, test):
        # Deprecated. Only present because subunit upcalls to it. See
        # <https://bugs.launchpad.net/testtools/+bug/929063>.
        return TracebackContent(err, test).as_text()

    def _now(self):
        """Return the current 'test time'.

        If the time() method has not been called, this is equivalent to
        datetime.now(), otherwise its the last supplied datestamp given to the
        time() method.
        """
        if self.__now is None:
            return datetime.datetime.now(utc)
        else:
            return self.__now

    def startTestRun(self):
        """Called before a test run starts.

        New in Python 2.7. The testtools version resets the result to a
        pristine condition ready for use in another test run. Note that this
        is different from Python 2.7's startTestRun, which does nothing.
        """
        # failfast is reset by the super __init__, so stash it.
        failfast = self.failfast
        super(TestResult, self).__init__()
        self.skip_reasons = {}
        self.__now = None
        self._tags = TagContext()
        # -- Start: As per python 2.7 --
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.failfast = failfast
        # -- End: As per python 2.7 --

    def stopTestRun(self):
        """Called after a test run completes

        New in python 2.7
        """

    def startTest(self, test):
        super(TestResult, self).startTest(test)
        # Tags nest per-test; push a child context.
        self._tags = TagContext(self._tags)

    def stopTest(self, test):
        # Pop the per-test tag context.
        self._tags = self._tags.parent
        super(TestResult, self).stopTest(test)

    @property
    def current_tags(self):
        """The currently set tags."""
        return self._tags.get_current_tags()

    def tags(self, new_tags, gone_tags):
        """Add and remove tags from the test.

        :param new_tags: A set of tags to be added to the stream.
        :param gone_tags: A set of tags to be removed from the stream.
        """
        self._tags.change_tags(new_tags, gone_tags)

    def time(self, a_datetime):
        """Provide a timestamp to represent the current time.

        This is useful when test activity is time delayed, or happening
        concurrently and getting the system time between API calls will not
        accurately represent the duration of tests (or the whole run).

        Calling time() sets the datetime used by the TestResult object.
        Time is permitted to go backwards when using this call.

        :param a_datetime: A datetime.datetime object with TZ information or
            None to reset the TestResult to gathering time from the system.
        """
        self.__now = a_datetime

    def done(self):
        """Called when the test runner is done.

        deprecated in favour of stopTestRun.
        """
This is optional in the API but very useful when multiplexing multiple + streams together as it allows identification of interactions between tests + that were run on the same hardware or in the same test process. Generally + actual tests never need to bother with this - it is added and processed + by StreamResult's that do multiplexing / run analysis. route_codes are + also used to route stdin back to pdb instances. + + The StreamResult base class does no accounting or processing, rather it + just provides an empty implementation of every method, suitable for use + as a base class regardless of intent. + """ + + def startTestRun(self): + """Start a test run. + + This will prepare the test result to process results (which might imply + connecting to a database or remote machine). + """ + + def stopTestRun(self): + """Stop a test run. + + This informs the result that no more test updates will be received. At + this point any test ids that have started and not completed can be + considered failed-or-hung. + """ + + def status(self, test_id=None, test_status=None, test_tags=None, + runnable=True, file_name=None, file_bytes=None, eof=False, + mime_type=None, route_code=None, timestamp=None): + """Inform the result about a test status. + + :param test_id: The test whose status is being reported. None to + report status about the test run as a whole. + :param test_status: The status for the test. There are two sorts of + status - interim and final status events. As many interim events + can be generated as desired, but only one final event. After a + final status event any further file or status events from the + same test_id+route_code may be discarded or associated with a new + test by the StreamResult. (But no exception will be thrown). + + Interim states: + * None - no particular status is being reported, or status being + reported is not associated with a test (e.g. when reporting on + stdout / stderr chatter). + * inprogress - the test is currently running. 
Emitted by tests when + they start running and at any intermediary point they might + choose to indicate their continual operation. + + Final states: + * exists - the test exists. This is used when a test is not being + executed. Typically this is when querying what tests could be run + in a test run (which is useful for selecting tests to run). + * xfail - the test failed but that was expected. This is purely + informative - the test is not considered to be a failure. + * uxsuccess - the test passed but was expected to fail. The test + will be considered a failure. + * success - the test has finished without error. + * fail - the test failed (or errored). The test will be considered + a failure. + * skip - the test was selected to run but chose to be skipped. E.g. + a test dependency was missing. This is purely informative - the + test is not considered to be a failure. + + :param test_tags: Optional set of tags to apply to the test. Tags + have no intrinsic meaning - that is up to the test author. + :param runnable: Allows status reports to mark that they are for + tests which are not able to be explicitly run. For instance, + subtests will report themselves as non-runnable. + :param file_name: The name for the file_bytes. Any unicode string may + be used. While there is no semantic value attached to the name + of any attachment, the names 'stdout' and 'stderr' and 'traceback' + are recommended for use only for output sent to stdout, stderr and + tracebacks of exceptions. When file_name is supplied, file_bytes + must be a bytes instance. + :param file_bytes: A bytes object containing content for the named + file. This can just be a single chunk of the file - emitting + another file event with more later. Must be None unleses a + file_name is supplied. + :param eof: True if this chunk is the last chunk of the file, any + additional chunks with the same name should be treated as an error + and discarded. Ignored unless file_name has been supplied. 
def domap(*args, **kwargs):
    """``map`` that always materialises its result as a list.

    Under python 3 ``map`` is lazy; callers rely on the mapped callable's
    side effects, so evaluation is forced here.
    """
    results = map(*args, **kwargs)
    return list(results)


class CopyStreamResult(StreamResult):
    """Copies all event it receives to multiple results.

    This provides an easy facility for combining multiple StreamResults.

    For TestResult the equivalent class was ``MultiTestResult``.
    """

    def __init__(self, targets):
        super(CopyStreamResult, self).__init__()
        # The StreamResults every event is replicated to.
        self.targets = targets

    def startTestRun(self):
        super(CopyStreamResult, self).startTestRun()
        for target in self.targets:
            target.startTestRun()

    def stopTestRun(self):
        super(CopyStreamResult, self).stopTestRun()
        for target in self.targets:
            target.stopTestRun()

    def status(self, *args, **kwargs):
        super(CopyStreamResult, self).status(*args, **kwargs)
        for target in self.targets:
            target.status(*args, **kwargs)


class StreamFailFast(StreamResult):
    """Call the supplied callback if an error is seen in a stream.

    An example callback::

        def do_something():
            pass
    """

    # Final statuses that count as a failed run.
    _error_statuses = ('uxsuccess', 'fail')

    def __init__(self, on_error):
        self.on_error = on_error

    def status(self, test_id=None, test_status=None, test_tags=None,
               runnable=True, file_name=None, file_bytes=None, eof=False,
               mime_type=None, route_code=None, timestamp=None):
        if test_status in self._error_statuses:
            self.on_error()
class StreamResultRouter(StreamResult):
    """A StreamResult that routes events.

    StreamResultRouter forwards received events to another StreamResult
    object, selected by a dynamic forwarding policy. Events where no
    destination is found are forwarded to the fallback StreamResult, or an
    error is raised.

    Typical use is to construct a router with a fallback and then either
    create up front mapping rules, or create them as-needed from the fallback
    handler::

        >>> router = StreamResultRouter()
        >>> sink = doubles.StreamResult()
        >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
        ...     consume_route=True)
        >>> router.status(test_id='foo', route_code='0/1',
        ...     test_status='uxsuccess')

    StreamResultRouter has no buffering.

    When adding routes (and for the fallback) whether to call startTestRun
    and stopTestRun or to not call them is controllable by passing
    'do_start_stop_run'. The default is to call them for the fallback only.
    If a route is added after startTestRun has been called, and
    do_start_stop_run is True then startTestRun is called immediately on the
    new route sink.

    There is no a-priori defined lookup order for routes: if they are
    ambiguous the behaviour is undefined. Only a single route is chosen for
    any event.
    """

    # Maps policy name -> the method that installs that policy. Populated at
    # class definition time by the _map_* methods below.
    _policies = {}

    def __init__(self, fallback=None, do_start_stop_run=True):
        """Construct a StreamResultRouter with optional fallback.

        :param fallback: A StreamResult to forward events to when no route
            exists for them.
        :param do_start_stop_run: If False do not pass startTestRun and
            stopTestRun onto the fallback.
        """
        self.fallback = fallback
        self._route_code_prefixes = {}
        self._test_ids = {}
        # Records sinks that should have do_start_stop_run called on them.
        self._sinks = []
        if do_start_stop_run and fallback:
            self._sinks.append(fallback)
        self._in_run = False

    def startTestRun(self):
        super(StreamResultRouter, self).startTestRun()
        for sink in self._sinks:
            sink.startTestRun()
        self._in_run = True

    def stopTestRun(self):
        super(StreamResultRouter, self).stopTestRun()
        for sink in self._sinks:
            sink.stopTestRun()
        self._in_run = False

    def status(self, **kwargs):
        route_code = kwargs.get('route_code', None)
        test_id = kwargs.get('test_id', None)
        if route_code is not None:
            prefix = route_code.split('/')[0]
        else:
            prefix = route_code
        if prefix in self._route_code_prefixes:
            target, consume_route = self._route_code_prefixes[prefix]
            if route_code is not None and consume_route:
                # Strip the matched prefix (and its '/') before forwarding.
                route_code = route_code[len(prefix) + 1:]
                if not route_code:
                    route_code = None
                kwargs['route_code'] = route_code
        elif test_id in self._test_ids:
            target = self._test_ids[test_id]
        else:
            target = self.fallback
        target.status(**kwargs)

    def add_rule(self, sink, policy, do_start_stop_run=False, **policy_args):
        """Add a rule to route events to sink when they match a given policy.

        :param sink: A StreamResult to receive events.
        :param policy: A routing policy. Valid policies are
            'route_code_prefix' and 'test_id'.
        :param do_start_stop_run: If True then startTestRun and stopTestRun
            events will be passed onto this sink.

        :raises: ValueError if the policy is unknown
        :raises: TypeError if the policy is given arguments it cannot handle.

        ``route_code_prefix`` routes events based on a prefix of the route
        code in the event. It takes a ``route_prefix`` argument to match on
        (e.g. '0') and a ``consume_route`` argument, which, if True, removes
        the prefix from the ``route_code`` when forwarding events.

        ``test_id`` routes events based on the test id.  It takes a single
        argument, ``test_id``.  Use ``None`` to select non-test events.
        """
        policy_method = StreamResultRouter._policies.get(policy, None)
        if not policy_method:
            raise ValueError("bad policy %r" % (policy,))
        policy_method(self, sink, **policy_args)
        if do_start_stop_run:
            self._sinks.append(sink)
            # Late-added sinks catch up with an immediate startTestRun.
            if self._in_run:
                sink.startTestRun()

    def _map_route_code_prefix(self, sink, route_prefix, consume_route=False):
        if '/' in route_prefix:
            raise TypeError(
                "%r is more than one route step long" % (route_prefix,))
        self._route_code_prefixes[route_prefix] = (sink, consume_route)
    _policies['route_code_prefix'] = _map_route_code_prefix

    def _map_test_id(self, sink, test_id):
        self._test_ids[test_id] = sink
    _policies['test_id'] = _map_test_id


class StreamTagger(CopyStreamResult):
    """Adds or discards tags from StreamResult events."""

    def __init__(self, targets, add=None, discard=None):
        """Create a StreamTagger.

        :param targets: A list of targets to forward events onto.
        :param add: Either None or an iterable of tags to add to each event.
        :param discard: Either None or an iterable of tags to discard from
            each event.
        """
        super(StreamTagger, self).__init__(targets)
        self.add = frozenset(add or ())
        self.discard = frozenset(discard or ())

    def status(self, *args, **kwargs):
        # Work on a copy: the previous implementation mutated the caller's
        # tag set in place, leaking the add/discard edits back to the caller.
        test_tags = set(kwargs.get('test_tags') or ())
        test_tags.update(self.add)
        test_tags.difference_update(self.discard)
        kwargs['test_tags'] = test_tags or None
        super(StreamTagger, self).status(*args, **kwargs)
+ * status: One of the StreamResult status codes (including inprogress) or + 'unknown' (used if only file events for a test were received...) + * timestamps: A pair of timestamps - the first one received with this + test id, and the one in the event that triggered the notification. + Hung tests have a None for the second end event. Timestamps are not + compared - their ordering is purely order received in the stream. + + Only the most recent tags observed in the stream are reported. + """ + + def __init__(self, on_test): + """Create a StreamToDict calling on_test on test completions. + + :param on_test: A callback that accepts one parameter - a dict + describing a test. + """ + super(StreamToDict, self).__init__() + self.on_test = on_test + if parse_mime_type is None: + raise ImportError("mimeparse module missing.") + + def startTestRun(self): + super(StreamToDict, self).startTestRun() + self._inprogress = {} + + def status(self, test_id=None, test_status=None, test_tags=None, + runnable=True, file_name=None, file_bytes=None, eof=False, + mime_type=None, route_code=None, timestamp=None): + super(StreamToDict, self).status(test_id, test_status, + test_tags=test_tags, runnable=runnable, file_name=file_name, + file_bytes=file_bytes, eof=eof, mime_type=mime_type, + route_code=route_code, timestamp=timestamp) + key = self._ensure_key(test_id, route_code, timestamp) + # update fields + if not key: + return + if test_status is not None: + self._inprogress[key]['status'] = test_status + self._inprogress[key]['timestamps'][1] = timestamp + case = self._inprogress[key] + if file_name is not None: + if file_name not in case['details']: + if mime_type is None: + mime_type = 'application/octet-stream' + primary, sub, parameters = parse_mime_type(mime_type) + if 'charset' in parameters: + if ',' in parameters['charset']: + # testtools was emitting a bad encoding, workaround it, + # Though this does lose data - probably want to drop + # this in a few releases. 
                        parameters['charset'] = parameters['charset'][
                            :parameters['charset'].find(',')]
                content_type = ContentType(primary, sub, parameters)
                content_bytes = []
                # The lambda closes over the mutable list, so appending to
                # the iter_bytes() result below accumulates chunks in place.
                case['details'][file_name] = Content(
                    content_type, lambda:content_bytes)
            case['details'][file_name].iter_bytes().append(file_bytes)
        if test_tags is not None:
            self._inprogress[key]['tags'] = test_tags
        # notify completed tests.
        if test_status not in (None, 'inprogress'):
            self.on_test(self._inprogress.pop(key))

    def stopTestRun(self):
        """Flush hung tests, reporting them with a None end timestamp."""
        super(StreamToDict, self).stopTestRun()
        while self._inprogress:
            case = self._inprogress.popitem()[1]
            case['timestamps'][1] = None
            self.on_test(case)

    def _ensure_key(self, test_id, route_code, timestamp):
        # Return the buffer key for (test_id, route_code), creating the
        # skeleton record on first sight. Non-test events (test_id None)
        # yield None so callers can ignore them.
        if test_id is None:
            return
        key = (test_id, route_code)
        if key not in self._inprogress:
            self._inprogress[key] = {
                'id': test_id,
                'tags': set(),
                'details': {},
                'status': 'unknown',
                'timestamps': [timestamp, None]}
        return key


# Maps StreamResult status codes to the TestResult outcome method used by
# test_dict_to_case. Incomplete statuses ('inprogress', 'unknown') are
# surfaced as failures.
_status_map = {
    'inprogress': 'addFailure',
    'unknown': 'addFailure',
    'success': 'addSuccess',
    'skip': 'addSkip',
    'fail': 'addFailure',
    'xfail': 'addExpectedFailure',
    'uxsuccess': 'addUnexpectedSuccess',
    }


def test_dict_to_case(test_dict):
    """Convert a test dict into a TestCase object.

    :param test_dict: A test dict as generated by StreamToDict.
    :return: A PlaceHolder test object.
    """
    # Circular import.
    global PlaceHolder
    if PlaceHolder is None:
        from testtools.testcase import PlaceHolder
    outcome = _status_map[test_dict['status']]
    return PlaceHolder(test_dict['id'], outcome=outcome,
        details=test_dict['details'], tags=test_dict['tags'],
        timestamps=test_dict['timestamps'])


class StreamSummary(StreamToDict):
    """A specialised StreamResult that summarises a stream.

    The summary uses the same representation as the original
    unittest.TestResult contract, allowing it to be consumed by any test
    runner.
+ """ + + def __init__(self): + super(StreamSummary, self).__init__(self._gather_test) + self._handle_status = { + 'success': self._success, + 'skip': self._skip, + 'exists': self._exists, + 'fail': self._fail, + 'xfail': self._xfail, + 'uxsuccess': self._uxsuccess, + 'unknown': self._incomplete, + 'inprogress': self._incomplete, + } + + def startTestRun(self): + super(StreamSummary, self).startTestRun() + self.failures = [] + self.errors = [] + self.testsRun = 0 + self.skipped = [] + self.expectedFailures = [] + self.unexpectedSuccesses = [] + + def wasSuccessful(self): + """Return False if any failure has occured. + + Note that incomplete tests can only be detected when stopTestRun is + called, so that should be called before checking wasSuccessful. + """ + return (not self.failures and not self.errors) + + def _gather_test(self, test_dict): + if test_dict['status'] == 'exists': + return + self.testsRun += 1 + case = test_dict_to_case(test_dict) + self._handle_status[test_dict['status']](case) + + def _incomplete(self, case): + self.errors.append((case, "Test did not complete")) + + def _success(self, case): + pass + + def _skip(self, case): + if 'reason' not in case._details: + reason = "Unknown" + else: + reason = case._details['reason'].as_text() + self.skipped.append((case, reason)) + + def _exists(self, case): + pass + + def _fail(self, case): + message = _details_to_str(case._details, special="traceback") + self.errors.append((case, message)) + + def _xfail(self, case): + message = _details_to_str(case._details, special="traceback") + self.expectedFailures.append((case, message)) + + def _uxsuccess(self, case): + case._outcome = 'addUnexpectedSuccess' + self.unexpectedSuccesses.append(case) + + +class TestControl(object): + """Controls a running test run, allowing it to be interrupted. + + :ivar shouldStop: If True, tests should not run and should instead + return immediately. 
        Similarly a TestSuite should check this between
        each test and if set stop dispatching any new tests and return.
    """

    def __init__(self):
        super(TestControl, self).__init__()
        self.shouldStop = False

    def stop(self):
        """Indicate that tests should stop running."""
        self.shouldStop = True


class MultiTestResult(TestResult):
    """A test result that dispatches to many test results."""

    def __init__(self, *results):
        # Setup _results first, as the base class __init__ assigns to failfast.
        self._results = list(map(ExtendedToOriginalDecorator, results))
        super(MultiTestResult, self).__init__()

    def __repr__(self):
        return '<%s (%s)>' % (
            self.__class__.__name__, ', '.join(map(repr, self._results)))

    def _dispatch(self, message, *args, **kwargs):
        # Invoke `message` on every wrapped result, returning all results.
        return tuple(
            getattr(result, message)(*args, **kwargs)
            for result in self._results)

    def _get_failfast(self):
        # Read from the first result only; the setter keeps all results in
        # sync. NOTE(review): raises IndexError with zero results - confirm
        # callers always pass at least one.
        return getattr(self._results[0], 'failfast', False)
    def _set_failfast(self, value):
        self._dispatch('__setattr__', 'failfast', value)
    failfast = property(_get_failfast, _set_failfast)

    def _get_shouldStop(self):
        # True if any constituent result wants the run stopped.
        return any(self._dispatch('__getattr__', 'shouldStop'))
    def _set_shouldStop(self, value):
        # Called because we subclass TestResult. Probably should not do that.
        pass
    shouldStop = property(_get_shouldStop, _set_shouldStop)

    def startTest(self, test):
        super(MultiTestResult, self).startTest(test)
        return self._dispatch('startTest', test)

    def stop(self):
        return self._dispatch('stop')

    def stopTest(self, test):
        super(MultiTestResult, self).stopTest(test)
        return self._dispatch('stopTest', test)

    def addError(self, test, error=None, details=None):
        return self._dispatch('addError', test, error, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        return self._dispatch(
            'addExpectedFailure', test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        return self._dispatch('addFailure', test, err, details=details)

    def addSkip(self, test, reason=None, details=None):
        return self._dispatch('addSkip', test, reason, details=details)

    def addSuccess(self, test, details=None):
        return self._dispatch('addSuccess', test, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        return self._dispatch('addUnexpectedSuccess', test, details=details)

    def startTestRun(self):
        super(MultiTestResult, self).startTestRun()
        return self._dispatch('startTestRun')

    def stopTestRun(self):
        return self._dispatch('stopTestRun')

    def tags(self, new_tags, gone_tags):
        super(MultiTestResult, self).tags(new_tags, gone_tags)
        return self._dispatch('tags', new_tags, gone_tags)

    def time(self, a_datetime):
        return self._dispatch('time', a_datetime)

    def done(self):
        return self._dispatch('done')

    def wasSuccessful(self):
        """Was this result successful?

        Only returns True if every constituent result was successful.
        """
        return all(self._dispatch('wasSuccessful'))


class TextTestResult(TestResult):
    """A TestResult which outputs activity to a text stream."""

    def __init__(self, stream, failfast=False):
        """Construct a TextTestResult writing to stream."""
        super(TextTestResult, self).__init__(failfast=failfast)
        self.stream = stream
        self.sep1 = '=' * 70 + '\n'
        self.sep2 = '-' * 70 + '\n'

    def _delta_to_float(self, a_timedelta):
        # Hand-rolled timedelta -> seconds conversion (equivalent to
        # total_seconds(); presumably kept for old-Python compat - confirm).
        return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
            a_timedelta.microseconds / 1000000.0)

    def _show_list(self, label, error_list):
        # Print each (test, output) pair framed by the separator rules.
        for test, output in error_list:
            self.stream.write(self.sep1)
            self.stream.write("%s: %s\n" % (label, test.id()))
            self.stream.write(self.sep2)
            self.stream.write(output)

    def startTestRun(self):
        super(TextTestResult, self).startTestRun()
        self.__start = self._now()
        self.stream.write("Tests running...\n")

    def stopTestRun(self):
        """Emit the error/failure details and the run summary line."""
        if self.testsRun != 1:
            plural = 's'
        else:
            plural = ''
        stop = self._now()
        self._show_list('ERROR', self.errors)
        self._show_list('FAIL', self.failures)
        for test in self.unexpectedSuccesses:
            self.stream.write(
                "%sUNEXPECTED SUCCESS: %s\n%s" % (
                    self.sep1, test.id(), self.sep2))
        self.stream.write("\nRan %d test%s in %.3fs\n" %
            (self.testsRun, plural,
            self._delta_to_float(stop - self.__start)))
        if self.wasSuccessful():
            self.stream.write("OK\n")
        else:
            self.stream.write("FAILED (")
            details = []
            # The reported 'failures' count aggregates failures, errors and
            # unexpected successes.
            details.append("failures=%d" % (
                sum(map(len, (
                    self.failures, self.errors, self.unexpectedSuccesses)))))
            self.stream.write(", ".join(details))
            self.stream.write(")\n")
        super(TextTestResult, self).stopTestRun()


class ThreadsafeForwardingResult(TestResult):
    """A TestResult which ensures the target does not receive mixed up calls.

    Multiple ``ThreadsafeForwardingResults`` can forward to the same target
    result, and that target result will only ever receive the complete set of
    events for one test at a time.
+ + This is enforced using a semaphore, which further guarantees that tests + will be sent atomically even if the ``ThreadsafeForwardingResults`` are in + different threads. + + ``ThreadsafeForwardingResult`` is typically used by + ``ConcurrentTestSuite``, which creates one ``ThreadsafeForwardingResult`` + per thread, each of which wraps of the TestResult that + ``ConcurrentTestSuite.run()`` is called with. + + target.startTestRun() and target.stopTestRun() are called once for each + ThreadsafeForwardingResult that forwards to the same target. If the target + takes special action on these events, it should take care to accommodate + this. + + time() and tags() calls are batched to be adjacent to the test result and + in the case of tags() are coerced into test-local scope, avoiding the + opportunity for bugs around global state in the target. + """ + + def __init__(self, target, semaphore): + """Create a ThreadsafeForwardingResult forwarding to target. + + :param target: A ``TestResult``. + :param semaphore: A ``threading.Semaphore`` with limit 1. 
        """
        TestResult.__init__(self)
        self.result = ExtendedToOriginalDecorator(target)
        self.semaphore = semaphore
        # Start time of the currently buffered test, or None outside a test.
        self._test_start = None
        # (new_tags, gone_tags) pairs: run-wide tags and per-test tags.
        self._global_tags = set(), set()
        self._test_tags = set(), set()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.result)

    def _any_tags(self, tags):
        # True if either the new or the gone set of a tag pair is non-empty.
        return bool(tags[0] or tags[1])

    def _add_result_with_semaphore(self, method, test, *args, **kwargs):
        # Replay the buffered start time, tags and outcome for one test as
        # an atomic startTest..stopTest sequence on the shared target,
        # serialised by the semaphore.
        now = self._now()
        self.semaphore.acquire()
        try:
            self.result.time(self._test_start)
            self.result.startTest(test)
            self.result.time(now)
            if self._any_tags(self._global_tags):
                self.result.tags(*self._global_tags)
            if self._any_tags(self._test_tags):
                self.result.tags(*self._test_tags)
            # Per-test tags are local to this test; reset for the next one.
            self._test_tags = set(), set()
            try:
                method(test, *args, **kwargs)
            finally:
                self.result.stopTest(test)
        finally:
            self.semaphore.release()
        self._test_start = None

    def addError(self, test, err=None, details=None):
        self._add_result_with_semaphore(self.result.addError,
            test, err, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        self._add_result_with_semaphore(self.result.addExpectedFailure,
            test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        self._add_result_with_semaphore(self.result.addFailure,
            test, err, details=details)

    def addSkip(self, test, reason=None, details=None):
        self._add_result_with_semaphore(self.result.addSkip,
            test, reason, details=details)

    def addSuccess(self, test, details=None):
        self._add_result_with_semaphore(self.result.addSuccess,
            test, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
            test, details=details)

    def progress(self, offset, whence):
        # Progress events from concurrent sources cannot be meaningfully
        # interleaved; drop them.
        pass

    def startTestRun(self):
        super(ThreadsafeForwardingResult, self).startTestRun()
        self.semaphore.acquire()
        try:
            self.result.startTestRun()
        finally:
            self.semaphore.release()

    def _get_shouldStop(self):
        self.semaphore.acquire()
        try:
            return self.result.shouldStop
        finally:
            self.semaphore.release()
    def _set_shouldStop(self, value):
        # Another case where we should not subclass TestResult
        pass
    shouldStop = property(_get_shouldStop, _set_shouldStop)

    def stop(self):
        self.semaphore.acquire()
        try:
            self.result.stop()
        finally:
            self.semaphore.release()

    def stopTestRun(self):
        self.semaphore.acquire()
        try:
            self.result.stopTestRun()
        finally:
            self.semaphore.release()

    def done(self):
        self.semaphore.acquire()
        try:
            self.result.done()
        finally:
            self.semaphore.release()

    def startTest(self, test):
        # Only record the start time here; the target's startTest is called
        # later, inside _add_result_with_semaphore, when the outcome arrives.
        self._test_start = self._now()
        super(ThreadsafeForwardingResult, self).startTest(test)

    def wasSuccessful(self):
        return self.result.wasSuccessful()

    def tags(self, new_tags, gone_tags):
        """See `TestResult`."""
        super(ThreadsafeForwardingResult, self).tags(new_tags, gone_tags)
        if self._test_start is not None:
            # Inside a test: coerce the tags to test-local scope.
            self._test_tags = _merge_tags(
                self._test_tags, (new_tags, gone_tags))
        else:
            self._global_tags = _merge_tags(
                self._global_tags, (new_tags, gone_tags))


def _merge_tags(existing, changed):
    # Fold one (new_tags, gone_tags) change into an existing pair: a newly
    # added tag cancels a prior removal and vice versa, keeping the two
    # result sets disjoint.
    new_tags, gone_tags = changed
    result_new = set(existing[0])
    result_gone = set(existing[1])
    result_new.update(new_tags)
    result_new.difference_update(gone_tags)
    result_gone.update(gone_tags)
    result_gone.difference_update(new_tags)
    return result_new, result_gone


class ExtendedToOriginalDecorator(object):
    """Permit new TestResult API code to degrade gracefully with old results.

    This decorates an existing TestResult and converts missing outcomes
    such as addSkip to older outcomes such as addSuccess. It also supports
    the extended details protocol. In all cases the most recent protocol
    is attempted first, and fallbacks only occur when the decorated result
    does not support the newer style of calling.
+ """ + + def __init__(self, decorated): + self.decorated = decorated + self._tags = TagContext() + # Only used for old TestResults that do not have failfast. + self._failfast = False + + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, self.decorated) + + def __getattr__(self, name): + return getattr(self.decorated, name) + + def addError(self, test, err=None, details=None): + try: + self._check_args(err, details) + if details is not None: + try: + return self.decorated.addError(test, details=details) + except TypeError: + # have to convert + err = self._details_to_exc_info(details) + return self.decorated.addError(test, err) + finally: + if self.failfast: + self.stop() + + def addExpectedFailure(self, test, err=None, details=None): + self._check_args(err, details) + addExpectedFailure = getattr( + self.decorated, 'addExpectedFailure', None) + if addExpectedFailure is None: + return self.addSuccess(test) + if details is not None: + try: + return addExpectedFailure(test, details=details) + except TypeError: + # have to convert + err = self._details_to_exc_info(details) + return addExpectedFailure(test, err) + + def addFailure(self, test, err=None, details=None): + try: + self._check_args(err, details) + if details is not None: + try: + return self.decorated.addFailure(test, details=details) + except TypeError: + # have to convert + err = self._details_to_exc_info(details) + return self.decorated.addFailure(test, err) + finally: + if self.failfast: + self.stop() + + def addSkip(self, test, reason=None, details=None): + self._check_args(reason, details) + addSkip = getattr(self.decorated, 'addSkip', None) + if addSkip is None: + return self.decorated.addSuccess(test) + if details is not None: + try: + return addSkip(test, details=details) + except TypeError: + # extract the reason if it's available + try: + reason = details['reason'].as_text() + except KeyError: + reason = _details_to_str(details) + return addSkip(test, reason) + + def 
addUnexpectedSuccess(self, test, details=None): + try: + outcome = getattr(self.decorated, 'addUnexpectedSuccess', None) + if outcome is None: + try: + test.fail("") + except test.failureException: + return self.addFailure(test, sys.exc_info()) + if details is not None: + try: + return outcome(test, details=details) + except TypeError: + pass + return outcome(test) + finally: + if self.failfast: + self.stop() + + def addSuccess(self, test, details=None): + if details is not None: + try: + return self.decorated.addSuccess(test, details=details) + except TypeError: + pass + return self.decorated.addSuccess(test) + + def _check_args(self, err, details): + param_count = 0 + if err is not None: + param_count += 1 + if details is not None: + param_count += 1 + if param_count != 1: + raise ValueError("Must pass only one of err '%s' and details '%s" + % (err, details)) + + def _details_to_exc_info(self, details): + """Convert a details dict to an exc_info tuple.""" + return ( + _StringException, + _StringException(_details_to_str(details, special='traceback')), + None) + + @property + def current_tags(self): + return getattr( + self.decorated, 'current_tags', self._tags.get_current_tags()) + + def done(self): + try: + return self.decorated.done() + except AttributeError: + return + + def _get_failfast(self): + return getattr(self.decorated, 'failfast', self._failfast) + def _set_failfast(self, value): + if safe_hasattr(self.decorated, 'failfast'): + self.decorated.failfast = value + else: + self._failfast = value + failfast = property(_get_failfast, _set_failfast) + + def progress(self, offset, whence): + method = getattr(self.decorated, 'progress', None) + if method is None: + return + return method(offset, whence) + + @property + def shouldStop(self): + return self.decorated.shouldStop + + def startTest(self, test): + self._tags = TagContext(self._tags) + return self.decorated.startTest(test) + + def startTestRun(self): + self._tags = TagContext() + try: + return 
self.decorated.startTestRun() + except AttributeError: + return + + def stop(self): + return self.decorated.stop() + + def stopTest(self, test): + self._tags = self._tags.parent + return self.decorated.stopTest(test) + + def stopTestRun(self): + try: + return self.decorated.stopTestRun() + except AttributeError: + return + + def tags(self, new_tags, gone_tags): + method = getattr(self.decorated, 'tags', None) + if method is not None: + return method(new_tags, gone_tags) + else: + self._tags.change_tags(new_tags, gone_tags) + + def time(self, a_datetime): + method = getattr(self.decorated, 'time', None) + if method is None: + return + return method(a_datetime) + + def wasSuccessful(self): + return self.decorated.wasSuccessful() + + +class ExtendedToStreamDecorator(CopyStreamResult, StreamSummary, TestControl): + """Permit using old TestResult API code with new StreamResult objects. + + This decorates a StreamResult and converts old (Python 2.6 / 2.7 / + Extended) TestResult API calls into StreamResult calls. + + It also supports regular StreamResult calls, making it safe to wrap around + any StreamResult. + """ + + def __init__(self, decorated): + super(ExtendedToStreamDecorator, self).__init__([decorated]) + # Deal with mismatched base class constructors. 
        TestControl.__init__(self)
        # Set by startTestRun; outcome methods call startTestRun lazily if
        # it has not happened yet.
        self._started = False

    def _get_failfast(self):
        # failfast is modelled structurally: when enabled, a StreamFailFast
        # is appended as a second target after the decorated sink.
        return len(self.targets) == 2
    def _set_failfast(self, value):
        if value:
            if len(self.targets) == 2:
                # Already set.
                return
            self.targets.append(StreamFailFast(self.stop))
        else:
            del self.targets[1:]
    failfast = property(_get_failfast, _set_failfast)

    def startTest(self, test):
        if not self._started:
            self.startTestRun()
        self.status(test_id=test.id(), test_status='inprogress', timestamp=self._now())
        # Open a nested tag scope for the duration of this test.
        self._tags = TagContext(self._tags)

    def stopTest(self, test):
        self._tags = self._tags.parent

    def addError(self, test, err=None, details=None):
        self._check_args(err, details)
        self._convert(test, err, details, 'fail')
    addFailure = addError

    def _convert(self, test, err, details, status, reason=None):
        # Translate one old-style outcome call into StreamResult file and
        # status events.
        if not self._started:
            self.startTestRun()
        test_id = test.id()
        now = self._now()
        if err is not None:
            if details is None:
                details = {}
            # NOTE(review): mutates a caller-supplied details dict when err
            # is also given - confirm callers do not reuse it.
            details['traceback'] = TracebackContent(err, test)
        if details is not None:
            for name, content in details.items():
                mime_type = repr(content.content_type)
                for file_bytes in content.iter_bytes():
                    self.status(file_name=name, file_bytes=file_bytes,
                        mime_type=mime_type, test_id=test_id, timestamp=now)
                # Terminate each attachment with an explicit zero-byte EOF
                # event.
                self.status(file_name=name, file_bytes=_b(""), eof=True,
                    mime_type=mime_type, test_id=test_id, timestamp=now)
        if reason is not None:
            self.status(file_name='reason', file_bytes=reason.encode('utf8'),
                eof=True, mime_type="text/plain; charset=utf8",
                test_id=test_id, timestamp=now)
        self.status(test_id=test_id, test_status=status,
            test_tags=self.current_tags, timestamp=now)

    def addExpectedFailure(self, test, err=None, details=None):
        self._check_args(err, details)
        self._convert(test, err, details, 'xfail')

    def addSkip(self, test, reason=None, details=None):
        self._convert(test, None, details, 'skip', reason)

    def addUnexpectedSuccess(self, test, details=None):
        self._convert(test, None, details,
'uxsuccess') + + def addSuccess(self, test, details=None): + self._convert(test, None, details, 'success') + + def _check_args(self, err, details): + param_count = 0 + if err is not None: + param_count += 1 + if details is not None: + param_count += 1 + if param_count != 1: + raise ValueError("Must pass only one of err '%s' and details '%s" + % (err, details)) + + def startTestRun(self): + super(ExtendedToStreamDecorator, self).startTestRun() + self._tags = TagContext() + self.shouldStop = False + self.__now = None + self._started = True + + def stopTest(self, test): + self._tags = self._tags.parent + + @property + def current_tags(self): + """The currently set tags.""" + return self._tags.get_current_tags() + + def tags(self, new_tags, gone_tags): + """Add and remove tags from the test. + + :param new_tags: A set of tags to be added to the stream. + :param gone_tags: A set of tags to be removed from the stream. + """ + self._tags.change_tags(new_tags, gone_tags) + + def _now(self): + """Return the current 'test time'. + + If the time() method has not been called, this is equivalent to + datetime.now(), otherwise its the last supplied datestamp given to the + time() method. + """ + if self.__now is None: + return datetime.datetime.now(utc) + else: + return self.__now + + def time(self, a_datetime): + self.__now = a_datetime + + def wasSuccessful(self): + if not self._started: + self.startTestRun() + return super(ExtendedToStreamDecorator, self).wasSuccessful() + + +class StreamToExtendedDecorator(StreamResult): + """Convert StreamResult API calls into ExtendedTestResult calls. + + This will buffer all calls for all concurrently active tests, and + then flush each test as they complete. + + Incomplete tests will be flushed as errors when the test run stops. + + Non test file attachments are accumulated into a test called + 'testtools.extradata' flushed at the end of the run. 
+ """ + + def __init__(self, decorated): + # ExtendedToOriginalDecorator takes care of thunking details back to + # exceptions/reasons etc. + self.decorated = ExtendedToOriginalDecorator(decorated) + # StreamToDict buffers and gives us individual tests. + self.hook = StreamToDict(self._handle_tests) + + def status(self, test_id=None, test_status=None, *args, **kwargs): + if test_status == 'exists': + return + self.hook.status( + test_id=test_id, test_status=test_status, *args, **kwargs) + + def startTestRun(self): + self.decorated.startTestRun() + self.hook.startTestRun() + + def stopTestRun(self): + self.hook.stopTestRun() + self.decorated.stopTestRun() + + def _handle_tests(self, test_dict): + case = test_dict_to_case(test_dict) + case.run(self.decorated) + + +class StreamToQueue(StreamResult): + """A StreamResult which enqueues events as a dict to a queue.Queue. + + Events have their route code updated to include the route code + StreamToQueue was constructed with before they are submitted. If the event + route code is None, it is replaced with the StreamToQueue route code, + otherwise it is prefixed with the supplied code + a hyphen. + + startTestRun and stopTestRun are forwarded to the queue. Implementors that + dequeue events back into StreamResult calls should take care not to call + startTestRun / stopTestRun on other StreamResult objects multiple times + (e.g. by filtering startTestRun and stopTestRun). + + ``StreamToQueue`` is typically used by + ``ConcurrentStreamTestSuite``, which creates one ``StreamToQueue`` + per thread, forwards status events to the the StreamResult that + ``ConcurrentStreamTestSuite.run()`` was called with, and uses the + stopTestRun event to trigger calling join() on the each thread. + + Unlike ThreadsafeForwardingResult which this supercedes, no buffering takes + place - any event supplied to a StreamToQueue will be inserted into the + queue immediately. 
+ + Events are forwarded as a dict with a key ``event`` which is one of + ``startTestRun``, ``stopTestRun`` or ``status``. When ``event`` is + ``status`` the dict also has keys matching the keyword arguments + of ``StreamResult.status``, otherwise it has one other key ``result`` which + is the result that invoked ``startTestRun``. + """ + + def __init__(self, queue, routing_code): + """Create a StreamToQueue forwarding to target. + + :param queue: A ``queue.Queue`` to receive events. + :param routing_code: The routing code to apply to messages. + """ + super(StreamToQueue, self).__init__() + self.queue = queue + self.routing_code = routing_code + + def startTestRun(self): + self.queue.put(dict(event='startTestRun', result=self)) + + def status(self, test_id=None, test_status=None, test_tags=None, + runnable=True, file_name=None, file_bytes=None, eof=False, + mime_type=None, route_code=None, timestamp=None): + self.queue.put(dict(event='status', test_id=test_id, + test_status=test_status, test_tags=test_tags, runnable=runnable, + file_name=file_name, file_bytes=file_bytes, eof=eof, + mime_type=mime_type, route_code=self.route_code(route_code), + timestamp=timestamp)) + + def stopTestRun(self): + self.queue.put(dict(event='stopTestRun', result=self)) + + def route_code(self, route_code): + """Adjust route_code on the way through.""" + if route_code is None: + return self.routing_code + return self.routing_code + _u("/") + route_code + + +class TestResultDecorator(object): + """General pass-through decorator. + + This provides a base that other TestResults can inherit from to + gain basic forwarding functionality. 
    """

    def __init__(self, decorated):
        """Create a TestResultDecorator forwarding to decorated."""
        self.decorated = decorated

    def startTest(self, test):
        return self.decorated.startTest(test)

    def startTestRun(self):
        return self.decorated.startTestRun()

    def stopTest(self, test):
        return self.decorated.stopTest(test)

    def stopTestRun(self):
        return self.decorated.stopTestRun()

    def addError(self, test, err=None, details=None):
        return self.decorated.addError(test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        return self.decorated.addFailure(test, err, details=details)

    def addSuccess(self, test, details=None):
        return self.decorated.addSuccess(test, details=details)

    def addSkip(self, test, reason=None, details=None):
        return self.decorated.addSkip(test, reason, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        return self.decorated.addExpectedFailure(test, err, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        return self.decorated.addUnexpectedSuccess(test, details=details)

    def progress(self, offset, whence):
        return self.decorated.progress(offset, whence)

    def wasSuccessful(self):
        return self.decorated.wasSuccessful()

    @property
    def current_tags(self):
        return self.decorated.current_tags

    @property
    def shouldStop(self):
        return self.decorated.shouldStop

    def stop(self):
        return self.decorated.stop()

    @property
    def testsRun(self):
        return self.decorated.testsRun

    def tags(self, new_tags, gone_tags):
        return self.decorated.tags(new_tags, gone_tags)

    def time(self, a_datetime):
        return self.decorated.time(a_datetime)


class Tagger(TestResultDecorator):
    """Tag each test individually."""

    def __init__(self, decorated, new_tags, gone_tags):
        """Wrap 'decorated' such that each test is tagged.

        :param new_tags: Tags to be added for each test.
+ :param gone_tags: Tags to be removed for each test.
+ """
+ super(Tagger, self).__init__(decorated)
+ self._new_tags = set(new_tags)
+ self._gone_tags = set(gone_tags)
+
+ def startTest(self, test):
+ super(Tagger, self).startTest(test)
+ self.tags(self._new_tags, self._gone_tags)
+
+
+class TestByTestResult(TestResult):
+ """Call something every time a test completes."""
+
+ def __init__(self, on_test):
+ """Construct a ``TestByTestResult``.
+
+ :param on_test: A callable that takes a test case, a status (one of
+ "success", "failure", "error", "skip", or "xfail"), a start time
+ (a ``datetime`` with timezone), a stop time, an iterable of tags,
+ and a details dict. Is called at the end of each test (i.e. on
+ ``stopTest``) with the accumulated values for that test.
+ """
+ super(TestByTestResult, self).__init__()
+ self._on_test = on_test
+
+ def startTest(self, test):
+ super(TestByTestResult, self).startTest(test)
+ self._start_time = self._now()
+ # There's no supported (i.e. tested) behaviour that relies on these
+ # being set, but it makes me more comfortable all the same. 
-- jml + self._status = None + self._details = None + self._stop_time = None + + def stopTest(self, test): + self._stop_time = self._now() + tags = set(self.current_tags) + super(TestByTestResult, self).stopTest(test) + self._on_test( + test=test, + status=self._status, + start_time=self._start_time, + stop_time=self._stop_time, + tags=tags, + details=self._details) + + def _err_to_details(self, test, err, details): + if details: + return details + return {'traceback': TracebackContent(err, test)} + + def addSuccess(self, test, details=None): + super(TestByTestResult, self).addSuccess(test) + self._status = 'success' + self._details = details + + def addFailure(self, test, err=None, details=None): + super(TestByTestResult, self).addFailure(test, err, details) + self._status = 'failure' + self._details = self._err_to_details(test, err, details) + + def addError(self, test, err=None, details=None): + super(TestByTestResult, self).addError(test, err, details) + self._status = 'error' + self._details = self._err_to_details(test, err, details) + + def addSkip(self, test, reason=None, details=None): + super(TestByTestResult, self).addSkip(test, reason, details) + self._status = 'skip' + if details is None: + details = {'reason': text_content(reason)} + elif reason: + # XXX: What if details already has 'reason' key? + details['reason'] = text_content(reason) + self._details = details + + def addExpectedFailure(self, test, err=None, details=None): + super(TestByTestResult, self).addExpectedFailure(test, err, details) + self._status = 'xfail' + self._details = self._err_to_details(test, err, details) + + def addUnexpectedSuccess(self, test, details=None): + super(TestByTestResult, self).addUnexpectedSuccess(test, details) + self._status = 'success' + self._details = details + + +class TimestampingStreamResult(CopyStreamResult): + """A StreamResult decorator that assigns a timestamp when none is present. + + This is convenient for ensuring events are timestamped. 
+ """ + + def __init__(self, target): + super(TimestampingStreamResult, self).__init__([target]) + + def status(self, *args, **kwargs): + timestamp = kwargs.pop('timestamp', None) + if timestamp is None: + timestamp = datetime.datetime.now(utc) + super(TimestampingStreamResult, self).status( + *args, timestamp=timestamp, **kwargs) + + +class _StringException(Exception): + """An exception made from an arbitrary string.""" + + if not str_is_unicode: + def __init__(self, string): + if type(string) is not unicode: + raise TypeError("_StringException expects unicode, got %r" % + (string,)) + Exception.__init__(self, string) + + def __str__(self): + return self.args[0].encode("utf-8") + + def __unicode__(self): + return self.args[0] + # For 3.0 and above the default __str__ is fine, so we don't define one. + + def __hash__(self): + return id(self) + + def __eq__(self, other): + try: + return self.args == other.args + except AttributeError: + return False + + +def _format_text_attachment(name, text): + if '\n' in text: + return "%s: {{{\n%s\n}}}\n" % (name, text) + return "%s: {{{%s}}}" % (name, text) + + +def _details_to_str(details, special=None): + """Convert a details dict to a string. + + :param details: A dictionary mapping short names to ``Content`` objects. + :param special: If specified, an attachment that should have special + attention drawn to it. The primary attachment. Normally it's the + traceback that caused the test to fail. + :return: A formatted string that can be included in text test results. + """ + empty_attachments = [] + binary_attachments = [] + text_attachments = [] + special_content = None + # sorted is for testing, may want to remove that and use a dict + # subclass with defined order for items instead. 
+ for key, content in sorted(details.items()): + if content.content_type.type != 'text': + binary_attachments.append((key, content.content_type)) + continue + text = content.as_text().strip() + if not text: + empty_attachments.append(key) + continue + # We want the 'special' attachment to be at the bottom. + if key == special: + special_content = '%s\n' % (text,) + continue + text_attachments.append(_format_text_attachment(key, text)) + if text_attachments and not text_attachments[-1].endswith('\n'): + text_attachments.append('') + if special_content: + text_attachments.append(special_content) + lines = [] + if binary_attachments: + lines.append('Binary content:\n') + for name, content_type in binary_attachments: + lines.append(' %s (%s)\n' % (name, content_type)) + if empty_attachments: + lines.append('Empty attachments:\n') + for name in empty_attachments: + lines.append(' %s\n' % (name,)) + if (binary_attachments or empty_attachments) and text_attachments: + lines.append('\n') + lines.append('\n'.join(text_attachments)) + return _u('').join(lines) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py new file mode 100644 index 00000000000..db215ff12f8 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py @@ -0,0 +1,47 @@ +# Copyright (c) 2008-2013 testtools developers. See LICENSE for details. 
+
+"""Tests for testtools itself."""
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+ from testtools.tests import (
+ matchers,
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_distutilscmd,
+ test_fixturesupport,
+ test_helpers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_tags,
+ test_testcase,
+ test_testresult,
+ test_testsuite,
+ )
+ modules = [
+ matchers,
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_distutilscmd,
+ test_fixturesupport,
+ test_helpers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_tags,
+ test_testcase,
+ test_testresult,
+ test_testsuite,
+ ]
+ suites = map(lambda x: x.test_suite(), modules)
+ return TestSuite(suites) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py new file mode 100644 index 00000000000..f766da33c9f --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py @@ -0,0 +1,108 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Helpers for tests."""
+
+__all__ = [
+ 'LoggingResult',
+ ]
+
+import sys
+
+from extras import safe_hasattr
+
+from testtools import TestResult
+from testtools.content import StackLinesContent
+from testtools import runtest
+
+
+# Importing to preserve compatibility.
+safe_hasattr
+
+# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
+try:
+ raise Exception
+except Exception:
+ an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard to predict tests (because Python upstream are changing things). 
+class LoggingResult(TestResult): + """TestResult that logs its event to a list.""" + + def __init__(self, log): + self._events = log + super(LoggingResult, self).__init__() + + def startTest(self, test): + self._events.append(('startTest', test)) + super(LoggingResult, self).startTest(test) + + def stop(self): + self._events.append('stop') + super(LoggingResult, self).stop() + + def stopTest(self, test): + self._events.append(('stopTest', test)) + super(LoggingResult, self).stopTest(test) + + def addFailure(self, test, error): + self._events.append(('addFailure', test, error)) + super(LoggingResult, self).addFailure(test, error) + + def addError(self, test, error): + self._events.append(('addError', test, error)) + super(LoggingResult, self).addError(test, error) + + def addSkip(self, test, reason): + self._events.append(('addSkip', test, reason)) + super(LoggingResult, self).addSkip(test, reason) + + def addSuccess(self, test): + self._events.append(('addSuccess', test)) + super(LoggingResult, self).addSuccess(test) + + def startTestRun(self): + self._events.append('startTestRun') + super(LoggingResult, self).startTestRun() + + def stopTestRun(self): + self._events.append('stopTestRun') + super(LoggingResult, self).stopTestRun() + + def done(self): + self._events.append('done') + super(LoggingResult, self).done() + + def tags(self, new_tags, gone_tags): + self._events.append(('tags', new_tags, gone_tags)) + super(LoggingResult, self).tags(new_tags, gone_tags) + + def time(self, a_datetime): + self._events.append(('time', a_datetime)) + super(LoggingResult, self).time(a_datetime) + + +def is_stack_hidden(): + return StackLinesContent.HIDE_INTERNAL_STACK + + +def hide_testtools_stack(should_hide=True): + result = StackLinesContent.HIDE_INTERNAL_STACK + StackLinesContent.HIDE_INTERNAL_STACK = should_hide + return result + + +def run_with_stack_hidden(should_hide, f, *args, **kwargs): + old_should_hide = hide_testtools_stack(should_hide) + try: + return f(*args, 
**kwargs) + finally: + hide_testtools_stack(old_should_hide) + + +class FullStackRunTest(runtest.RunTest): + + def _run_user(self, fn, *args, **kwargs): + return run_with_stack_hidden( + False, + super(FullStackRunTest, self)._run_user, fn, *args, **kwargs) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py new file mode 100644 index 00000000000..ebab308e77c --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2009-2012 testtools developers. See LICENSE for details. + + +from unittest import TestSuite + + +def test_suite(): + from testtools.tests.matchers import ( + test_basic, + test_datastructures, + test_dict, + test_doctest, + test_exception, + test_filesystem, + test_higherorder, + test_impl, + ) + modules = [ + test_basic, + test_datastructures, + test_dict, + test_doctest, + test_exception, + test_filesystem, + test_higherorder, + test_impl, + ] + suites = map(lambda x: x.test_suite(), modules) + return TestSuite(suites) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py new file mode 100644 index 00000000000..3ff87278dae --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py @@ -0,0 +1,42 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +from testtools.tests.helpers import FullStackRunTest + + +class TestMatchersInterface(object): + + run_tests_with = FullStackRunTest + + def test_matches_match(self): + matcher = self.matches_matcher + matches = self.matches_matches + mismatches = self.matches_mismatches + for candidate in matches: + self.assertEqual(None, matcher.match(candidate)) + for candidate in mismatches: + mismatch = matcher.match(candidate) + self.assertNotEqual(None, mismatch) + self.assertNotEqual(None, getattr(mismatch, 'describe', None)) + + def test__str__(self): + # [(expected, object to __str__)]. + from testtools.matchers._doctest import DocTestMatches + examples = self.str_examples + for expected, matcher in examples: + self.assertThat(matcher, DocTestMatches(expected)) + + def test_describe_difference(self): + # [(expected, matchee, matcher), ...] + examples = self.describe_examples + for difference, matchee, matcher in examples: + mismatch = matcher.match(matchee) + self.assertEqual(difference, mismatch.describe()) + + def test_mismatch_details(self): + # The mismatch object must provide get_details, which must return a + # dictionary mapping names to Content objects. + examples = self.describe_examples + for difference, matchee, matcher in examples: + mismatch = matcher.match(matchee) + details = mismatch.get_details() + self.assertEqual(dict(details), details) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py new file mode 100644 index 00000000000..c53bc9e9c42 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py @@ -0,0 +1,396 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +import re + +from testtools import TestCase +from testtools.compat import ( + text_repr, + _b, + _u, + ) +from testtools.matchers._basic import ( + _BinaryMismatch, + Contains, + DoesNotEndWith, + DoesNotStartWith, + EndsWith, + Equals, + Is, + IsInstance, + LessThan, + GreaterThan, + HasLength, + MatchesRegex, + NotEquals, + SameMembers, + StartsWith, + ) +from testtools.tests.helpers import FullStackRunTest +from testtools.tests.matchers.helpers import TestMatchersInterface + + +class Test_BinaryMismatch(TestCase): + """Mismatches from binary comparisons need useful describe output""" + + _long_string = "This is a longish multiline non-ascii string\n\xa7" + _long_b = _b(_long_string) + _long_u = _u(_long_string) + + class CustomRepr(object): + def __init__(self, repr_string): + self._repr_string = repr_string + def __repr__(self): + return _u('<object ') + _u(self._repr_string) + _u('>') + + def test_short_objects(self): + o1, o2 = self.CustomRepr('a'), self.CustomRepr('b') + mismatch = _BinaryMismatch(o1, "!~", o2) + self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2)) + + def test_short_mixed_strings(self): + b, u = _b("\xa7"), _u("\xa7") + mismatch = _BinaryMismatch(b, "!~", u) + self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u)) + + def test_long_bytes(self): + one_line_b = self._long_b.replace(_b("\n"), _b(" ")) + mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(one_line_b), + text_repr(self._long_b, multiline=True))) + + def test_long_unicode(self): + one_line_u = self._long_u.replace("\n", " ") + mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(one_line_u), + text_repr(self._long_u, multiline=True))) + + def test_long_mixed_strings(self): + mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u) + 
self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(self._long_b, multiline=True), + text_repr(self._long_u, multiline=True))) + + def test_long_bytes_and_object(self): + obj = object() + mismatch = _BinaryMismatch(self._long_b, "!~", obj) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(self._long_b, multiline=True), + repr(obj))) + + def test_long_unicode_and_object(self): + obj = object() + mismatch = _BinaryMismatch(self._long_u, "!~", obj) + self.assertEqual(mismatch.describe(), + "%s:\nreference = %s\nactual = %s\n" % ("!~", + text_repr(self._long_u, multiline=True), + repr(obj))) + + +class TestEqualsInterface(TestCase, TestMatchersInterface): + + matches_matcher = Equals(1) + matches_matches = [1] + matches_mismatches = [2] + + str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))] + + describe_examples = [("1 != 2", 2, Equals(1))] + + +class TestNotEqualsInterface(TestCase, TestMatchersInterface): + + matches_matcher = NotEquals(1) + matches_matches = [2] + matches_mismatches = [1] + + str_examples = [ + ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))] + + describe_examples = [("1 == 1", 1, NotEquals(1))] + + +class TestIsInterface(TestCase, TestMatchersInterface): + + foo = object() + bar = object() + + matches_matcher = Is(foo) + matches_matches = [foo] + matches_mismatches = [bar, 1] + + str_examples = [("Is(2)", Is(2))] + + describe_examples = [("1 is not 2", 2, Is(1))] + + +class TestIsInstanceInterface(TestCase, TestMatchersInterface): + + class Foo:pass + + matches_matcher = IsInstance(Foo) + matches_matches = [Foo()] + matches_mismatches = [object(), 1, Foo] + + str_examples = [ + ("IsInstance(str)", IsInstance(str)), + ("IsInstance(str, int)", IsInstance(str, int)), + ] + + describe_examples = [ + ("'foo' is not an instance of int", 'foo', IsInstance(int)), + ("'foo' is not an instance of any of (int, type)", 'foo', 
+ IsInstance(int, type)), + ] + + +class TestLessThanInterface(TestCase, TestMatchersInterface): + + matches_matcher = LessThan(4) + matches_matches = [-5, 3] + matches_mismatches = [4, 5, 5000] + + str_examples = [ + ("LessThan(12)", LessThan(12)), + ] + + describe_examples = [ + ('4 is not > 5', 5, LessThan(4)), + ('4 is not > 4', 4, LessThan(4)), + ] + + +class TestGreaterThanInterface(TestCase, TestMatchersInterface): + + matches_matcher = GreaterThan(4) + matches_matches = [5, 8] + matches_mismatches = [-2, 0, 4] + + str_examples = [ + ("GreaterThan(12)", GreaterThan(12)), + ] + + describe_examples = [ + ('5 is not < 4', 4, GreaterThan(5)), + ('4 is not < 4', 4, GreaterThan(4)), + ] + + +class TestContainsInterface(TestCase, TestMatchersInterface): + + matches_matcher = Contains('foo') + matches_matches = ['foo', 'afoo', 'fooa'] + matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao'] + + str_examples = [ + ("Contains(1)", Contains(1)), + ("Contains('foo')", Contains('foo')), + ] + + describe_examples = [("1 not in 2", 2, Contains(1))] + + +class DoesNotStartWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_describe(self): + mismatch = DoesNotStartWith("fo", "bo") + self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe()) + + def test_describe_non_ascii_unicode(self): + string = _u("A\xA7") + suffix = _u("B\xA7") + mismatch = DoesNotStartWith(string, suffix) + self.assertEqual("%s does not start with %s." % ( + text_repr(string), text_repr(suffix)), + mismatch.describe()) + + def test_describe_non_ascii_bytes(self): + string = _b("A\xA7") + suffix = _b("B\xA7") + mismatch = DoesNotStartWith(string, suffix) + self.assertEqual("%r does not start with %r." 
% (string, suffix), + mismatch.describe()) + + +class StartsWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_str(self): + matcher = StartsWith("bar") + self.assertEqual("StartsWith('bar')", str(matcher)) + + def test_str_with_bytes(self): + b = _b("\xA7") + matcher = StartsWith(b) + self.assertEqual("StartsWith(%r)" % (b,), str(matcher)) + + def test_str_with_unicode(self): + u = _u("\xA7") + matcher = StartsWith(u) + self.assertEqual("StartsWith(%r)" % (u,), str(matcher)) + + def test_match(self): + matcher = StartsWith("bar") + self.assertIs(None, matcher.match("barf")) + + def test_mismatch_returns_does_not_start_with(self): + matcher = StartsWith("bar") + self.assertIsInstance(matcher.match("foo"), DoesNotStartWith) + + def test_mismatch_sets_matchee(self): + matcher = StartsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("foo", mismatch.matchee) + + def test_mismatch_sets_expected(self): + matcher = StartsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("bar", mismatch.expected) + + +class DoesNotEndWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_describe(self): + mismatch = DoesNotEndWith("fo", "bo") + self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe()) + + def test_describe_non_ascii_unicode(self): + string = _u("A\xA7") + suffix = _u("B\xA7") + mismatch = DoesNotEndWith(string, suffix) + self.assertEqual("%s does not end with %s." % ( + text_repr(string), text_repr(suffix)), + mismatch.describe()) + + def test_describe_non_ascii_bytes(self): + string = _b("A\xA7") + suffix = _b("B\xA7") + mismatch = DoesNotEndWith(string, suffix) + self.assertEqual("%r does not end with %r." 
% (string, suffix), + mismatch.describe()) + + +class EndsWithTests(TestCase): + + run_tests_with = FullStackRunTest + + def test_str(self): + matcher = EndsWith("bar") + self.assertEqual("EndsWith('bar')", str(matcher)) + + def test_str_with_bytes(self): + b = _b("\xA7") + matcher = EndsWith(b) + self.assertEqual("EndsWith(%r)" % (b,), str(matcher)) + + def test_str_with_unicode(self): + u = _u("\xA7") + matcher = EndsWith(u) + self.assertEqual("EndsWith(%r)" % (u,), str(matcher)) + + def test_match(self): + matcher = EndsWith("arf") + self.assertIs(None, matcher.match("barf")) + + def test_mismatch_returns_does_not_end_with(self): + matcher = EndsWith("bar") + self.assertIsInstance(matcher.match("foo"), DoesNotEndWith) + + def test_mismatch_sets_matchee(self): + matcher = EndsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("foo", mismatch.matchee) + + def test_mismatch_sets_expected(self): + matcher = EndsWith("bar") + mismatch = matcher.match("foo") + self.assertEqual("bar", mismatch.expected) + + +class TestSameMembers(TestCase, TestMatchersInterface): + + matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}]) + matches_matches = [ + [1, 1, 2, 3, {'foo': 'bar'}], + [3, {'foo': 'bar'}, 1, 2, 1], + [3, 2, 1, {'foo': 'bar'}, 1], + (2, {'foo': 'bar'}, 3, 1, 1), + ] + matches_mismatches = [ + set([1, 2, 3]), + [1, 1, 2, 3, 5], + [1, 2, 3, {'foo': 'bar'}], + 'foo', + ] + + describe_examples = [ + (("elements differ:\n" + "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n" + "actual = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n" + ": \n" + "missing: ['watermelon']\n" + "extra: ['sparrow']" + ), + ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',], + SameMembers( + ['apple', 'orange', 'canteloupe', 'watermelon', + 'lemon', 'banana',])), + ] + + str_examples = [ + ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])), + ] + + +class TestMatchesRegex(TestCase, TestMatchersInterface): + 
+ matches_matcher = MatchesRegex('a|b') + matches_matches = ['a', 'b'] + matches_mismatches = ['c'] + + str_examples = [ + ("MatchesRegex('a|b')", MatchesRegex('a|b')), + ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)), + ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)), + ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))), + ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))), + ] + + describe_examples = [ + ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')), + ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')), + ("%r does not match /\\s+\\xa7/" % (_b('c'),), + _b('c'), MatchesRegex(_b("\\s+\xA7"))), + ("%r does not match /\\s+\\xa7/" % (_u('c'),), + _u('c'), MatchesRegex(_u("\\s+\xA7"))), + ] + + +class TestHasLength(TestCase, TestMatchersInterface): + + matches_matcher = HasLength(2) + matches_matches = [[1, 2]] + matches_mismatches = [[], [1], [3, 2, 1]] + + str_examples = [ + ("HasLength(2)", HasLength(2)), + ] + + describe_examples = [ + ("len([]) != 1", [], HasLength(1)), + ] + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py new file mode 100644 index 00000000000..f6d9d8658c8 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py @@ -0,0 +1,209 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +import doctest +import re +import sys + +from testtools import TestCase +from testtools.compat import StringIO +from testtools.matchers import ( + Annotate, + Equals, + LessThan, + MatchesRegex, + NotEquals, + ) +from testtools.matchers._datastructures import ( + ContainsAll, + MatchesListwise, + MatchesStructure, + MatchesSetwise, + ) +from testtools.tests.helpers import FullStackRunTest +from testtools.tests.matchers.helpers import TestMatchersInterface + + +def run_doctest(obj, name): + p = doctest.DocTestParser() + t = p.get_doctest( + obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0) + r = doctest.DocTestRunner() + output = StringIO() + r.run(t, out=output.write) + return r.failures, output.getvalue() + + +class TestMatchesListwise(TestCase): + + run_tests_with = FullStackRunTest + + def test_docstring(self): + failure_count, output = run_doctest( + MatchesListwise, "MatchesListwise") + if failure_count: + self.fail("Doctest failed with %s" % output) + + +class TestMatchesStructure(TestCase, TestMatchersInterface): + + class SimpleClass: + def __init__(self, x, y): + self.x = x + self.y = y + + matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2)) + matches_matches = [SimpleClass(1, 2)] + matches_mismatches = [ + SimpleClass(2, 2), + SimpleClass(1, 1), + SimpleClass(3, 3), + ] + + str_examples = [ + ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))), + ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))), + ("MatchesStructure(x=Equals(1), y=Equals(2))", + MatchesStructure(x=Equals(1), y=Equals(2))), + ] + + describe_examples = [ + ("""\ +Differences: [ +3 != 1: x +]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))), + ("""\ +Differences: [ +3 != 2: y +]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))), + ("""\ +Differences: [ +0 != 1: x +0 != 2: y +]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))), + ] + + def test_fromExample(self): + self.assertThat( + 
self.SimpleClass(1, 2), + MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x')) + + def test_byEquality(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure.byEquality(x=1)) + + def test_withStructure(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure.byMatcher(LessThan, x=2)) + + def test_update(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure(x=NotEquals(1)).update(x=Equals(1))) + + def test_update_none(self): + self.assertThat( + self.SimpleClass(1, 2), + MatchesStructure(x=Equals(1), z=NotEquals(42)).update( + z=None)) + + +class TestMatchesSetwise(TestCase): + + run_tests_with = FullStackRunTest + + def assertMismatchWithDescriptionMatching(self, value, matcher, + description_matcher): + mismatch = matcher.match(value) + if mismatch is None: + self.fail("%s matched %s" % (matcher, value)) + actual_description = mismatch.describe() + self.assertThat( + actual_description, + Annotate( + "%s matching %s" % (matcher, value), + description_matcher)) + + def test_matches(self): + self.assertIs( + None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1])) + + def test_mismatches(self): + self.assertMismatchWithDescriptionMatching( + [2, 3], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex('.*There was 1 mismatch$', re.S)) + + def test_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)), + Equals('There was 1 matcher left over: Equals(1)')) + + def test_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)), + Equals('There was 1 value left over: [3]')) + + def test_two_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)), + MatchesRegex( + 'There were 2 matchers left over: Equals\([12]\), ' + 'Equals\([12]\)')) + + def test_two_too_many_values(self): + 
self.assertMismatchWithDescriptionMatching( + [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex( + 'There were 2 values left over: \[[34], [34]\]')) + + def test_mismatch_and_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)), + MatchesRegex( + '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)', + re.S)) + + def test_mismatch_and_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex( + '.*There was 1 mismatch and 1 extra value: \[[34]\]', + re.S)) + + def test_mismatch_and_two_too_many_matchers(self): + self.assertMismatchWithDescriptionMatching( + [3, 4], MatchesSetwise( + Equals(0), Equals(1), Equals(2), Equals(3)), + MatchesRegex( + '.*There was 1 mismatch and 2 extra matchers: ' + 'Equals\([012]\), Equals\([012]\)', re.S)) + + def test_mismatch_and_two_too_many_values(self): + self.assertMismatchWithDescriptionMatching( + [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)), + MatchesRegex( + '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]', + re.S)) + + +class TestContainsAllInterface(TestCase, TestMatchersInterface): + + matches_matcher = ContainsAll(['foo', 'bar']) + matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']] + matches_mismatches = [['f', 'g'], ['foo', 'baz'], []] + + str_examples = [( + "MatchesAll(Contains('foo'), Contains('bar'))", + ContainsAll(['foo', 'bar'])), + ] + + describe_examples = [("""Differences: [ +'baz' not in 'foo' +]""", + 'foo', ContainsAll(['foo', 'baz']))] + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py new file mode 100644 index 00000000000..00368dd6ceb 
--- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py @@ -0,0 +1,227 @@ +from testtools import TestCase +from testtools.matchers import ( + Equals, + NotEquals, + Not, + ) +from testtools.matchers._dict import ( + ContainedByDict, + ContainsDict, + KeysEqual, + MatchesAllDict, + MatchesDict, + _SubDictOf, + ) +from testtools.tests.matchers.helpers import TestMatchersInterface + + +class TestMatchesAllDictInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)}) + matches_matches = [3, 4] + matches_mismatches = [1, 2] + + str_examples = [ + ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})", + matches_matcher)] + + describe_examples = [ + ("""a: 1 == 1""", 1, matches_matcher), + ] + + +class TestKeysEqualWithList(TestCase, TestMatchersInterface): + + matches_matcher = KeysEqual('foo', 'bar') + matches_matches = [ + {'foo': 0, 'bar': 1}, + ] + matches_mismatches = [ + {}, + {'foo': 0}, + {'bar': 1}, + {'foo': 0, 'bar': 1, 'baz': 2}, + {'a': None, 'b': None, 'c': None}, + ] + + str_examples = [ + ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')), + ] + + describe_examples = [] + + def test_description(self): + matchee = {'foo': 0, 'bar': 1, 'baz': 2} + mismatch = KeysEqual('foo', 'bar').match(matchee) + description = mismatch.describe() + self.assertThat( + description, Equals( + "['bar', 'foo'] does not match %r: Keys not equal" + % (matchee,))) + + +class TestKeysEqualWithDict(TestKeysEqualWithList): + + matches_matcher = KeysEqual({'foo': 3, 'bar': 4}) + + +class TestSubDictOf(TestCase, TestMatchersInterface): + + matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'}) + + matches_matches = [ + {'foo': 'bar', 'baz': 'qux'}, + {'foo': 'bar'}, + ] + + matches_mismatches = [ + {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'}, + {'foo': 'bar', 'cat': 'dog'}, + ] + + str_examples = [] + describe_examples = [] + + +class 
TestMatchesDict(TestCase, TestMatchersInterface): + + matches_matcher = MatchesDict( + {'foo': Equals('bar'), 'baz': Not(Equals('qux'))}) + + matches_matches = [ + {'foo': 'bar', 'baz': None}, + {'foo': 'bar', 'baz': 'quux'}, + ] + matches_mismatches = [ + {}, + {'foo': 'bar', 'baz': 'qux'}, + {'foo': 'bop', 'baz': 'qux'}, + {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, + {'foo': 'bar', 'cat': 'dog'}, + ] + + str_examples = [ + ("MatchesDict({'baz': %s, 'foo': %s})" % ( + Not(Equals('qux')), Equals('bar')), + matches_matcher), + ] + + describe_examples = [ + ("Missing: {\n" + " 'baz': Not(Equals('qux')),\n" + " 'foo': Equals('bar'),\n" + "}", + {}, matches_matcher), + ("Differences: {\n" + " 'baz': 'qux' matches Equals('qux'),\n" + "}", + {'foo': 'bar', 'baz': 'qux'}, matches_matcher), + ("Differences: {\n" + " 'baz': 'qux' matches Equals('qux'),\n" + " 'foo': 'bar' != 'bop',\n" + "}", + {'foo': 'bop', 'baz': 'qux'}, matches_matcher), + ("Extra: {\n" + " 'cat': 'dog',\n" + "}", + {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher), + ("Extra: {\n" + " 'cat': 'dog',\n" + "}\n" + "Missing: {\n" + " 'baz': Not(Equals('qux')),\n" + "}", + {'foo': 'bar', 'cat': 'dog'}, matches_matcher), + ] + + +class TestContainsDict(TestCase, TestMatchersInterface): + + matches_matcher = ContainsDict( + {'foo': Equals('bar'), 'baz': Not(Equals('qux'))}) + + matches_matches = [ + {'foo': 'bar', 'baz': None}, + {'foo': 'bar', 'baz': 'quux'}, + {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, + ] + matches_mismatches = [ + {}, + {'foo': 'bar', 'baz': 'qux'}, + {'foo': 'bop', 'baz': 'qux'}, + {'foo': 'bar', 'cat': 'dog'}, + {'foo': 'bar'}, + ] + + str_examples = [ + ("ContainsDict({'baz': %s, 'foo': %s})" % ( + Not(Equals('qux')), Equals('bar')), + matches_matcher), + ] + + describe_examples = [ + ("Missing: {\n" + " 'baz': Not(Equals('qux')),\n" + " 'foo': Equals('bar'),\n" + "}", + {}, matches_matcher), + ("Differences: {\n" + " 'baz': 'qux' matches Equals('qux'),\n" + "}", + 
{'foo': 'bar', 'baz': 'qux'}, matches_matcher), + ("Differences: {\n" + " 'baz': 'qux' matches Equals('qux'),\n" + " 'foo': 'bar' != 'bop',\n" + "}", + {'foo': 'bop', 'baz': 'qux'}, matches_matcher), + ("Missing: {\n" + " 'baz': Not(Equals('qux')),\n" + "}", + {'foo': 'bar', 'cat': 'dog'}, matches_matcher), + ] + + +class TestContainedByDict(TestCase, TestMatchersInterface): + + matches_matcher = ContainedByDict( + {'foo': Equals('bar'), 'baz': Not(Equals('qux'))}) + + matches_matches = [ + {}, + {'foo': 'bar'}, + {'foo': 'bar', 'baz': 'quux'}, + {'baz': 'quux'}, + ] + matches_mismatches = [ + {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, + {'foo': 'bar', 'baz': 'qux'}, + {'foo': 'bop', 'baz': 'qux'}, + {'foo': 'bar', 'cat': 'dog'}, + ] + + str_examples = [ + ("ContainedByDict({'baz': %s, 'foo': %s})" % ( + Not(Equals('qux')), Equals('bar')), + matches_matcher), + ] + + describe_examples = [ + ("Differences: {\n" + " 'baz': 'qux' matches Equals('qux'),\n" + "}", + {'foo': 'bar', 'baz': 'qux'}, matches_matcher), + ("Differences: {\n" + " 'baz': 'qux' matches Equals('qux'),\n" + " 'foo': 'bar' != 'bop',\n" + "}", + {'foo': 'bop', 'baz': 'qux'}, matches_matcher), + ("Extra: {\n" + " 'cat': 'dog',\n" + "}", + {'foo': 'bar', 'cat': 'dog'}, matches_matcher), + ] + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py new file mode 100644 index 00000000000..81b9579dbf0 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py @@ -0,0 +1,82 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +import doctest + +from testtools import TestCase +from testtools.compat import ( + str_is_unicode, + _b, + _u, + ) +from testtools.matchers._doctest import DocTestMatches +from testtools.tests.helpers import FullStackRunTest +from testtools.tests.matchers.helpers import TestMatchersInterface + + + +class TestDocTestMatchesInterface(TestCase, TestMatchersInterface): + + matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS) + matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"] + matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"] + + str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')", + DocTestMatches("Ran 1 test in ...s")), + ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)), + ] + + describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n' + ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s", + DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))] + + +class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface): + + matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS) + matches_matches = [_u("\xa7"), _u("\xa7 more\n")] + matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")] + + str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),), + DocTestMatches(_u("\xa7"))), + ] + + describe_examples = [( + _u("Expected:\n \xa7\nGot:\n a\n"), + "a", + DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))] + + +class TestDocTestMatchesSpecific(TestCase): + + run_tests_with = FullStackRunTest + + def test___init__simple(self): + matcher = DocTestMatches("foo") + self.assertEqual("foo\n", matcher.want) + + def test___init__flags(self): + matcher = DocTestMatches("bar\n", doctest.ELLIPSIS) + self.assertEqual("bar\n", matcher.want) + self.assertEqual(doctest.ELLIPSIS, matcher.flags) + + def test_describe_non_ascii_bytes(self): + """Even with bytestrings, the mismatch should be coercible to unicode + + DocTestMatches is intended for text, but the Python 2 str type 
also + permits arbitrary binary inputs. This is a slightly bogus thing to do, + and under Python 3 using bytes objects will reasonably raise an error. + """ + header = _b("\x89PNG\r\n\x1a\n...") + if str_is_unicode: + self.assertRaises(TypeError, + DocTestMatches, header, doctest.ELLIPSIS) + return + matcher = DocTestMatches(header, doctest.ELLIPSIS) + mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;")) + # Must be treatable as unicode text, the exact output matters less + self.assertTrue(unicode(mismatch.describe())) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py new file mode 100644 index 00000000000..ef7185f19a4 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py @@ -0,0 +1,192 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +import sys + +from testtools import TestCase +from testtools.matchers import ( + AfterPreprocessing, + Equals, + ) +from testtools.matchers._exception import ( + MatchesException, + Raises, + raises, + ) +from testtools.tests.helpers import FullStackRunTest +from testtools.tests.matchers.helpers import TestMatchersInterface + + +def make_error(type, *args, **kwargs): + try: + raise type(*args, **kwargs) + except type: + return sys.exc_info() + + +class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException(ValueError("foo")) + error_foo = make_error(ValueError, 'foo') + error_bar = make_error(ValueError, 'bar') + error_base_foo = make_error(Exception, 'foo') + matches_matches = [error_foo] + matches_mismatches = [error_bar, error_base_foo] + + str_examples = [ + ("MatchesException(Exception('foo',))", + MatchesException(Exception('foo'))) + ] + describe_examples = [ + ("%r is not a %r" % (Exception, ValueError), + error_base_foo, + MatchesException(ValueError("foo"))), + ("ValueError('bar',) has different arguments to ValueError('foo',).", + error_bar, + MatchesException(ValueError("foo"))), + ] + + +class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException(ValueError) + error_foo = make_error(ValueError, 'foo') + error_sub = make_error(UnicodeError, 'bar') + error_base_foo = make_error(Exception, 'foo') + matches_matches = [error_foo, error_sub] + matches_mismatches = [error_base_foo] + + str_examples = [ + ("MatchesException(%r)" % Exception, + MatchesException(Exception)) + ] + describe_examples = [ + ("%r is not a %r" % (Exception, ValueError), + error_base_foo, + MatchesException(ValueError)), + ] + + +class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException(ValueError, 'fo.') + error_foo = make_error(ValueError, 'foo') + error_sub = make_error(UnicodeError, 'foo') + error_bar = 
make_error(ValueError, 'bar') + matches_matches = [error_foo, error_sub] + matches_mismatches = [error_bar] + + str_examples = [ + ("MatchesException(%r)" % Exception, + MatchesException(Exception, 'fo.')) + ] + describe_examples = [ + ("'bar' does not match /fo./", + error_bar, MatchesException(ValueError, "fo.")), + ] + + +class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesException( + ValueError, AfterPreprocessing(str, Equals('foo'))) + error_foo = make_error(ValueError, 'foo') + error_sub = make_error(UnicodeError, 'foo') + error_bar = make_error(ValueError, 'bar') + matches_matches = [error_foo, error_sub] + matches_mismatches = [error_bar] + + str_examples = [ + ("MatchesException(%r)" % Exception, + MatchesException(Exception, Equals('foo'))) + ] + describe_examples = [ + ("5 != %r" % (error_bar[1],), + error_bar, MatchesException(ValueError, Equals(5))), + ] + + +class TestRaisesInterface(TestCase, TestMatchersInterface): + + matches_matcher = Raises() + def boom(): + raise Exception('foo') + matches_matches = [boom] + matches_mismatches = [lambda:None] + + # Tricky to get function objects to render constantly, and the interfaces + # helper uses assertEqual rather than (for instance) DocTestMatches. + str_examples = [] + + describe_examples = [] + + +class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface): + + matches_matcher = Raises( + exception_matcher=MatchesException(Exception('foo'))) + def boom_bar(): + raise Exception('bar') + def boom_foo(): + raise Exception('foo') + matches_matches = [boom_foo] + matches_mismatches = [lambda:None, boom_bar] + + # Tricky to get function objects to render constantly, and the interfaces + # helper uses assertEqual rather than (for instance) DocTestMatches. 
+ str_examples = [] + + describe_examples = [] + + +class TestRaisesBaseTypes(TestCase): + + run_tests_with = FullStackRunTest + + def raiser(self): + raise KeyboardInterrupt('foo') + + def test_KeyboardInterrupt_matched(self): + # When KeyboardInterrupt is matched, it is swallowed. + matcher = Raises(MatchesException(KeyboardInterrupt)) + self.assertThat(self.raiser, matcher) + + def test_KeyboardInterrupt_propogates(self): + # The default 'it raised' propogates KeyboardInterrupt. + match_keyb = Raises(MatchesException(KeyboardInterrupt)) + def raise_keyb_from_match(): + matcher = Raises() + matcher.match(self.raiser) + self.assertThat(raise_keyb_from_match, match_keyb) + + def test_KeyboardInterrupt_match_Exception_propogates(self): + # If the raised exception isn't matched, and it is not a subclass of + # Exception, it is propogated. + match_keyb = Raises(MatchesException(KeyboardInterrupt)) + def raise_keyb_from_match(): + if sys.version_info > (2, 5): + matcher = Raises(MatchesException(Exception)) + else: + # On Python 2.4 KeyboardInterrupt is a StandardError subclass + # but should propogate from less generic exception matchers + matcher = Raises(MatchesException(EnvironmentError)) + matcher.match(self.raiser) + self.assertThat(raise_keyb_from_match, match_keyb) + + +class TestRaisesConvenience(TestCase): + + run_tests_with = FullStackRunTest + + def test_exc_type(self): + self.assertThat(lambda: 1/0, raises(ZeroDivisionError)) + + def test_exc_value(self): + e = RuntimeError("You lose!") + def raiser(): + raise e + self.assertThat(raiser, raises(e)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py new file mode 100644 index 00000000000..917ff2ed058 --- /dev/null +++ 
b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py @@ -0,0 +1,243 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. + +import os +import shutil +import tarfile +import tempfile + +from testtools import TestCase +from testtools.matchers import ( + Contains, + DocTestMatches, + Equals, + ) +from testtools.matchers._filesystem import ( + DirContains, + DirExists, + FileContains, + FileExists, + HasPermissions, + PathExists, + SamePath, + TarballContains, + ) + + +class PathHelpers(object): + + def mkdtemp(self): + directory = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, directory) + return directory + + def create_file(self, filename, contents=''): + fp = open(filename, 'w') + try: + fp.write(contents) + finally: + fp.close() + + def touch(self, filename): + return self.create_file(filename) + + +class TestPathExists(TestCase, PathHelpers): + + def test_exists(self): + tempdir = self.mkdtemp() + self.assertThat(tempdir, PathExists()) + + def test_not_exists(self): + doesntexist = os.path.join(self.mkdtemp(), 'doesntexist') + mismatch = PathExists().match(doesntexist) + self.assertThat( + "%s does not exist." % doesntexist, Equals(mismatch.describe())) + + +class TestDirExists(TestCase, PathHelpers): + + def test_exists(self): + tempdir = self.mkdtemp() + self.assertThat(tempdir, DirExists()) + + def test_not_exists(self): + doesntexist = os.path.join(self.mkdtemp(), 'doesntexist') + mismatch = DirExists().match(doesntexist) + self.assertThat( + PathExists().match(doesntexist).describe(), + Equals(mismatch.describe())) + + def test_not_a_directory(self): + filename = os.path.join(self.mkdtemp(), 'foo') + self.touch(filename) + mismatch = DirExists().match(filename) + self.assertThat( + "%s is not a directory." 
% filename, Equals(mismatch.describe())) + + +class TestFileExists(TestCase, PathHelpers): + + def test_exists(self): + tempdir = self.mkdtemp() + filename = os.path.join(tempdir, 'filename') + self.touch(filename) + self.assertThat(filename, FileExists()) + + def test_not_exists(self): + doesntexist = os.path.join(self.mkdtemp(), 'doesntexist') + mismatch = FileExists().match(doesntexist) + self.assertThat( + PathExists().match(doesntexist).describe(), + Equals(mismatch.describe())) + + def test_not_a_file(self): + tempdir = self.mkdtemp() + mismatch = FileExists().match(tempdir) + self.assertThat( + "%s is not a file." % tempdir, Equals(mismatch.describe())) + + +class TestDirContains(TestCase, PathHelpers): + + def test_empty(self): + tempdir = self.mkdtemp() + self.assertThat(tempdir, DirContains([])) + + def test_not_exists(self): + doesntexist = os.path.join(self.mkdtemp(), 'doesntexist') + mismatch = DirContains([]).match(doesntexist) + self.assertThat( + PathExists().match(doesntexist).describe(), + Equals(mismatch.describe())) + + def test_contains_files(self): + tempdir = self.mkdtemp() + self.touch(os.path.join(tempdir, 'foo')) + self.touch(os.path.join(tempdir, 'bar')) + self.assertThat(tempdir, DirContains(['bar', 'foo'])) + + def test_matcher(self): + tempdir = self.mkdtemp() + self.touch(os.path.join(tempdir, 'foo')) + self.touch(os.path.join(tempdir, 'bar')) + self.assertThat(tempdir, DirContains(matcher=Contains('bar'))) + + def test_neither_specified(self): + self.assertRaises(AssertionError, DirContains) + + def test_both_specified(self): + self.assertRaises( + AssertionError, DirContains, filenames=[], matcher=Contains('a')) + + def test_does_not_contain_files(self): + tempdir = self.mkdtemp() + self.touch(os.path.join(tempdir, 'foo')) + mismatch = DirContains(['bar', 'foo']).match(tempdir) + self.assertThat( + Equals(['bar', 'foo']).match(['foo']).describe(), + Equals(mismatch.describe())) + + +class TestFileContains(TestCase, PathHelpers): + + 
def test_not_exists(self): + doesntexist = os.path.join(self.mkdtemp(), 'doesntexist') + mismatch = FileContains('').match(doesntexist) + self.assertThat( + PathExists().match(doesntexist).describe(), + Equals(mismatch.describe())) + + def test_contains(self): + tempdir = self.mkdtemp() + filename = os.path.join(tempdir, 'foo') + self.create_file(filename, 'Hello World!') + self.assertThat(filename, FileContains('Hello World!')) + + def test_matcher(self): + tempdir = self.mkdtemp() + filename = os.path.join(tempdir, 'foo') + self.create_file(filename, 'Hello World!') + self.assertThat( + filename, FileContains(matcher=DocTestMatches('Hello World!'))) + + def test_neither_specified(self): + self.assertRaises(AssertionError, FileContains) + + def test_both_specified(self): + self.assertRaises( + AssertionError, FileContains, contents=[], matcher=Contains('a')) + + def test_does_not_contain(self): + tempdir = self.mkdtemp() + filename = os.path.join(tempdir, 'foo') + self.create_file(filename, 'Goodbye Cruel World!') + mismatch = FileContains('Hello World!').match(filename) + self.assertThat( + Equals('Hello World!').match('Goodbye Cruel World!').describe(), + Equals(mismatch.describe())) +class TestTarballContains(TestCase, PathHelpers): + + def test_match(self): + tempdir = self.mkdtemp() + in_temp_dir = lambda x: os.path.join(tempdir, x) + self.touch(in_temp_dir('a')) + self.touch(in_temp_dir('b')) + tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w') + tarball.add(in_temp_dir('a'), 'a') + tarball.add(in_temp_dir('b'), 'b') + tarball.close() + self.assertThat( + in_temp_dir('foo.tar.gz'), TarballContains(['b', 'a'])) + + def test_mismatch(self): + tempdir = self.mkdtemp() + in_temp_dir = lambda x: os.path.join(tempdir, x) + self.touch(in_temp_dir('a')) + self.touch(in_temp_dir('b')) + tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w') + tarball.add(in_temp_dir('a'), 'a') + tarball.add(in_temp_dir('b'), 'b') + tarball.close() + mismatch = 
TarballContains(['d', 'c']).match(in_temp_dir('foo.tar.gz')) + self.assertEqual( + mismatch.describe(), + Equals(['c', 'd']).match(['a', 'b']).describe()) + + +class TestSamePath(TestCase, PathHelpers): + + def test_same_string(self): + self.assertThat('foo', SamePath('foo')) + + def test_relative_and_absolute(self): + path = 'foo' + abspath = os.path.abspath(path) + self.assertThat(path, SamePath(abspath)) + self.assertThat(abspath, SamePath(path)) + + def test_real_path(self): + tempdir = self.mkdtemp() + source = os.path.join(tempdir, 'source') + self.touch(source) + target = os.path.join(tempdir, 'target') + try: + os.symlink(source, target) + except (AttributeError, NotImplementedError): + self.skip("No symlink support") + self.assertThat(source, SamePath(target)) + self.assertThat(target, SamePath(source)) + + +class TestHasPermissions(TestCase, PathHelpers): + + def test_match(self): + tempdir = self.mkdtemp() + filename = os.path.join(tempdir, 'filename') + self.touch(filename) + permissions = oct(os.stat(filename).st_mode)[-4:] + self.assertThat(filename, HasPermissions(permissions)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py new file mode 100644 index 00000000000..fb86b7fe2f9 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py @@ -0,0 +1,254 @@ +# Copyright (c) 2008-2011 testtools developers. See LICENSE for details. 
+ +from testtools import TestCase +from testtools.matchers import ( + DocTestMatches, + Equals, + LessThan, + MatchesStructure, + Mismatch, + NotEquals, + ) +from testtools.matchers._higherorder import ( + AfterPreprocessing, + AllMatch, + Annotate, + AnnotatedMismatch, + AnyMatch, + MatchesAny, + MatchesAll, + MatchesPredicate, + MatchesPredicateWithParams, + Not, + ) +from testtools.tests.helpers import FullStackRunTest +from testtools.tests.matchers.helpers import TestMatchersInterface + + +class TestAllMatch(TestCase, TestMatchersInterface): + + matches_matcher = AllMatch(LessThan(10)) + matches_matches = [ + [9, 9, 9], + (9, 9), + iter([9, 9, 9, 9, 9]), + ] + matches_mismatches = [ + [11, 9, 9], + iter([9, 12, 9, 11]), + ] + + str_examples = [ + ("AllMatch(LessThan(12))", AllMatch(LessThan(12))), + ] + + describe_examples = [ + ('Differences: [\n' + '10 is not > 11\n' + '10 is not > 10\n' + ']', + [11, 9, 10], + AllMatch(LessThan(10))), + ] + + +class TestAnyMatch(TestCase, TestMatchersInterface): + + matches_matcher = AnyMatch(Equals('elephant')) + matches_matches = [ + ['grass', 'cow', 'steak', 'milk', 'elephant'], + (13, 'elephant'), + ['elephant', 'elephant', 'elephant'], + set(['hippo', 'rhino', 'elephant']), + ] + matches_mismatches = [ + [], + ['grass', 'cow', 'steak', 'milk'], + (13, 12, 10), + ['element', 'hephalump', 'pachyderm'], + set(['hippo', 'rhino', 'diplodocus']), + ] + + str_examples = [ + ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))), + ] + + describe_examples = [ + ('Differences: [\n' + '7 != 11\n' + '7 != 9\n' + '7 != 10\n' + ']', + [11, 9, 10], + AnyMatch(Equals(7))), + ] + + +class TestAfterPreprocessing(TestCase, TestMatchersInterface): + + def parity(x): + return x % 2 + + matches_matcher = AfterPreprocessing(parity, Equals(1)) + matches_matches = [3, 5] + matches_mismatches = [2] + + str_examples = [ + ("AfterPreprocessing(<function parity>, Equals(1))", + AfterPreprocessing(parity, Equals(1))), + ] + + 
describe_examples = [ + ("1 != 0: after <function parity> on 2", 2, + AfterPreprocessing(parity, Equals(1))), + ("1 != 0", 2, + AfterPreprocessing(parity, Equals(1), annotate=False)), + ] + +class TestMatchersAnyInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2")) + matches_matches = ["1", "2"] + matches_mismatches = ["3"] + + str_examples = [( + "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))", + MatchesAny(DocTestMatches("1"), DocTestMatches("2"))), + ] + + describe_examples = [("""Differences: [ +Expected: + 1 +Got: + 3 + +Expected: + 2 +Got: + 3 + +]""", + "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))] + + +class TestMatchesAllInterface(TestCase, TestMatchersInterface): + + matches_matcher = MatchesAll(NotEquals(1), NotEquals(2)) + matches_matches = [3, 4] + matches_mismatches = [1, 2] + + str_examples = [ + ("MatchesAll(NotEquals(1), NotEquals(2))", + MatchesAll(NotEquals(1), NotEquals(2)))] + + describe_examples = [ + ("""Differences: [ +1 == 1 +]""", + 1, MatchesAll(NotEquals(1), NotEquals(2))), + ("1 == 1", 1, + MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)), + ] + + +class TestAnnotate(TestCase, TestMatchersInterface): + + matches_matcher = Annotate("foo", Equals(1)) + matches_matches = [1] + matches_mismatches = [2] + + str_examples = [ + ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))] + + describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))] + + def test_if_message_no_message(self): + # Annotate.if_message returns the given matcher if there is no + # message. + matcher = Equals(1) + not_annotated = Annotate.if_message('', matcher) + self.assertIs(matcher, not_annotated) + + def test_if_message_given_message(self): + # Annotate.if_message returns an annotated version of the matcher if a + # message is provided. 
+ matcher = Equals(1) + expected = Annotate('foo', matcher) + annotated = Annotate.if_message('foo', matcher) + self.assertThat( + annotated, + MatchesStructure.fromExample(expected, 'annotation', 'matcher')) + + +class TestAnnotatedMismatch(TestCase): + + run_tests_with = FullStackRunTest + + def test_forwards_details(self): + x = Mismatch('description', {'foo': 'bar'}) + annotated = AnnotatedMismatch("annotation", x) + self.assertEqual(x.get_details(), annotated.get_details()) + + +class TestNotInterface(TestCase, TestMatchersInterface): + + matches_matcher = Not(Equals(1)) + matches_matches = [2] + matches_mismatches = [1] + + str_examples = [ + ("Not(Equals(1))", Not(Equals(1))), + ("Not(Equals('1'))", Not(Equals('1')))] + + describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))] + + +def is_even(x): + return x % 2 == 0 + + +class TestMatchesPredicate(TestCase, TestMatchersInterface): + + matches_matcher = MatchesPredicate(is_even, "%s is not even") + matches_matches = [2, 4, 6, 8] + matches_mismatches = [3, 5, 7, 9] + + str_examples = [ + ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"), + MatchesPredicate(is_even, "%s is not even")), + ] + + describe_examples = [ + ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")), + ] + + +def between(x, low, high): + return low < x < high + + +class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface): + + matches_matcher = MatchesPredicateWithParams( + between, "{0} is not between {1} and {2}")(1, 9) + matches_matches = [2, 4, 6, 8] + matches_mismatches = [0, 1, 9, 10] + + str_examples = [ + ("MatchesPredicateWithParams(%r, %r)(%s)" % ( + between, "{0} is not between {1} and {2}", "1, 2"), + MatchesPredicateWithParams( + between, "{0} is not between {1} and {2}")(1, 2)), + ("Between(1, 2)", MatchesPredicateWithParams( + between, "{0} is not between {1} and {2}", "Between")(1, 2)), + ] + + describe_examples = [ + ('1 is not between 2 and 3', 1, MatchesPredicateWithParams( + 
between, "{0} is not between {1} and {2}")(2, 3)), + ] + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py new file mode 100644 index 00000000000..10967ead25b --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py @@ -0,0 +1,132 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. + +"""Tests for matchers.""" + +from testtools import ( + Matcher, # check that Matcher is exposed at the top level for docs. + TestCase, + ) +from testtools.compat import ( + str_is_unicode, + text_repr, + _u, + ) +from testtools.matchers import ( + Equals, + MatchesException, + Raises, + ) +from testtools.matchers._impl import ( + Mismatch, + MismatchDecorator, + MismatchError, + ) +from testtools.tests.helpers import FullStackRunTest + +# Silence pyflakes. +Matcher + + +class TestMismatch(TestCase): + + run_tests_with = FullStackRunTest + + def test_constructor_arguments(self): + mismatch = Mismatch("some description", {'detail': "things"}) + self.assertEqual("some description", mismatch.describe()) + self.assertEqual({'detail': "things"}, mismatch.get_details()) + + def test_constructor_no_arguments(self): + mismatch = Mismatch() + self.assertThat(mismatch.describe, + Raises(MatchesException(NotImplementedError))) + self.assertEqual({}, mismatch.get_details()) + + +class TestMismatchError(TestCase): + + def test_is_assertion_error(self): + # MismatchError is an AssertionError, so that most of the time, it + # looks like a test failure, rather than an error. 
+ def raise_mismatch_error(): + raise MismatchError(2, Equals(3), Equals(3).match(2)) + self.assertRaises(AssertionError, raise_mismatch_error) + + def test_default_description_is_mismatch(self): + mismatch = Equals(3).match(2) + e = MismatchError(2, Equals(3), mismatch) + self.assertEqual(mismatch.describe(), str(e)) + + def test_default_description_unicode(self): + matchee = _u('\xa7') + matcher = Equals(_u('a')) + mismatch = matcher.match(matchee) + e = MismatchError(matchee, matcher, mismatch) + self.assertEqual(mismatch.describe(), str(e)) + + def test_verbose_description(self): + matchee = 2 + matcher = Equals(3) + mismatch = matcher.match(2) + e = MismatchError(matchee, matcher, mismatch, True) + expected = ( + 'Match failed. Matchee: %r\n' + 'Matcher: %s\n' + 'Difference: %s\n' % ( + matchee, + matcher, + matcher.match(matchee).describe(), + )) + self.assertEqual(expected, str(e)) + + def test_verbose_unicode(self): + # When assertThat is given matchees or matchers that contain non-ASCII + # unicode strings, we can still provide a meaningful error. + matchee = _u('\xa7') + matcher = Equals(_u('a')) + mismatch = matcher.match(matchee) + expected = ( + 'Match failed. 
Matchee: %s\n' + 'Matcher: %s\n' + 'Difference: %s\n' % ( + text_repr(matchee), + matcher, + mismatch.describe(), + )) + e = MismatchError(matchee, matcher, mismatch, True) + if str_is_unicode: + actual = str(e) + else: + actual = unicode(e) + # Using str() should still work, and return ascii only + self.assertEqual( + expected.replace(matchee, matchee.encode("unicode-escape")), + str(e).decode("ascii")) + self.assertEqual(expected, actual) + + +class TestMismatchDecorator(TestCase): + + run_tests_with = FullStackRunTest + + def test_forwards_description(self): + x = Mismatch("description", {'foo': 'bar'}) + decorated = MismatchDecorator(x) + self.assertEqual(x.describe(), decorated.describe()) + + def test_forwards_details(self): + x = Mismatch("description", {'foo': 'bar'}) + decorated = MismatchDecorator(x) + self.assertEqual(x.get_details(), decorated.get_details()) + + def test_repr(self): + x = Mismatch("description", {'foo': 'bar'}) + decorated = MismatchDecorator(x) + self.assertEqual( + '<testtools.matchers.MismatchDecorator(%r)>' % (x,), + repr(decorated)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py new file mode 100644 index 00000000000..84e57be472c --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py @@ -0,0 +1,603 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. 
+ +"""Tests for miscellaneous compatibility functions""" + +import io +import linecache +import os +import sys +import tempfile +import traceback + +import testtools + +from testtools.compat import ( + _b, + _detect_encoding, + _format_exc_info, + _format_exception_only, + _format_stack_list, + _get_source_encoding, + _u, + reraise, + str_is_unicode, + text_repr, + unicode_output_stream, + ) +from testtools.matchers import ( + Equals, + Is, + IsInstance, + MatchesException, + Not, + Raises, + ) + + +class TestDetectEncoding(testtools.TestCase): + """Test detection of Python source encodings""" + + def _check_encoding(self, expected, lines, possibly_invalid=False): + """Check lines are valid Python and encoding is as expected""" + if not possibly_invalid: + compile(_b("".join(lines)), "<str>", "exec") + encoding = _detect_encoding(lines) + self.assertEqual(expected, encoding, + "Encoding %r expected but got %r from lines %r" % + (expected, encoding, lines)) + + def test_examples_from_pep(self): + """Check the examples given in PEP 263 all work as specified + + See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/> + """ + # With interpreter binary and using Emacs style file encoding comment: + self._check_encoding("latin-1", ( + "#!/usr/bin/python\n", + "# -*- coding: latin-1 -*-\n", + "import os, sys\n")) + self._check_encoding("iso-8859-15", ( + "#!/usr/bin/python\n", + "# -*- coding: iso-8859-15 -*-\n", + "import os, sys\n")) + self._check_encoding("ascii", ( + "#!/usr/bin/python\n", + "# -*- coding: ascii -*-\n", + "import os, sys\n")) + # Without interpreter line, using plain text: + self._check_encoding("utf-8", ( + "# This Python file uses the following encoding: utf-8\n", + "import os, sys\n")) + # Text editors might have different ways of defining the file's + # encoding, e.g. 
+ self._check_encoding("latin-1", ( + "#!/usr/local/bin/python\n", + "# coding: latin-1\n", + "import os, sys\n")) + # Without encoding comment, Python's parser will assume ASCII text: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "import os, sys\n")) + # Encoding comments which don't work: + # Missing "coding:" prefix: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "# latin-1\n", + "import os, sys\n")) + # Encoding comment not on line 1 or 2: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "#\n", + "# -*- coding: latin-1 -*-\n", + "import os, sys\n")) + # Unsupported encoding: + self._check_encoding("ascii", ( + "#!/usr/local/bin/python\n", + "# -*- coding: utf-42 -*-\n", + "import os, sys\n"), + possibly_invalid=True) + + def test_bom(self): + """Test the UTF-8 BOM counts as an encoding declaration""" + self._check_encoding("utf-8", ( + "\xef\xbb\xbfimport sys\n", + )) + self._check_encoding("utf-8", ( + "\xef\xbb\xbf# File encoding: utf-8\n", + )) + self._check_encoding("utf-8", ( + '\xef\xbb\xbf"""Module docstring\n', + '\xef\xbb\xbfThat should just be a ZWNB"""\n')) + self._check_encoding("latin-1", ( + '"""Is this coding: latin-1 or coding: utf-8 instead?\n', + '\xef\xbb\xbfThose should be latin-1 bytes"""\n')) + self._check_encoding("utf-8", ( + "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n", + '"""Module docstring say \xe2\x98\x86"""\n'), + possibly_invalid=True) + + def test_multiple_coding_comments(self): + """Test only the first of multiple coding declarations counts""" + self._check_encoding("iso-8859-1", ( + "# Is the coding: iso-8859-1\n", + "# Or is it coding: iso-8859-2\n"), + possibly_invalid=True) + self._check_encoding("iso-8859-1", ( + "#!/usr/bin/python\n", + "# Is the coding: iso-8859-1\n", + "# Or is it coding: iso-8859-2\n")) + self._check_encoding("iso-8859-1", ( + "# Is the coding: iso-8859-1 or coding: iso-8859-2\n", + "# Or coding: iso-8859-3 or coding: 
iso-8859-4\n"), + possibly_invalid=True) + self._check_encoding("iso-8859-2", ( + "# Is the coding iso-8859-1 or coding: iso-8859-2\n", + "# Spot the missing colon above\n")) + + +class TestGetSourceEncoding(testtools.TestCase): + """Test reading and caching the encodings of source files""" + + def setUp(self): + testtools.TestCase.setUp(self) + dir = tempfile.mkdtemp() + self.addCleanup(os.rmdir, dir) + self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py") + self._written = False + + def put_source(self, text): + f = open(self.filename, "w") + try: + f.write(text) + finally: + f.close() + if not self._written: + self._written = True + self.addCleanup(os.remove, self.filename) + self.addCleanup(linecache.cache.pop, self.filename, None) + + def test_nonexistant_file_as_ascii(self): + """When file can't be found, the encoding should default to ascii""" + self.assertEquals("ascii", _get_source_encoding(self.filename)) + + def test_encoding_is_cached(self): + """The encoding should stay the same if the cache isn't invalidated""" + self.put_source( + "# coding: iso-8859-13\n" + "import os\n") + self.assertEquals("iso-8859-13", _get_source_encoding(self.filename)) + self.put_source( + "# coding: rot-13\n" + "vzcbeg bf\n") + self.assertEquals("iso-8859-13", _get_source_encoding(self.filename)) + + def test_traceback_rechecks_encoding(self): + """A traceback function checks the cache and resets the encoding""" + self.put_source( + "# coding: iso-8859-8\n" + "import os\n") + self.assertEquals("iso-8859-8", _get_source_encoding(self.filename)) + self.put_source( + "# coding: utf-8\n" + "import os\n") + try: + exec (compile("raise RuntimeError\n", self.filename, "exec")) + except RuntimeError: + traceback.extract_tb(sys.exc_info()[2]) + else: + self.fail("RuntimeError not raised") + self.assertEquals("utf-8", _get_source_encoding(self.filename)) + + +class _FakeOutputStream(object): + """A simple file-like object for testing""" + + def __init__(self): + 
self.writelog = [] + + def write(self, obj): + self.writelog.append(obj) + + +class TestUnicodeOutputStream(testtools.TestCase): + """Test wrapping output streams so they work with arbitrary unicode""" + + uni = _u("pa\u026a\u03b8\u0259n") + + def setUp(self): + super(TestUnicodeOutputStream, self).setUp() + if sys.platform == "cli": + self.skip("IronPython shouldn't wrap streams to do encoding") + + def test_no_encoding_becomes_ascii(self): + """A stream with no encoding attribute gets ascii/replace strings""" + sout = _FakeOutputStream() + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa???n")], sout.writelog) + + def test_encoding_as_none_becomes_ascii(self): + """A stream with encoding value of None gets ascii/replace strings""" + sout = _FakeOutputStream() + sout.encoding = None + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa???n")], sout.writelog) + + def test_bogus_encoding_becomes_ascii(self): + """A stream with a bogus encoding gets ascii/replace strings""" + sout = _FakeOutputStream() + sout.encoding = "bogus" + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa???n")], sout.writelog) + + def test_partial_encoding_replace(self): + """A string which can be partly encoded correctly should be""" + sout = _FakeOutputStream() + sout.encoding = "iso-8859-7" + unicode_output_stream(sout).write(self.uni) + self.assertEqual([_b("pa?\xe8?n")], sout.writelog) + + @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode") + def test_unicode_encodings_wrapped_when_str_is_not_unicode(self): + """A unicode encoding is wrapped but needs no error handler""" + sout = _FakeOutputStream() + sout.encoding = "utf-8" + uout = unicode_output_stream(sout) + self.assertEqual(uout.errors, "strict") + uout.write(self.uni) + self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog) + + @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode") + def 
test_unicode_encodings_not_wrapped_when_str_is_unicode(self): + # No wrapping needed if native str type is unicode + sout = _FakeOutputStream() + sout.encoding = "utf-8" + uout = unicode_output_stream(sout) + self.assertIs(uout, sout) + + def test_stringio(self): + """A StringIO object should maybe get an ascii native str type""" + try: + from cStringIO import StringIO + newio = False + except ImportError: + from io import StringIO + newio = True + sout = StringIO() + soutwrapper = unicode_output_stream(sout) + soutwrapper.write(self.uni) + if newio: + self.assertEqual(self.uni, sout.getvalue()) + else: + self.assertEqual("pa???n", sout.getvalue()) + + def test_io_stringio(self): + # io.StringIO only accepts unicode so should be returned as itself. + s = io.StringIO() + self.assertEqual(s, unicode_output_stream(s)) + + def test_io_bytesio(self): + # io.BytesIO only accepts bytes so should be wrapped. + bytes_io = io.BytesIO() + self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io)))) + # Will error if s was not wrapped properly. + unicode_output_stream(bytes_io).write(_u('foo')) + + def test_io_textwrapper(self): + # textwrapper is unicode, should be returned as itself. + text_io = io.TextIOWrapper(io.BytesIO()) + self.assertThat(unicode_output_stream(text_io), Is(text_io)) + # To be sure... 
+ unicode_output_stream(text_io).write(_u('foo')) + + +class TestTextRepr(testtools.TestCase): + """Ensure in extending repr, basic behaviours are not being broken""" + + ascii_examples = ( + # Single character examples + # C0 control codes should be escaped except multiline \n + ("\x00", "'\\x00'", "'''\\\n\\x00'''"), + ("\b", "'\\x08'", "'''\\\n\\x08'''"), + ("\t", "'\\t'", "'''\\\n\\t'''"), + ("\n", "'\\n'", "'''\\\n\n'''"), + ("\r", "'\\r'", "'''\\\n\\r'''"), + # Quotes and backslash should match normal repr behaviour + ('"', "'\"'", "'''\\\n\"'''"), + ("'", "\"'\"", "'''\\\n\\''''"), + ("\\", "'\\\\'", "'''\\\n\\\\'''"), + # DEL is also unprintable and should be escaped + ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"), + + # Character combinations that need double checking + ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"), + ("\"'", "'\"\\''", "'''\\\n\"\\''''"), + ("'\"", "'\\'\"'", "'''\\\n'\"'''"), + ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"), + ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"), + ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"), + ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"), + ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"), + ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"), + ) + + # Bytes with the high bit set should always be escaped + bytes_examples = ( + (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"), + (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"), + (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"), + (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"), + (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"), + ) + + # Unicode doesn't escape printable characters as per the Python 3 model + unicode_examples = ( + # C1 codes are unprintable + (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"), + (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"), + # No-break space is unprintable + (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"), + # Letters latin alphabets are printable + (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")), + (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")), + (_u("\u0100"), 
_u("'\u0100'"), _u("'''\\\n\u0100'''")), + # Line and paragraph seperators are unprintable + (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"), + (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"), + # Unpaired surrogates are unprintable + (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"), + (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"), + # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs + ) + + b_prefix = repr(_b(""))[:-2] + u_prefix = repr(_u(""))[:-2] + + def test_ascii_examples_oneline_bytes(self): + for s, expected, _ in self.ascii_examples: + b = _b(s) + actual = text_repr(b, multiline=False) + # Add self.assertIsInstance check? + self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_ascii_examples_oneline_unicode(self): + for s, expected, _ in self.ascii_examples: + u = _u(s) + actual = text_repr(u, multiline=False) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + def test_ascii_examples_multiline_bytes(self): + for s, _, expected in self.ascii_examples: + b = _b(s) + actual = text_repr(b, multiline=True) + self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_ascii_examples_multiline_unicode(self): + for s, _, expected in self.ascii_examples: + u = _u(s) + actual = text_repr(u, multiline=True) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + def test_ascii_examples_defaultline_bytes(self): + for s, one, multi in self.ascii_examples: + expected = "\n" in s and multi or one + self.assertEqual(text_repr(_b(s)), self.b_prefix + expected) + + def test_ascii_examples_defaultline_unicode(self): + for s, one, multi in self.ascii_examples: + expected = "\n" in s and multi or one + self.assertEqual(text_repr(_u(s)), self.u_prefix + expected) + + def test_bytes_examples_oneline(self): + for b, expected, _ in self.bytes_examples: + actual = text_repr(b, multiline=False) + 
self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_bytes_examples_multiline(self): + for b, _, expected in self.bytes_examples: + actual = text_repr(b, multiline=True) + self.assertEqual(actual, self.b_prefix + expected) + self.assertEqual(eval(actual), b) + + def test_unicode_examples_oneline(self): + for u, expected, _ in self.unicode_examples: + actual = text_repr(u, multiline=False) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + def test_unicode_examples_multiline(self): + for u, _, expected in self.unicode_examples: + actual = text_repr(u, multiline=True) + self.assertEqual(actual, self.u_prefix + expected) + self.assertEqual(eval(actual), u) + + + +class TestReraise(testtools.TestCase): + """Tests for trivial reraise wrapper needed for Python 2/3 changes""" + + def test_exc_info(self): + """After reraise exc_info matches plus some extra traceback""" + try: + raise ValueError("Bad value") + except ValueError: + _exc_info = sys.exc_info() + try: + reraise(*_exc_info) + except ValueError: + _new_exc_info = sys.exc_info() + self.assertIs(_exc_info[0], _new_exc_info[0]) + self.assertIs(_exc_info[1], _new_exc_info[1]) + expected_tb = traceback.extract_tb(_exc_info[2]) + self.assertEqual(expected_tb, + traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):]) + + def test_custom_exception_no_args(self): + """Reraising does not require args attribute to contain params""" + + class CustomException(Exception): + """Exception that expects and sets attrs but not args""" + + def __init__(self, value): + Exception.__init__(self) + self.value = value + + try: + raise CustomException("Some value") + except CustomException: + _exc_info = sys.exc_info() + self.assertRaises(CustomException, reraise, *_exc_info) + + +class Python2CompatibilityTests(testtools.TestCase): + + def setUp(self): + super(Python2CompatibilityTests, self).setUp() + if sys.version[0] >= '3': + self.skip("These 
tests are only applicable to python 2.") + + +class TestExceptionFormatting(Python2CompatibilityTests): + """Test the _format_exception_only function.""" + + def _assert_exception_format(self, eclass, evalue, expected): + actual = _format_exception_only(eclass, evalue) + self.assertThat(actual, Equals(expected)) + self.assertThat(''.join(actual), IsInstance(unicode)) + + def test_supports_string_exception(self): + self._assert_exception_format( + "String_Exception", + None, + [_u("String_Exception\n")] + ) + + def test_supports_regular_exception(self): + self._assert_exception_format( + RuntimeError, + RuntimeError("Something went wrong"), + [_u("RuntimeError: Something went wrong\n")] + ) + + def test_supports_unprintable_exceptions(self): + """Verify support for exception classes that raise an exception when + __unicode__ or __str__ is called. + """ + class UnprintableException(Exception): + + def __str__(self): + raise Exception() + + def __unicode__(self): + raise Exception() + + self._assert_exception_format( + UnprintableException, + UnprintableException("Foo"), + [_u("UnprintableException: <unprintable UnprintableException object>\n")] + ) + + def test_supports_exceptions_with_no_string_value(self): + class NoStringException(Exception): + + def __str__(self): + return "" + + def __unicode__(self): + return _u("") + + self._assert_exception_format( + NoStringException, + NoStringException("Foo"), + [_u("NoStringException\n")] + ) + + def test_supports_strange_syntax_error(self): + """Test support for syntax errors with unusual number of arguments""" + self._assert_exception_format( + SyntaxError, + SyntaxError("Message"), + [_u("SyntaxError: Message\n")] + ) + + def test_supports_syntax_error(self): + self._assert_exception_format( + SyntaxError, + SyntaxError( + "Some Syntax Message", + ( + "/path/to/file", + 12, + 2, + "This is the line of code", + ) + ), + [ + _u(' File "/path/to/file", line 12\n'), + _u(' This is the line of code\n'), + _u(' ^\n'), + 
_u('SyntaxError: Some Syntax Message\n'), + ] + ) + + +class StackListFormattingTests(Python2CompatibilityTests): + """Test the _format_stack_list function.""" + + def _assert_stack_format(self, stack_lines, expected_output): + actual = _format_stack_list(stack_lines) + self.assertThat(actual, Equals([expected_output])) + + def test_single_complete_stack_line(self): + stack_lines = [( + '/path/to/filename', + 12, + 'func_name', + 'some_code()', + )] + expected = \ + _u(' File "/path/to/filename", line 12, in func_name\n' \ + ' some_code()\n') + + self._assert_stack_format(stack_lines, expected) + + def test_single_stack_line_no_code(self): + stack_lines = [( + '/path/to/filename', + 12, + 'func_name', + None + )] + expected = _u(' File "/path/to/filename", line 12, in func_name\n') + self._assert_stack_format(stack_lines, expected) + + +class FormatExceptionInfoTests(Python2CompatibilityTests): + + def test_individual_functions_called(self): + self.patch( + testtools.compat, + '_format_stack_list', + lambda stack_list: [_u("format stack list called\n")] + ) + self.patch( + testtools.compat, + '_format_exception_only', + lambda etype, evalue: [_u("format exception only called\n")] + ) + result = _format_exc_info(None, None, None) + expected = [ + _u("Traceback (most recent call last):\n"), + _u("format stack list called\n"), + _u("format exception only called\n"), + ] + self.assertThat(expected, Equals(result)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py new file mode 100644 index 00000000000..9ed1b2ffba5 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py @@ -0,0 +1,349 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +import json +import os +import tempfile +import unittest + +from testtools import TestCase +from testtools.compat import ( + _b, + _u, + BytesIO, + StringIO, + ) +from testtools.content import ( + attach_file, + Content, + content_from_file, + content_from_stream, + JSON, + json_content, + StackLinesContent, + StacktraceContent, + TracebackContent, + text_content, + ) +from testtools.content_type import ( + ContentType, + UTF8_TEXT, + ) +from testtools.matchers import ( + Equals, + MatchesException, + Raises, + raises, + ) +from testtools.tests.helpers import an_exc_info + + +raises_value_error = Raises(MatchesException(ValueError)) + + +class TestContent(TestCase): + + def test___init___None_errors(self): + self.assertThat(lambda: Content(None, None), raises_value_error) + self.assertThat( + lambda: Content(None, lambda: ["traceback"]), raises_value_error) + self.assertThat( + lambda: Content(ContentType("text", "traceback"), None), + raises_value_error) + + def test___init___sets_ivars(self): + content_type = ContentType("foo", "bar") + content = Content(content_type, lambda: ["bytes"]) + self.assertEqual(content_type, content.content_type) + self.assertEqual(["bytes"], list(content.iter_bytes())) + + def test___eq__(self): + content_type = ContentType("foo", "bar") + one_chunk = lambda: [_b("bytes")] + two_chunk = lambda: [_b("by"), _b("tes")] + content1 = Content(content_type, one_chunk) + content2 = Content(content_type, one_chunk) + content3 = Content(content_type, two_chunk) + content4 = Content(content_type, lambda: [_b("by"), _b("te")]) + content5 = Content(ContentType("f", "b"), two_chunk) + self.assertEqual(content1, content2) + self.assertEqual(content1, content3) + self.assertNotEqual(content1, content4) + self.assertNotEqual(content1, content5) + + def test___repr__(self): + content = Content(ContentType("application", "octet-stream"), + lambda: [_b("\x00bin"), _b("ary\xff")]) + self.assertIn("\\x00binary\\xff", repr(content)) + + def 
test_iter_text_not_text_errors(self): + content_type = ContentType("foo", "bar") + content = Content(content_type, lambda: ["bytes"]) + self.assertThat(content.iter_text, raises_value_error) + + def test_iter_text_decodes(self): + content_type = ContentType("text", "strange", {"charset": "utf8"}) + content = Content( + content_type, lambda: [_u("bytes\xea").encode("utf8")]) + self.assertEqual([_u("bytes\xea")], list(content.iter_text())) + + def test_iter_text_default_charset_iso_8859_1(self): + content_type = ContentType("text", "strange") + text = _u("bytes\xea") + iso_version = text.encode("ISO-8859-1") + content = Content(content_type, lambda: [iso_version]) + self.assertEqual([text], list(content.iter_text())) + + def test_as_text(self): + content_type = ContentType("text", "strange", {"charset": "utf8"}) + content = Content( + content_type, lambda: [_u("bytes\xea").encode("utf8")]) + self.assertEqual(_u("bytes\xea"), content.as_text()) + + def test_from_file(self): + fd, path = tempfile.mkstemp() + self.addCleanup(os.remove, path) + os.write(fd, _b('some data')) + os.close(fd) + content = content_from_file(path, UTF8_TEXT, chunk_size=2) + self.assertThat( + list(content.iter_bytes()), + Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')])) + + def test_from_nonexistent_file(self): + directory = tempfile.mkdtemp() + nonexistent = os.path.join(directory, 'nonexistent-file') + content = content_from_file(nonexistent) + self.assertThat(content.iter_bytes, raises(IOError)) + + def test_from_file_default_type(self): + content = content_from_file('/nonexistent/path') + self.assertThat(content.content_type, Equals(UTF8_TEXT)) + + def test_from_file_eager_loading(self): + fd, path = tempfile.mkstemp() + os.write(fd, _b('some data')) + os.close(fd) + content = content_from_file(path, UTF8_TEXT, buffer_now=True) + os.remove(path) + self.assertThat( + ''.join(content.iter_text()), Equals('some data')) + + def test_from_file_with_simple_seek(self): + f = 
tempfile.NamedTemporaryFile() + f.write(_b('some data')) + f.flush() + self.addCleanup(f.close) + content = content_from_file( + f.name, UTF8_TEXT, chunk_size=50, seek_offset=5) + self.assertThat( + list(content.iter_bytes()), Equals([_b('data')])) + + def test_from_file_with_whence_seek(self): + f = tempfile.NamedTemporaryFile() + f.write(_b('some data')) + f.flush() + self.addCleanup(f.close) + content = content_from_file( + f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2) + self.assertThat( + list(content.iter_bytes()), Equals([_b('data')])) + + def test_from_stream(self): + data = StringIO('some data') + content = content_from_stream(data, UTF8_TEXT, chunk_size=2) + self.assertThat( + list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a'])) + + def test_from_stream_default_type(self): + data = StringIO('some data') + content = content_from_stream(data) + self.assertThat(content.content_type, Equals(UTF8_TEXT)) + + def test_from_stream_eager_loading(self): + fd, path = tempfile.mkstemp() + self.addCleanup(os.remove, path) + self.addCleanup(os.close, fd) + os.write(fd, _b('some data')) + stream = open(path, 'rb') + self.addCleanup(stream.close) + content = content_from_stream(stream, UTF8_TEXT, buffer_now=True) + os.write(fd, _b('more data')) + self.assertThat( + ''.join(content.iter_text()), Equals('some data')) + + def test_from_stream_with_simple_seek(self): + data = BytesIO(_b('some data')) + content = content_from_stream( + data, UTF8_TEXT, chunk_size=50, seek_offset=5) + self.assertThat( + list(content.iter_bytes()), Equals([_b('data')])) + + def test_from_stream_with_whence_seek(self): + data = BytesIO(_b('some data')) + content = content_from_stream( + data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2) + self.assertThat( + list(content.iter_bytes()), Equals([_b('data')])) + + def test_from_text(self): + data = _u("some data") + expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')]) + self.assertEqual(expected, 
text_content(data)) + + def test_json_content(self): + data = {'foo': 'bar'} + expected = Content(JSON, lambda: [_b('{"foo": "bar"}')]) + self.assertEqual(expected, json_content(data)) + + +class TestStackLinesContent(TestCase): + + def _get_stack_line_and_expected_output(self): + stack_lines = [ + ('/path/to/file', 42, 'some_function', 'print("Hello World")'), + ] + expected = ' File "/path/to/file", line 42, in some_function\n' \ + ' print("Hello World")\n' + return stack_lines, expected + + def test_single_stack_line(self): + stack_lines, expected = self._get_stack_line_and_expected_output() + actual = StackLinesContent(stack_lines).as_text() + + self.assertEqual(expected, actual) + + def test_prefix_content(self): + stack_lines, expected = self._get_stack_line_and_expected_output() + prefix = self.getUniqueString() + '\n' + content = StackLinesContent(stack_lines, prefix_content=prefix) + actual = content.as_text() + expected = prefix + expected + + self.assertEqual(expected, actual) + + def test_postfix_content(self): + stack_lines, expected = self._get_stack_line_and_expected_output() + postfix = '\n' + self.getUniqueString() + content = StackLinesContent(stack_lines, postfix_content=postfix) + actual = content.as_text() + expected = expected + postfix + + self.assertEqual(expected, actual) + + def test___init___sets_content_type(self): + stack_lines, expected = self._get_stack_line_and_expected_output() + content = StackLinesContent(stack_lines) + expected_content_type = ContentType("text", "x-traceback", + {"language": "python", "charset": "utf8"}) + + self.assertEqual(expected_content_type, content.content_type) + + +class TestTracebackContent(TestCase): + + def test___init___None_errors(self): + self.assertThat( + lambda: TracebackContent(None, None), raises_value_error) + + def test___init___sets_ivars(self): + content = TracebackContent(an_exc_info, self) + content_type = ContentType("text", "x-traceback", + {"language": "python", "charset": "utf8"}) + 
self.assertEqual(content_type, content.content_type) + result = unittest.TestResult() + expected = result._exc_info_to_string(an_exc_info, self) + self.assertEqual(expected, ''.join(list(content.iter_text()))) + + +class TestStacktraceContent(TestCase): + + def test___init___sets_ivars(self): + content = StacktraceContent() + content_type = ContentType("text", "x-traceback", + {"language": "python", "charset": "utf8"}) + + self.assertEqual(content_type, content.content_type) + + def test_prefix_is_used(self): + prefix = self.getUniqueString() + actual = StacktraceContent(prefix_content=prefix).as_text() + + self.assertTrue(actual.startswith(prefix)) + + def test_postfix_is_used(self): + postfix = self.getUniqueString() + actual = StacktraceContent(postfix_content=postfix).as_text() + + self.assertTrue(actual.endswith(postfix)) + + def test_top_frame_is_skipped_when_no_stack_is_specified(self): + actual = StacktraceContent().as_text() + + self.assertTrue('testtools/content.py' not in actual) + + +class TestAttachFile(TestCase): + + def make_file(self, data): + # GZ 2011-04-21: This helper could be useful for methods above trying + # to use mkstemp, but should handle write failures and + # always close the fd. There must be a better way. + fd, path = tempfile.mkstemp() + self.addCleanup(os.remove, path) + os.write(fd, _b(data)) + os.close(fd) + return path + + def test_simple(self): + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + data = 'some data' + path = self.make_file(data) + my_content = text_content(data) + attach_file(test, path, name='foo') + self.assertEqual({'foo': my_content}, test.getDetails()) + + def test_optional_name(self): + # If no name is provided, attach_file just uses the base name of the + # file. 
+ class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + path = self.make_file('some data') + base_path = os.path.basename(path) + attach_file(test, path) + self.assertEqual([base_path], list(test.getDetails())) + + def test_lazy_read(self): + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + path = self.make_file('some data') + attach_file(test, path, name='foo', buffer_now=False) + content = test.getDetails()['foo'] + content_file = open(path, 'w') + content_file.write('new data') + content_file.close() + self.assertEqual(''.join(content.iter_text()), 'new data') + + def test_eager_read_by_default(self): + class SomeTest(TestCase): + def test_foo(self): + pass + test = SomeTest('test_foo') + path = self.make_file('some data') + attach_file(test, path, name='foo') + content = test.getDetails()['foo'] + content_file = open(path, 'w') + content_file.write('new data') + content_file.close() + self.assertEqual(''.join(content.iter_text()), 'some data') + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py new file mode 100644 index 00000000000..2d34f95e479 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py @@ -0,0 +1,66 @@ +# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details. 
+ +from testtools import TestCase +from testtools.matchers import Equals, MatchesException, Raises +from testtools.content_type import ( + ContentType, + JSON, + UTF8_TEXT, + ) + + +class TestContentType(TestCase): + + def test___init___None_errors(self): + raises_value_error = Raises(MatchesException(ValueError)) + self.assertThat(lambda:ContentType(None, None), raises_value_error) + self.assertThat(lambda:ContentType(None, "traceback"), + raises_value_error) + self.assertThat(lambda:ContentType("text", None), raises_value_error) + + def test___init___sets_ivars(self): + content_type = ContentType("foo", "bar") + self.assertEqual("foo", content_type.type) + self.assertEqual("bar", content_type.subtype) + self.assertEqual({}, content_type.parameters) + + def test___init___with_parameters(self): + content_type = ContentType("foo", "bar", {"quux": "thing"}) + self.assertEqual({"quux": "thing"}, content_type.parameters) + + def test___eq__(self): + content_type1 = ContentType("foo", "bar", {"quux": "thing"}) + content_type2 = ContentType("foo", "bar", {"quux": "thing"}) + content_type3 = ContentType("foo", "bar", {"quux": "thing2"}) + self.assertTrue(content_type1.__eq__(content_type2)) + self.assertFalse(content_type1.__eq__(content_type3)) + + def test_basic_repr(self): + content_type = ContentType('text', 'plain') + self.assertThat(repr(content_type), Equals('text/plain')) + + def test_extended_repr(self): + content_type = ContentType( + 'text', 'plain', {'foo': 'bar', 'baz': 'qux'}) + self.assertThat( + repr(content_type), Equals('text/plain; baz="qux"; foo="bar"')) + + +class TestBuiltinContentTypes(TestCase): + + def test_plain_text(self): + # The UTF8_TEXT content type represents UTF-8 encoded text/plain. 
+ self.assertThat(UTF8_TEXT.type, Equals('text')) + self.assertThat(UTF8_TEXT.subtype, Equals('plain')) + self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'})) + + def test_json_content(self): + # The JSON content type represents implictly UTF-8 application/json. + self.assertThat(JSON.type, Equals('application')) + self.assertThat(JSON.subtype, Equals('json')) + self.assertThat(JSON.parameters, Equals({})) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py new file mode 100644 index 00000000000..f0510dc9a9f --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py @@ -0,0 +1,767 @@ +# Copyright (c) 2010-2011 testtools developers. See LICENSE for details. 
+ +"""Tests for the DeferredRunTest single test execution logic.""" + +import os +import signal + +from extras import try_import + +from testtools import ( + skipIf, + TestCase, + TestResult, + ) +from testtools.content import ( + text_content, + ) +from testtools.matchers import ( + Equals, + KeysEqual, + MatchesException, + Raises, + ) +from testtools.runtest import RunTest +from testtools.testresult.doubles import ExtendedTestResult +from testtools.tests.test_spinner import NeedsTwistedTestCase + +assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with') +AsynchronousDeferredRunTest = try_import( + 'testtools.deferredruntest.AsynchronousDeferredRunTest') +flush_logged_errors = try_import( + 'testtools.deferredruntest.flush_logged_errors') +SynchronousDeferredRunTest = try_import( + 'testtools.deferredruntest.SynchronousDeferredRunTest') + +defer = try_import('twisted.internet.defer') +failure = try_import('twisted.python.failure') +log = try_import('twisted.python.log') +DelayedCall = try_import('twisted.internet.base.DelayedCall') + + +class X(object): + """Tests that we run as part of our tests, nested to avoid discovery.""" + + class Base(TestCase): + def setUp(self): + super(X.Base, self).setUp() + self.calls = ['setUp'] + self.addCleanup(self.calls.append, 'clean-up') + def test_something(self): + self.calls.append('test') + def tearDown(self): + self.calls.append('tearDown') + super(X.Base, self).tearDown() + + class ErrorInSetup(Base): + expected_calls = ['setUp', 'clean-up'] + expected_results = [('addError', RuntimeError)] + def setUp(self): + super(X.ErrorInSetup, self).setUp() + raise RuntimeError("Error in setUp") + + class ErrorInTest(Base): + expected_calls = ['setUp', 'tearDown', 'clean-up'] + expected_results = [('addError', RuntimeError)] + def test_something(self): + raise RuntimeError("Error in test") + + class FailureInTest(Base): + expected_calls = ['setUp', 'tearDown', 'clean-up'] + expected_results = [('addFailure', 
AssertionError)] + def test_something(self): + self.fail("test failed") + + class ErrorInTearDown(Base): + expected_calls = ['setUp', 'test', 'clean-up'] + expected_results = [('addError', RuntimeError)] + def tearDown(self): + raise RuntimeError("Error in tearDown") + + class ErrorInCleanup(Base): + expected_calls = ['setUp', 'test', 'tearDown', 'clean-up'] + expected_results = [('addError', ZeroDivisionError)] + def test_something(self): + self.calls.append('test') + self.addCleanup(lambda: 1/0) + + class TestIntegration(NeedsTwistedTestCase): + + def assertResultsMatch(self, test, result): + events = list(result._events) + self.assertEqual(('startTest', test), events.pop(0)) + for expected_result in test.expected_results: + result = events.pop(0) + if len(expected_result) == 1: + self.assertEqual((expected_result[0], test), result) + else: + self.assertEqual((expected_result[0], test), result[:2]) + error_type = expected_result[1] + self.assertIn(error_type.__name__, str(result[2])) + self.assertEqual([('stopTest', test)], events) + + def test_runner(self): + result = ExtendedTestResult() + test = self.test_factory('test_something', runTest=self.runner) + test.run(result) + self.assertEqual(test.calls, self.test_factory.expected_calls) + self.assertResultsMatch(test, result) + + +def make_integration_tests(): + from unittest import TestSuite + from testtools import clone_test_with_new_id + runners = [ + ('RunTest', RunTest), + ('SynchronousDeferredRunTest', SynchronousDeferredRunTest), + ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest), + ] + + tests = [ + X.ErrorInSetup, + X.ErrorInTest, + X.ErrorInTearDown, + X.FailureInTest, + X.ErrorInCleanup, + ] + base_test = X.TestIntegration('test_runner') + integration_tests = [] + for runner_name, runner in runners: + for test in tests: + new_test = clone_test_with_new_id( + base_test, '%s(%s, %s)' % ( + base_test.id(), + runner_name, + test.__name__)) + new_test.test_factory = test + new_test.runner = 
runner + integration_tests.append(new_test) + return TestSuite(integration_tests) + + +class TestSynchronousDeferredRunTest(NeedsTwistedTestCase): + + def make_result(self): + return ExtendedTestResult() + + def make_runner(self, test): + return SynchronousDeferredRunTest(test, test.exception_handlers) + + def test_success(self): + class SomeCase(TestCase): + def test_success(self): + return defer.succeed(None) + test = SomeCase('test_success') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + result._events, Equals([ + ('startTest', test), + ('addSuccess', test), + ('stopTest', test)])) + + def test_failure(self): + class SomeCase(TestCase): + def test_failure(self): + return defer.maybeDeferred(self.fail, "Egads!") + test = SomeCase('test_failure') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], Equals([ + ('startTest', test), + ('addFailure', test), + ('stopTest', test)])) + + def test_setUp_followed_by_test(self): + class SomeCase(TestCase): + def setUp(self): + super(SomeCase, self).setUp() + return defer.succeed(None) + def test_failure(self): + return defer.maybeDeferred(self.fail, "Egads!") + test = SomeCase('test_failure') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], Equals([ + ('startTest', test), + ('addFailure', test), + ('stopTest', test)])) + + +class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase): + + def make_reactor(self): + from twisted.internet import reactor + return reactor + + def make_result(self): + return ExtendedTestResult() + + def make_runner(self, test, timeout=None): + if timeout is None: + timeout = self.make_timeout() + return AsynchronousDeferredRunTest( + test, test.exception_handlers, timeout=timeout) + + def make_timeout(self): + return 0.005 + + def 
test_setUp_returns_deferred_that_fires_later(self): + # setUp can return a Deferred that might fire at any time. + # AsynchronousDeferredRunTest will not go on to running the test until + # the Deferred returned by setUp actually fires. + call_log = [] + marker = object() + d = defer.Deferred().addCallback(call_log.append) + class SomeCase(TestCase): + def setUp(self): + super(SomeCase, self).setUp() + call_log.append('setUp') + return d + def test_something(self): + call_log.append('test') + def fire_deferred(): + self.assertThat(call_log, Equals(['setUp'])) + d.callback(marker) + test = SomeCase('test_something') + timeout = self.make_timeout() + runner = self.make_runner(test, timeout=timeout) + result = self.make_result() + reactor = self.make_reactor() + reactor.callLater(timeout, fire_deferred) + runner.run(result) + self.assertThat(call_log, Equals(['setUp', marker, 'test'])) + + def test_calls_setUp_test_tearDown_in_sequence(self): + # setUp, the test method and tearDown can all return + # Deferreds. AsynchronousDeferredRunTest will make sure that each of + # these are run in turn, only going on to the next stage once the + # Deferred from the previous stage has fired. 
+ call_log = [] + a = defer.Deferred() + a.addCallback(lambda x: call_log.append('a')) + b = defer.Deferred() + b.addCallback(lambda x: call_log.append('b')) + c = defer.Deferred() + c.addCallback(lambda x: call_log.append('c')) + class SomeCase(TestCase): + def setUp(self): + super(SomeCase, self).setUp() + call_log.append('setUp') + return a + def test_success(self): + call_log.append('test') + return b + def tearDown(self): + super(SomeCase, self).tearDown() + call_log.append('tearDown') + return c + test = SomeCase('test_success') + timeout = self.make_timeout() + runner = self.make_runner(test, timeout) + result = self.make_result() + reactor = self.make_reactor() + def fire_a(): + self.assertThat(call_log, Equals(['setUp'])) + a.callback(None) + def fire_b(): + self.assertThat(call_log, Equals(['setUp', 'a', 'test'])) + b.callback(None) + def fire_c(): + self.assertThat( + call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown'])) + c.callback(None) + reactor.callLater(timeout * 0.25, fire_a) + reactor.callLater(timeout * 0.5, fire_b) + reactor.callLater(timeout * 0.75, fire_c) + runner.run(result) + self.assertThat( + call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c'])) + + def test_async_cleanups(self): + # Cleanups added with addCleanup can return + # Deferreds. AsynchronousDeferredRunTest will run each of them in + # turn. 
+ class SomeCase(TestCase): + def test_whatever(self): + pass + test = SomeCase('test_whatever') + call_log = [] + a = defer.Deferred().addCallback(lambda x: call_log.append('a')) + b = defer.Deferred().addCallback(lambda x: call_log.append('b')) + c = defer.Deferred().addCallback(lambda x: call_log.append('c')) + test.addCleanup(lambda: a) + test.addCleanup(lambda: b) + test.addCleanup(lambda: c) + def fire_a(): + self.assertThat(call_log, Equals([])) + a.callback(None) + def fire_b(): + self.assertThat(call_log, Equals(['a'])) + b.callback(None) + def fire_c(): + self.assertThat(call_log, Equals(['a', 'b'])) + c.callback(None) + timeout = self.make_timeout() + reactor = self.make_reactor() + reactor.callLater(timeout * 0.25, fire_a) + reactor.callLater(timeout * 0.5, fire_b) + reactor.callLater(timeout * 0.75, fire_c) + runner = self.make_runner(test, timeout) + result = self.make_result() + runner.run(result) + self.assertThat(call_log, Equals(['a', 'b', 'c'])) + + def test_clean_reactor(self): + # If there's cruft left over in the reactor, the test fails. + reactor = self.make_reactor() + timeout = self.make_timeout() + class SomeCase(TestCase): + def test_cruft(self): + reactor.callLater(timeout * 10.0, lambda: None) + test = SomeCase('test_cruft') + runner = self.make_runner(test, timeout) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals( + [('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('traceback', 'twisted-log')) + + def test_exports_reactor(self): + # The reactor is set as an attribute on the test case. 
+ reactor = self.make_reactor() + timeout = self.make_timeout() + class SomeCase(TestCase): + def test_cruft(self): + self.assertIs(reactor, self.reactor) + test = SomeCase('test_cruft') + runner = self.make_runner(test, timeout) + result = TestResult() + runner.run(result) + self.assertEqual([], result.errors) + self.assertEqual([], result.failures) + + def test_unhandled_error_from_deferred(self): + # If there's a Deferred with an unhandled error, the test fails. Each + # unhandled error is reported with a separate traceback. + class SomeCase(TestCase): + def test_cruft(self): + # Note we aren't returning the Deferred so that the error will + # be unhandled. + defer.maybeDeferred(lambda: 1/0) + defer.maybeDeferred(lambda: 2/0) + test = SomeCase('test_cruft') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + error = result._events[1][2] + result._events[1] = ('addError', test, None) + self.assertThat(result._events, Equals( + [('startTest', test), + ('addError', test, None), + ('stopTest', test)])) + self.assertThat( + error, KeysEqual( + 'twisted-log', + 'unhandled-error-in-deferred', + 'unhandled-error-in-deferred-1', + )) + + def test_unhandled_error_from_deferred_combined_with_error(self): + # If there's a Deferred with an unhandled error, the test fails. Each + # unhandled error is reported with a separate traceback, and the error + # is still reported. + class SomeCase(TestCase): + def test_cruft(self): + # Note we aren't returning the Deferred so that the error will + # be unhandled. 
+ defer.maybeDeferred(lambda: 1/0) + 2 / 0 + test = SomeCase('test_cruft') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + error = result._events[1][2] + result._events[1] = ('addError', test, None) + self.assertThat(result._events, Equals( + [('startTest', test), + ('addError', test, None), + ('stopTest', test)])) + self.assertThat( + error, KeysEqual( + 'traceback', + 'twisted-log', + 'unhandled-error-in-deferred', + )) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_keyboard_interrupt_stops_test_run(self): + # If we get a SIGINT during a test run, the test stops and no more + # tests run. + SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + raise self.skipTest("SIGINT unavailable") + class SomeCase(TestCase): + def test_pause(self): + return defer.Deferred() + test = SomeCase('test_pause') + reactor = self.make_reactor() + timeout = self.make_timeout() + runner = self.make_runner(test, timeout * 5) + result = self.make_result() + reactor.callLater(timeout, os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:runner.run(result), + Raises(MatchesException(KeyboardInterrupt))) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_fast_keyboard_interrupt_stops_test_run(self): + # If we get a SIGINT during a test run, the test stops and no more + # tests run. 
+ SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + raise self.skipTest("SIGINT unavailable") + class SomeCase(TestCase): + def test_pause(self): + return defer.Deferred() + test = SomeCase('test_pause') + reactor = self.make_reactor() + timeout = self.make_timeout() + runner = self.make_runner(test, timeout * 5) + result = self.make_result() + reactor.callWhenRunning(os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:runner.run(result), + Raises(MatchesException(KeyboardInterrupt))) + + def test_timeout_causes_test_error(self): + # If a test times out, it reports itself as having failed with a + # TimeoutError. + class SomeCase(TestCase): + def test_pause(self): + return defer.Deferred() + test = SomeCase('test_pause') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + error = result._events[1][2] + self.assertThat( + [event[:2] for event in result._events], Equals( + [('startTest', test), + ('addError', test), + ('stopTest', test)])) + self.assertIn('TimeoutError', str(error['traceback'])) + + def test_convenient_construction(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. + reactor = object() + timeout = object() + handler = object() + factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout) + runner = factory(self, [handler]) + self.assertIs(reactor, runner._reactor) + self.assertIs(timeout, runner._timeout) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + + def test_use_convenient_factory(self): + # Make sure that the factory can actually be used. 
+ factory = AsynchronousDeferredRunTest.make_factory() + class SomeCase(TestCase): + run_tests_with = factory + def test_something(self): + pass + case = SomeCase('test_something') + case.run() + + def test_convenient_construction_default_reactor(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. + reactor = object() + handler = object() + factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor) + runner = factory(self, [handler]) + self.assertIs(reactor, runner._reactor) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + + def test_convenient_construction_default_timeout(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. + timeout = object() + handler = object() + factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout) + runner = factory(self, [handler]) + self.assertIs(timeout, runner._timeout) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + + def test_convenient_construction_default_debugging(self): + # As a convenience method, AsynchronousDeferredRunTest has a + # classmethod that returns an AsynchronousDeferredRunTest + # factory. This factory has the same API as the RunTest constructor. 
+ handler = object() + factory = AsynchronousDeferredRunTest.make_factory(debug=True) + runner = factory(self, [handler]) + self.assertIs(self, runner.case) + self.assertEqual([handler], runner.handlers) + self.assertEqual(True, runner._debug) + + def test_deferred_error(self): + class SomeTest(TestCase): + def test_something(self): + return defer.maybeDeferred(lambda: 1/0) + test = SomeTest('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('traceback', 'twisted-log')) + + def test_only_addError_once(self): + # Even if the reactor is unclean and the test raises an error and the + # cleanups raise errors, we only called addError once per test. + reactor = self.make_reactor() + class WhenItRains(TestCase): + def it_pours(self): + # Add a dirty cleanup. + self.addCleanup(lambda: 3 / 0) + # Dirty the reactor. + from twisted.internet.protocol import ServerFactory + reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1') + # Unhandled error. + defer.maybeDeferred(lambda: 2 / 0) + # Actual error. + raise RuntimeError("Excess precipitation") + test = WhenItRains('it_pours') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat( + error, KeysEqual( + 'traceback', + 'traceback-1', + 'traceback-2', + 'twisted-log', + 'unhandled-error-in-deferred', + )) + + def test_log_err_is_error(self): + # An error logged during the test run is recorded as an error in the + # tests. 
+ class LogAnError(TestCase): + def test_something(self): + try: + 1/0 + except ZeroDivisionError: + f = failure.Failure() + log.err(f) + test = LogAnError('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('logged-error', 'twisted-log')) + + def test_log_err_flushed_is_success(self): + # An error logged during the test run is recorded as an error in the + # tests. + class LogAnError(TestCase): + def test_something(self): + try: + 1/0 + except ZeroDivisionError: + f = failure.Failure() + log.err(f) + flush_logged_errors(ZeroDivisionError) + test = LogAnError('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + result._events, + Equals([ + ('startTest', test), + ('addSuccess', test, {'twisted-log': text_content('')}), + ('stopTest', test)])) + + def test_log_in_details(self): + class LogAnError(TestCase): + def test_something(self): + log.msg("foo") + 1/0 + test = LogAnError('test_something') + runner = self.make_runner(test) + result = self.make_result() + runner.run(result) + self.assertThat( + [event[:2] for event in result._events], + Equals([ + ('startTest', test), + ('addError', test), + ('stopTest', test)])) + error = result._events[1][2] + self.assertThat(error, KeysEqual('traceback', 'twisted-log')) + + def test_debugging_unchanged_during_test_by_default(self): + debugging = [(defer.Deferred.debug, DelayedCall.debug)] + class SomeCase(TestCase): + def test_debugging_enabled(self): + debugging.append((defer.Deferred.debug, DelayedCall.debug)) + test = SomeCase('test_debugging_enabled') + runner = AsynchronousDeferredRunTest( + test, handlers=test.exception_handlers, + reactor=self.make_reactor(), timeout=self.make_timeout()) + 
runner.run(self.make_result()) + self.assertEqual(debugging[0], debugging[1]) + + def test_debugging_enabled_during_test_with_debug_flag(self): + self.patch(defer.Deferred, 'debug', False) + self.patch(DelayedCall, 'debug', False) + debugging = [] + class SomeCase(TestCase): + def test_debugging_enabled(self): + debugging.append((defer.Deferred.debug, DelayedCall.debug)) + test = SomeCase('test_debugging_enabled') + runner = AsynchronousDeferredRunTest( + test, handlers=test.exception_handlers, + reactor=self.make_reactor(), timeout=self.make_timeout(), + debug=True) + runner.run(self.make_result()) + self.assertEqual([(True, True)], debugging) + self.assertEqual(False, defer.Deferred.debug) + self.assertEqual(False, defer.Deferred.debug) + + +class TestAssertFailsWith(NeedsTwistedTestCase): + """Tests for `assert_fails_with`.""" + + if SynchronousDeferredRunTest is not None: + run_tests_with = SynchronousDeferredRunTest + + def test_assert_fails_with_success(self): + # assert_fails_with fails the test if it's given a Deferred that + # succeeds. + marker = object() + d = assert_fails_with(defer.succeed(marker), RuntimeError) + def check_result(failure): + failure.trap(self.failureException) + self.assertThat( + str(failure.value), + Equals("RuntimeError not raised (%r returned)" % (marker,))) + d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + return d + + def test_assert_fails_with_success_multiple_types(self): + # assert_fails_with fails the test if it's given a Deferred that + # succeeds. 
+ marker = object() + d = assert_fails_with( + defer.succeed(marker), RuntimeError, ZeroDivisionError) + def check_result(failure): + failure.trap(self.failureException) + self.assertThat( + str(failure.value), + Equals("RuntimeError, ZeroDivisionError not raised " + "(%r returned)" % (marker,))) + d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + return d + + def test_assert_fails_with_wrong_exception(self): + # assert_fails_with fails the test if it's given a Deferred that + # succeeds. + d = assert_fails_with( + defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt) + def check_result(failure): + failure.trap(self.failureException) + lines = str(failure.value).splitlines() + self.assertThat( + lines[:2], + Equals([ + ("ZeroDivisionError raised instead of RuntimeError, " + "KeyboardInterrupt:"), + " Traceback (most recent call last):", + ])) + d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + return d + + def test_assert_fails_with_expected_exception(self): + # assert_fails_with calls back with the value of the failure if it's + # one of the expected types of failures. + try: + 1/0 + except ZeroDivisionError: + f = failure.Failure() + d = assert_fails_with(defer.fail(f), ZeroDivisionError) + return d.addCallback(self.assertThat, Equals(f.value)) + + def test_custom_failure_exception(self): + # If assert_fails_with is passed a 'failureException' keyword + # argument, then it will raise that instead of `AssertionError`. 
+ class CustomException(Exception): + pass + marker = object() + d = assert_fails_with( + defer.succeed(marker), RuntimeError, + failureException=CustomException) + def check_result(failure): + failure.trap(CustomException) + self.assertThat( + str(failure.value), + Equals("RuntimeError not raised (%r returned)" % (marker,))) + return d.addCallbacks( + lambda x: self.fail("Should not have succeeded"), check_result) + + +class TestRunWithLogObservers(NeedsTwistedTestCase): + + def test_restores_observers(self): + from testtools.deferredruntest import run_with_log_observers + from twisted.python import log + # Make sure there's at least one observer. This reproduces bug + # #926189. + log.addObserver(lambda *args: None) + observers = list(log.theLogPublisher.observers) + run_with_log_observers([], lambda: None) + self.assertEqual(observers, log.theLogPublisher.observers) + + +def test_suite(): + from unittest import TestLoader, TestSuite + return TestSuite( + [TestLoader().loadTestsFromName(__name__), + make_integration_tests()]) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py new file mode 100644 index 00000000000..7bfc1fa267b --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py @@ -0,0 +1,100 @@ +# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details. 
+ +"""Tests for the distutils test command logic.""" + +from distutils.dist import Distribution + +from extras import try_import + +from testtools.compat import ( + _b, + _u, + BytesIO, + ) +fixtures = try_import('fixtures') + +import testtools +from testtools import TestCase +from testtools.distutilscmd import TestCommand +from testtools.matchers import MatchesRegex + + +if fixtures: + class SampleTestFixture(fixtures.Fixture): + """Creates testtools.runexample temporarily.""" + + def __init__(self): + self.package = fixtures.PythonPackage( + 'runexample', [('__init__.py', _b(""" +from testtools import TestCase + +class TestFoo(TestCase): + def test_bar(self): + pass + def test_quux(self): + pass +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) +"""))]) + + def setUp(self): + super(SampleTestFixture, self).setUp() + self.useFixture(self.package) + testtools.__path__.append(self.package.base) + self.addCleanup(testtools.__path__.remove, self.package.base) + + +class TestCommandTest(TestCase): + + def setUp(self): + super(TestCommandTest, self).setUp() + if fixtures is None: + self.skipTest("Need fixtures") + + def test_test_module(self): + self.useFixture(SampleTestFixture()) + stdout = self.useFixture(fixtures.StringStream('stdout')) + dist = Distribution() + dist.script_name = 'setup.py' + dist.script_args = ['test'] + dist.cmdclass = {'test': TestCommand} + dist.command_options = { + 'test': {'test_module': ('command line', 'testtools.runexample')}} + cmd = dist.reinitialize_command('test') + with fixtures.MonkeyPatch('sys.stdout', stdout.stream): + dist.run_command('test') + self.assertThat( + stdout.getDetails()['stdout'].as_text(), + MatchesRegex(_u("""Tests running... 
+ +Ran 2 tests in \\d.\\d\\d\\ds +OK +"""))) + + def test_test_suite(self): + self.useFixture(SampleTestFixture()) + stdout = self.useFixture(fixtures.StringStream('stdout')) + dist = Distribution() + dist.script_name = 'setup.py' + dist.script_args = ['test'] + dist.cmdclass = {'test': TestCommand} + dist.command_options = { + 'test': { + 'test_suite': ( + 'command line', 'testtools.runexample.test_suite')}} + cmd = dist.reinitialize_command('test') + with fixtures.MonkeyPatch('sys.stdout', stdout.stream): + dist.run_command('test') + self.assertThat( + stdout.getDetails()['stdout'].as_text(), + MatchesRegex(_u("""Tests running... + +Ran 2 tests in \\d.\\d\\d\\ds +OK +"""))) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py new file mode 100644 index 00000000000..2ccd1e853a0 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py @@ -0,0 +1,118 @@ +# Copyright (c) 2010-2011 testtools developers. See LICENSE for details. 
+ +import unittest + +from extras import try_import + +from testtools import ( + TestCase, + content, + content_type, + ) +from testtools.compat import _b, _u +from testtools.testresult.doubles import ( + ExtendedTestResult, + ) + +fixtures = try_import('fixtures') +LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture') + + +class TestFixtureSupport(TestCase): + + def setUp(self): + super(TestFixtureSupport, self).setUp() + if fixtures is None or LoggingFixture is None: + self.skipTest("Need fixtures") + + def test_useFixture(self): + fixture = LoggingFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = unittest.TestResult() + SimpleTest('test_foo').run(result) + self.assertTrue(result.wasSuccessful()) + self.assertEqual(['setUp', 'cleanUp'], fixture.calls) + + def test_useFixture_cleanups_raise_caught(self): + calls = [] + def raiser(ignored): + calls.append('called') + raise Exception('foo') + fixture = fixtures.FunctionFixture(lambda:None, raiser) + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = unittest.TestResult() + SimpleTest('test_foo').run(result) + self.assertFalse(result.wasSuccessful()) + self.assertEqual(['called'], calls) + + def test_useFixture_details_captured(self): + class DetailsFixture(fixtures.Fixture): + def setUp(self): + fixtures.Fixture.setUp(self) + self.addCleanup(delattr, self, 'content') + self.content = [_b('content available until cleanUp')] + self.addDetail('content', + content.Content(content_type.UTF8_TEXT, self.get_content)) + def get_content(self): + return self.content + fixture = DetailsFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + # Add a colliding detail (both should show up) + self.addDetail('content', + content.Content(content_type.UTF8_TEXT, lambda:[_b('foo')])) + result = ExtendedTestResult() + SimpleTest('test_foo').run(result) + self.assertEqual('addSuccess', 
result._events[-2][0]) + details = result._events[-2][2] + self.assertEqual(['content', 'content-1'], sorted(details.keys())) + self.assertEqual('foo', details['content'].as_text()) + self.assertEqual('content available until cleanUp', + details['content-1'].as_text()) + + def test_useFixture_multiple_details_captured(self): + class DetailsFixture(fixtures.Fixture): + def setUp(self): + fixtures.Fixture.setUp(self) + self.addDetail('aaa', content.text_content("foo")) + self.addDetail('bbb', content.text_content("bar")) + fixture = DetailsFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = ExtendedTestResult() + SimpleTest('test_foo').run(result) + self.assertEqual('addSuccess', result._events[-2][0]) + details = result._events[-2][2] + self.assertEqual(['aaa', 'bbb'], sorted(details)) + self.assertEqual(_u('foo'), details['aaa'].as_text()) + self.assertEqual(_u('bar'), details['bbb'].as_text()) + + def test_useFixture_details_captured_from_setUp(self): + # Details added during fixture set-up are gathered even if setUp() + # fails with an exception. 
+ class BrokenFixture(fixtures.Fixture): + def setUp(self): + fixtures.Fixture.setUp(self) + self.addDetail('content', content.text_content("foobar")) + raise Exception() + fixture = BrokenFixture() + class SimpleTest(TestCase): + def test_foo(self): + self.useFixture(fixture) + result = ExtendedTestResult() + SimpleTest('test_foo').run(result) + self.assertEqual('addError', result._events[-2][0]) + details = result._events[-2][2] + self.assertEqual(['content', 'traceback'], sorted(details)) + self.assertEqual('foobar', ''.join(details['content'].iter_text())) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py new file mode 100644 index 00000000000..848c2f0b489 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py @@ -0,0 +1,30 @@ +# Copyright (c) 2010-2012 testtools developers. See LICENSE for details. 
+ +from testtools import TestCase +from testtools.tests.helpers import ( + FullStackRunTest, + hide_testtools_stack, + is_stack_hidden, + ) + + +class TestStackHiding(TestCase): + + run_tests_with = FullStackRunTest + + def setUp(self): + super(TestStackHiding, self).setUp() + self.addCleanup(hide_testtools_stack, is_stack_hidden()) + + def test_is_stack_hidden_consistent_true(self): + hide_testtools_stack(True) + self.assertEqual(True, is_stack_hidden()) + + def test_is_stack_hidden_consistent_false(self): + hide_testtools_stack(False) + self.assertEqual(False, is_stack_hidden()) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py new file mode 100644 index 00000000000..540a2ee909f --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py @@ -0,0 +1,167 @@ +# Copyright (c) 2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +"""Tests for testtools.monkey.""" + +from testtools import TestCase +from testtools.matchers import MatchesException, Raises +from testtools.monkey import MonkeyPatcher, patch + + +class TestObj: + + def __init__(self): + self.foo = 'foo value' + self.bar = 'bar value' + self.baz = 'baz value' + + +class MonkeyPatcherTest(TestCase): + """ + Tests for 'MonkeyPatcher' monkey-patching class. + """ + + def setUp(self): + super(MonkeyPatcherTest, self).setUp() + self.test_object = TestObj() + self.original_object = TestObj() + self.monkey_patcher = MonkeyPatcher() + + def test_empty(self): + # A monkey patcher without patches doesn't change a thing. + self.monkey_patcher.patch() + + # We can't assert that all state is unchanged, but at least we can + # check our test object. 
+ self.assertEquals(self.original_object.foo, self.test_object.foo) + self.assertEquals(self.original_object.bar, self.test_object.bar) + self.assertEquals(self.original_object.baz, self.test_object.baz) + + def test_construct_with_patches(self): + # Constructing a 'MonkeyPatcher' with patches adds all of the given + # patches to the patch list. + patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'), + (self.test_object, 'bar', 'hehe')) + patcher.patch() + self.assertEquals('haha', self.test_object.foo) + self.assertEquals('hehe', self.test_object.bar) + self.assertEquals(self.original_object.baz, self.test_object.baz) + + def test_patch_existing(self): + # Patching an attribute that exists sets it to the value defined in the + # patch. + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + self.monkey_patcher.patch() + self.assertEquals(self.test_object.foo, 'haha') + + def test_patch_non_existing(self): + # Patching a non-existing attribute sets it to the value defined in + # the patch. + self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value') + self.monkey_patcher.patch() + self.assertEquals(self.test_object.doesntexist, 'value') + + def test_restore_non_existing(self): + # Restoring a value that didn't exist before the patch deletes the + # value. + self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value') + self.monkey_patcher.patch() + self.monkey_patcher.restore() + marker = object() + self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker)) + + def test_patch_already_patched(self): + # Adding a patch for an object and attribute that already have a patch + # overrides the existing patch. 
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah') + self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH') + self.monkey_patcher.patch() + self.assertEquals(self.test_object.foo, 'BLAH') + self.monkey_patcher.restore() + self.assertEquals(self.test_object.foo, self.original_object.foo) + + def test_restore_twice_is_a_no_op(self): + # Restoring an already-restored monkey patch is a no-op. + self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah') + self.monkey_patcher.patch() + self.monkey_patcher.restore() + self.assertEquals(self.test_object.foo, self.original_object.foo) + self.monkey_patcher.restore() + self.assertEquals(self.test_object.foo, self.original_object.foo) + + def test_run_with_patches_decoration(self): + # run_with_patches runs the given callable, passing in all arguments + # and keyword arguments, and returns the return value of the callable. + log = [] + + def f(a, b, c=None): + log.append((a, b, c)) + return 'foo' + + result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10) + self.assertEquals('foo', result) + self.assertEquals([(1, 2, 10)], log) + + def test_repeated_run_with_patches(self): + # We can call the same function with run_with_patches more than + # once. All patches apply for each call. + def f(): + return (self.test_object.foo, self.test_object.bar, + self.test_object.baz) + + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + result = self.monkey_patcher.run_with_patches(f) + self.assertEquals( + ('haha', self.original_object.bar, self.original_object.baz), + result) + result = self.monkey_patcher.run_with_patches(f) + self.assertEquals( + ('haha', self.original_object.bar, self.original_object.baz), + result) + + def test_run_with_patches_restores(self): + # run_with_patches restores the original values after the function has + # executed. 
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + self.assertEquals(self.original_object.foo, self.test_object.foo) + self.monkey_patcher.run_with_patches(lambda: None) + self.assertEquals(self.original_object.foo, self.test_object.foo) + + def test_run_with_patches_restores_on_exception(self): + # run_with_patches restores the original values even when the function + # raises an exception. + def _(): + self.assertEquals(self.test_object.foo, 'haha') + self.assertEquals(self.test_object.bar, 'blahblah') + raise RuntimeError("Something went wrong!") + + self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha') + self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah') + + self.assertThat(lambda:self.monkey_patcher.run_with_patches(_), + Raises(MatchesException(RuntimeError("Something went wrong!")))) + self.assertEquals(self.test_object.foo, self.original_object.foo) + self.assertEquals(self.test_object.bar, self.original_object.bar) + + +class TestPatchHelper(TestCase): + + def test_patch_patches(self): + # patch(obj, name, value) sets obj.name to value. + test_object = TestObj() + patch(test_object, 'foo', 42) + self.assertEqual(42, test_object.foo) + + def test_patch_returns_cleanup(self): + # patch(obj, name, value) returns a nullary callable that restores obj + # to its original state when run. 
+ test_object = TestObj() + original = test_object.foo + cleanup = patch(test_object, 'foo', 42) + cleanup() + self.assertEqual(original, test_object.foo) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py new file mode 100644 index 00000000000..e89ecdc26a4 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py @@ -0,0 +1,248 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Tests for the test runner logic.""" + +from unittest import TestSuite +import sys + +from extras import try_import +fixtures = try_import('fixtures') +testresources = try_import('testresources') + +import testtools +from testtools import TestCase, run +from testtools.compat import ( + _b, + StringIO, + ) +from testtools.matchers import Contains + + +if fixtures: + class SampleTestFixture(fixtures.Fixture): + """Creates testtools.runexample temporarily.""" + + def __init__(self, broken=False): + """Create a SampleTestFixture. + + :param broken: If True, the sample file will not be importable. 
+ """ + if not broken: + init_contents = _b("""\ +from testtools import TestCase + +class TestFoo(TestCase): + def test_bar(self): + pass + def test_quux(self): + pass +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) +""") + else: + init_contents = b"class not in\n" + self.package = fixtures.PythonPackage( + 'runexample', [('__init__.py', init_contents)]) + + def setUp(self): + super(SampleTestFixture, self).setUp() + self.useFixture(self.package) + testtools.__path__.append(self.package.base) + self.addCleanup(testtools.__path__.remove, self.package.base) + self.addCleanup(sys.modules.pop, 'testtools.runexample', None) + + +if fixtures and testresources: + class SampleResourcedFixture(fixtures.Fixture): + """Creates a test suite that uses testresources.""" + + def __init__(self): + super(SampleResourcedFixture, self).__init__() + self.package = fixtures.PythonPackage( + 'resourceexample', [('__init__.py', _b(""" +from fixtures import Fixture +from testresources import ( + FixtureResource, + OptimisingTestSuite, + ResourcedTestCase, + ) +from testtools import TestCase + +class Printer(Fixture): + + def setUp(self): + super(Printer, self).setUp() + print('Setting up Printer') + + def reset(self): + pass + +class TestFoo(TestCase, ResourcedTestCase): + # When run, this will print just one Setting up Printer, unless the + # OptimisingTestSuite is not honoured, when one per test case will print. 
+ resources=[('res', FixtureResource(Printer()))] + def test_bar(self): + pass + def test_foo(self): + pass + def test_quux(self): + pass +def test_suite(): + from unittest import TestLoader + return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__)) +"""))]) + + def setUp(self): + super(SampleResourcedFixture, self).setUp() + self.useFixture(self.package) + self.addCleanup(testtools.__path__.remove, self.package.base) + testtools.__path__.append(self.package.base) + + +class TestRun(TestCase): + + def setUp(self): + super(TestRun, self).setUp() + if fixtures is None: + self.skipTest("Need fixtures") + + def test_run_custom_list(self): + self.useFixture(SampleTestFixture()) + tests = [] + class CaptureList(run.TestToolsTestRunner): + def list(self, test): + tests.append(set([case.id() for case + in testtools.testsuite.iterate_tests(test)])) + out = StringIO() + try: + program = run.TestProgram( + argv=['prog', '-l', 'testtools.runexample.test_suite'], + stdout=out, testRunner=CaptureList) + except SystemExit: + exc_info = sys.exc_info() + raise AssertionError("-l tried to exit. %r" % exc_info[1]) + self.assertEqual([set(['testtools.runexample.TestFoo.test_bar', + 'testtools.runexample.TestFoo.test_quux'])], tests) + + def test_run_list(self): + self.useFixture(SampleTestFixture()) + out = StringIO() + try: + run.main(['prog', '-l', 'testtools.runexample.test_suite'], out) + except SystemExit: + exc_info = sys.exc_info() + raise AssertionError("-l tried to exit. 
%r" % exc_info[1]) + self.assertEqual("""testtools.runexample.TestFoo.test_bar +testtools.runexample.TestFoo.test_quux +""", out.getvalue()) + + def test_run_list_failed_import(self): + if not run.have_discover: + self.skipTest("Need discover") + broken = self.useFixture(SampleTestFixture(broken=True)) + out = StringIO() + exc = self.assertRaises( + SystemExit, + run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out) + self.assertEqual(2, exc.args[0]) + self.assertEqual("""Failed to import +runexample.__init__ +""", out.getvalue()) + + def test_run_orders_tests(self): + self.useFixture(SampleTestFixture()) + out = StringIO() + # We load two tests - one that exists and one that doesn't, and we + # should get the one that exists and neither the one that doesn't nor + # the unmentioned one that does. + tempdir = self.useFixture(fixtures.TempDir()) + tempname = tempdir.path + '/tests.list' + f = open(tempname, 'wb') + try: + f.write(_b(""" +testtools.runexample.TestFoo.test_bar +testtools.runexample.missingtest +""")) + finally: + f.close() + try: + run.main(['prog', '-l', '--load-list', tempname, + 'testtools.runexample.test_suite'], out) + except SystemExit: + exc_info = sys.exc_info() + raise AssertionError("-l tried to exit. %r" % exc_info[1]) + self.assertEqual("""testtools.runexample.TestFoo.test_bar +""", out.getvalue()) + + def test_run_load_list(self): + self.useFixture(SampleTestFixture()) + out = StringIO() + # We load two tests - one that exists and one that doesn't, and we + # should get the one that exists and neither the one that doesn't nor + # the unmentioned one that does. 
+ tempdir = self.useFixture(fixtures.TempDir()) + tempname = tempdir.path + '/tests.list' + f = open(tempname, 'wb') + try: + f.write(_b(""" +testtools.runexample.TestFoo.test_bar +testtools.runexample.missingtest +""")) + finally: + f.close() + try: + run.main(['prog', '-l', '--load-list', tempname, + 'testtools.runexample.test_suite'], out) + except SystemExit: + exc_info = sys.exc_info() + raise AssertionError("-l tried to exit. %r" % exc_info[1]) + self.assertEqual("""testtools.runexample.TestFoo.test_bar +""", out.getvalue()) + + def test_load_list_preserves_custom_suites(self): + if testresources is None: + self.skipTest("Need testresources") + self.useFixture(SampleResourcedFixture()) + # We load two tests, not loading one. Both share a resource, so we + # should see just one resource setup occur. + tempdir = self.useFixture(fixtures.TempDir()) + tempname = tempdir.path + '/tests.list' + f = open(tempname, 'wb') + try: + f.write(_b(""" +testtools.resourceexample.TestFoo.test_bar +testtools.resourceexample.TestFoo.test_foo +""")) + finally: + f.close() + stdout = self.useFixture(fixtures.StringStream('stdout')) + with fixtures.MonkeyPatch('sys.stdout', stdout.stream): + try: + run.main(['prog', '--load-list', tempname, + 'testtools.resourceexample.test_suite'], stdout.stream) + except SystemExit: + # Evil resides in TestProgram. 
+ pass + out = stdout.getDetails()['stdout'].as_text() + self.assertEqual(1, out.count('Setting up Printer'), "%r" % out) + + def test_run_failfast(self): + stdout = self.useFixture(fixtures.StringStream('stdout')) + + class Failing(TestCase): + def test_a(self): + self.fail('a') + def test_b(self): + self.fail('b') + runner = run.TestToolsTestRunner(failfast=True) + with fixtures.MonkeyPatch('sys.stdout', stdout.stream): + runner.run(TestSuite([Failing('test_a'), Failing('test_b')])) + self.assertThat( + stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test')) + + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py new file mode 100644 index 00000000000..afbb8baf395 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py @@ -0,0 +1,303 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. 
+ +"""Tests for the RunTest single test execution logic.""" + +from testtools import ( + ExtendedToOriginalDecorator, + run_test_with, + RunTest, + TestCase, + TestResult, + ) +from testtools.matchers import MatchesException, Is, Raises +from testtools.testresult.doubles import ExtendedTestResult +from testtools.tests.helpers import FullStackRunTest + + +class TestRunTest(TestCase): + + run_tests_with = FullStackRunTest + + def make_case(self): + class Case(TestCase): + def test(self): + pass + return Case('test') + + def test___init___short(self): + run = RunTest("bar") + self.assertEqual("bar", run.case) + self.assertEqual([], run.handlers) + + def test__init____handlers(self): + handlers = [("quux", "baz")] + run = RunTest("bar", handlers) + self.assertEqual(handlers, run.handlers) + + def test_run_with_result(self): + # test.run passes result down to _run_test_method. + log = [] + class Case(TestCase): + def _run_test_method(self, result): + log.append(result) + case = Case('_run_test_method') + run = RunTest(case, lambda x: log.append(x)) + result = TestResult() + run.run(result) + self.assertEqual(1, len(log)) + self.assertEqual(result, log[0].decorated) + + def test_run_no_result_manages_new_result(self): + log = [] + run = RunTest(self.make_case(), lambda x: log.append(x) or x) + result = run.run() + self.assertIsInstance(result.decorated, TestResult) + + def test__run_core_called(self): + case = self.make_case() + log = [] + run = RunTest(case, lambda x: x) + run._run_core = lambda: log.append('foo') + run.run() + self.assertEqual(['foo'], log) + + def test__run_user_does_not_catch_keyboard(self): + case = self.make_case() + def raises(): + raise KeyboardInterrupt("yo") + run = RunTest(case, None) + run.result = ExtendedTestResult() + self.assertThat(lambda: run._run_user(raises), + Raises(MatchesException(KeyboardInterrupt))) + self.assertEqual([], run.result._events) + + def test__run_user_calls_onException(self): + case = self.make_case() + log = [] + 
def handler(exc_info): + log.append("got it") + self.assertEqual(3, len(exc_info)) + self.assertIsInstance(exc_info[1], KeyError) + self.assertIs(KeyError, exc_info[0]) + case.addOnException(handler) + e = KeyError('Yo') + def raises(): + raise e + run = RunTest(case, [(KeyError, None)]) + run.result = ExtendedTestResult() + status = run._run_user(raises) + self.assertEqual(run.exception_caught, status) + self.assertEqual([], run.result._events) + self.assertEqual(["got it"], log) + + def test__run_user_can_catch_Exception(self): + case = self.make_case() + e = Exception('Yo') + def raises(): + raise e + log = [] + run = RunTest(case, [(Exception, None)]) + run.result = ExtendedTestResult() + status = run._run_user(raises) + self.assertEqual(run.exception_caught, status) + self.assertEqual([], run.result._events) + self.assertEqual([], log) + + def test__run_user_uncaught_Exception_raised(self): + case = self.make_case() + e = KeyError('Yo') + def raises(): + raise e + log = [] + def log_exc(self, result, err): + log.append((result, err)) + run = RunTest(case, [(ValueError, log_exc)]) + run.result = ExtendedTestResult() + self.assertThat(lambda: run._run_user(raises), + Raises(MatchesException(KeyError))) + self.assertEqual([], run.result._events) + self.assertEqual([], log) + + def test__run_user_uncaught_Exception_from_exception_handler_raised(self): + case = self.make_case() + def broken_handler(exc_info): + # ValueError because thats what we know how to catch - and must + # not. 
+ raise ValueError('boo') + case.addOnException(broken_handler) + e = KeyError('Yo') + def raises(): + raise e + log = [] + def log_exc(self, result, err): + log.append((result, err)) + run = RunTest(case, [(ValueError, log_exc)]) + run.result = ExtendedTestResult() + self.assertThat(lambda: run._run_user(raises), + Raises(MatchesException(ValueError))) + self.assertEqual([], run.result._events) + self.assertEqual([], log) + + def test__run_user_returns_result(self): + case = self.make_case() + def returns(): + return 1 + run = RunTest(case) + run.result = ExtendedTestResult() + self.assertEqual(1, run._run_user(returns)) + self.assertEqual([], run.result._events) + + def test__run_one_decorates_result(self): + log = [] + class Run(RunTest): + def _run_prepared_result(self, result): + log.append(result) + return result + run = Run(self.make_case(), lambda x: x) + result = run._run_one('foo') + self.assertEqual([result], log) + self.assertIsInstance(log[0], ExtendedToOriginalDecorator) + self.assertEqual('foo', result.decorated) + + def test__run_prepared_result_calls_start_and_stop_test(self): + result = ExtendedTestResult() + case = self.make_case() + run = RunTest(case, lambda x: x) + run.run(result) + self.assertEqual([ + ('startTest', case), + ('addSuccess', case), + ('stopTest', case), + ], result._events) + + def test__run_prepared_result_calls_stop_test_always(self): + result = ExtendedTestResult() + case = self.make_case() + def inner(): + raise Exception("foo") + run = RunTest(case, lambda x: x) + run._run_core = inner + self.assertThat(lambda: run.run(result), + Raises(MatchesException(Exception("foo")))) + self.assertEqual([ + ('startTest', case), + ('stopTest', case), + ], result._events) + + +class CustomRunTest(RunTest): + + marker = object() + + def run(self, result=None): + return self.marker + + +class TestTestCaseSupportForRunTest(TestCase): + + def test_pass_custom_run_test(self): + class SomeCase(TestCase): + def test_foo(self): + pass + result 
= TestResult() + case = SomeCase('test_foo', runTest=CustomRunTest) + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_default_is_runTest_class_variable(self): + class SomeCase(TestCase): + run_tests_with = CustomRunTest + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_constructor_argument_overrides_class_variable(self): + # If a 'runTest' argument is passed to the test's constructor, that + # overrides the class variable. + marker = object() + class DifferentRunTest(RunTest): + def run(self, result=None): + return marker + class SomeCase(TestCase): + run_tests_with = CustomRunTest + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo', runTest=DifferentRunTest) + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(marker)) + + def test_decorator_for_run_test(self): + # Individual test methods can be marked as needing a special runner. + class SomeCase(TestCase): + @run_test_with(CustomRunTest) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_extended_decorator_for_run_test(self): + # Individual test methods can be marked as needing a special runner. + # Extra arguments can be passed to the decorator which will then be + # passed on to the RunTest object. 
+ marker = object() + class FooRunTest(RunTest): + def __init__(self, case, handlers=None, bar=None): + super(FooRunTest, self).__init__(case, handlers) + self.bar = bar + def run(self, result=None): + return self.bar + class SomeCase(TestCase): + @run_test_with(FooRunTest, bar=marker) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(marker)) + + def test_works_as_inner_decorator(self): + # Even if run_test_with is the innermost decorator, it will be + # respected. + def wrapped(function): + """Silly, trivial decorator.""" + def decorated(*args, **kwargs): + return function(*args, **kwargs) + decorated.__name__ = function.__name__ + decorated.__dict__.update(function.__dict__) + return decorated + class SomeCase(TestCase): + @wrapped + @run_test_with(CustomRunTest) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo') + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(CustomRunTest.marker)) + + def test_constructor_overrides_decorator(self): + # If a 'runTest' argument is passed to the test's constructor, that + # overrides the decorator. 
+ marker = object() + class DifferentRunTest(RunTest): + def run(self, result=None): + return marker + class SomeCase(TestCase): + @run_test_with(CustomRunTest) + def test_foo(self): + pass + result = TestResult() + case = SomeCase('test_foo', runTest=DifferentRunTest) + from_run_test = case.run(result) + self.assertThat(from_run_test, Is(marker)) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py new file mode 100644 index 00000000000..6112252acd9 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py @@ -0,0 +1,333 @@ +# Copyright (c) 2010 testtools developers. See LICENSE for details. + +"""Tests for the evil Twisted reactor-spinning we do.""" + +import os +import signal + +from extras import try_import + +from testtools import ( + skipIf, + TestCase, + ) +from testtools.matchers import ( + Equals, + Is, + MatchesException, + Raises, + ) + +_spinner = try_import('testtools._spinner') + +defer = try_import('twisted.internet.defer') +Failure = try_import('twisted.python.failure.Failure') + + +class NeedsTwistedTestCase(TestCase): + + def setUp(self): + super(NeedsTwistedTestCase, self).setUp() + if defer is None or Failure is None: + self.skipTest("Need Twisted to run") + + +class TestNotReentrant(NeedsTwistedTestCase): + + def test_not_reentrant(self): + # A function decorated as not being re-entrant will raise a + # _spinner.ReentryError if it is called while it is running. 
+ calls = [] + @_spinner.not_reentrant + def log_something(): + calls.append(None) + if len(calls) < 5: + log_something() + self.assertThat( + log_something, Raises(MatchesException(_spinner.ReentryError))) + self.assertEqual(1, len(calls)) + + def test_deeper_stack(self): + calls = [] + @_spinner.not_reentrant + def g(): + calls.append(None) + if len(calls) < 5: + f() + @_spinner.not_reentrant + def f(): + calls.append(None) + if len(calls) < 5: + g() + self.assertThat(f, Raises(MatchesException(_spinner.ReentryError))) + self.assertEqual(2, len(calls)) + + +class TestExtractResult(NeedsTwistedTestCase): + + def test_not_fired(self): + # _spinner.extract_result raises _spinner.DeferredNotFired if it's + # given a Deferred that has not fired. + self.assertThat(lambda:_spinner.extract_result(defer.Deferred()), + Raises(MatchesException(_spinner.DeferredNotFired))) + + def test_success(self): + # _spinner.extract_result returns the value of the Deferred if it has + # fired successfully. + marker = object() + d = defer.succeed(marker) + self.assertThat(_spinner.extract_result(d), Equals(marker)) + + def test_failure(self): + # _spinner.extract_result raises the failure's exception if it's given + # a Deferred that is failing. 
+ try: + 1/0 + except ZeroDivisionError: + f = Failure() + d = defer.fail(f) + self.assertThat(lambda:_spinner.extract_result(d), + Raises(MatchesException(ZeroDivisionError))) + + +class TestTrapUnhandledErrors(NeedsTwistedTestCase): + + def test_no_deferreds(self): + marker = object() + result, errors = _spinner.trap_unhandled_errors(lambda: marker) + self.assertEqual([], errors) + self.assertIs(marker, result) + + def test_unhandled_error(self): + failures = [] + def make_deferred_but_dont_handle(): + try: + 1/0 + except ZeroDivisionError: + f = Failure() + failures.append(f) + defer.fail(f) + result, errors = _spinner.trap_unhandled_errors( + make_deferred_but_dont_handle) + self.assertIs(None, result) + self.assertEqual(failures, [error.failResult for error in errors]) + + +class TestRunInReactor(NeedsTwistedTestCase): + + def make_reactor(self): + from twisted.internet import reactor + return reactor + + def make_spinner(self, reactor=None): + if reactor is None: + reactor = self.make_reactor() + return _spinner.Spinner(reactor) + + def make_timeout(self): + return 0.01 + + def test_function_called(self): + # run_in_reactor actually calls the function given to it. + calls = [] + marker = object() + self.make_spinner().run(self.make_timeout(), calls.append, marker) + self.assertThat(calls, Equals([marker])) + + def test_return_value_returned(self): + # run_in_reactor returns the value returned by the function given to + # it. + marker = object() + result = self.make_spinner().run(self.make_timeout(), lambda: marker) + self.assertThat(result, Is(marker)) + + def test_exception_reraised(self): + # If the given function raises an error, run_in_reactor re-raises that + # error. + self.assertThat( + lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0), + Raises(MatchesException(ZeroDivisionError))) + + def test_keyword_arguments(self): + # run_in_reactor passes keyword arguments on. 
+ calls = [] + function = lambda *a, **kw: calls.extend([a, kw]) + self.make_spinner().run(self.make_timeout(), function, foo=42) + self.assertThat(calls, Equals([(), {'foo': 42}])) + + def test_not_reentrant(self): + # run_in_reactor raises an error if it is called inside another call + # to run_in_reactor. + spinner = self.make_spinner() + self.assertThat(lambda: spinner.run( + self.make_timeout(), spinner.run, self.make_timeout(), + lambda: None), Raises(MatchesException(_spinner.ReentryError))) + + def test_deferred_value_returned(self): + # If the given function returns a Deferred, run_in_reactor returns the + # value in the Deferred at the end of the callback chain. + marker = object() + result = self.make_spinner().run( + self.make_timeout(), lambda: defer.succeed(marker)) + self.assertThat(result, Is(marker)) + + def test_preserve_signal_handler(self): + signals = ['SIGINT', 'SIGTERM', 'SIGCHLD'] + signals = filter( + None, (getattr(signal, name, None) for name in signals)) + for sig in signals: + self.addCleanup(signal.signal, sig, signal.getsignal(sig)) + new_hdlrs = list(lambda *a: None for _ in signals) + for sig, hdlr in zip(signals, new_hdlrs): + signal.signal(sig, hdlr) + spinner = self.make_spinner() + spinner.run(self.make_timeout(), lambda: None) + self.assertEqual(new_hdlrs, map(signal.getsignal, signals)) + + def test_timeout(self): + # If the function takes too long to run, we raise a + # _spinner.TimeoutError. + timeout = self.make_timeout() + self.assertThat( + lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()), + Raises(MatchesException(_spinner.TimeoutError))) + + def test_no_junk_by_default(self): + # If the reactor hasn't spun yet, then there cannot be any junk. + spinner = self.make_spinner() + self.assertThat(spinner.get_junk(), Equals([])) + + def test_clean_do_nothing(self): + # If there's nothing going on in the reactor, then clean does nothing + # and returns an empty list. 
+ spinner = self.make_spinner() + result = spinner._clean() + self.assertThat(result, Equals([])) + + def test_clean_delayed_call(self): + # If there's a delayed call in the reactor, then clean cancels it and + # returns an empty list. + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + call = reactor.callLater(10, lambda: None) + results = spinner._clean() + self.assertThat(results, Equals([call])) + self.assertThat(call.active(), Equals(False)) + + def test_clean_delayed_call_cancelled(self): + # If there's a delayed call that's just been cancelled, then it's no + # longer there. + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + call = reactor.callLater(10, lambda: None) + call.cancel() + results = spinner._clean() + self.assertThat(results, Equals([])) + + def test_clean_selectables(self): + # If there's still a selectable (e.g. a listening socket), then + # clean() removes it from the reactor's registry. + # + # Note that the socket is left open. This emulates a bug in trial. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1') + spinner.run(self.make_timeout(), lambda: None) + results = spinner.get_junk() + self.assertThat(results, Equals([port])) + + def test_clean_running_threads(self): + import threading + import time + current_threads = list(threading.enumerate()) + reactor = self.make_reactor() + timeout = self.make_timeout() + spinner = self.make_spinner(reactor) + spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0) + # Python before 2.5 has a race condition with thread handling where + # join() does not remove threads from enumerate before returning - the + # thread being joined does the removal. This was fixed in Python 2.5 + # but we still support 2.4, so we have to workaround the issue. + # http://bugs.python.org/issue1703448. 
+ self.assertThat( + [thread for thread in threading.enumerate() if thread.isAlive()], + Equals(current_threads)) + + def test_leftover_junk_available(self): + # If 'run' is given a function that leaves the reactor dirty in some + # way, 'run' will clean up the reactor and then store information + # about the junk. This information can be got using get_junk. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + port = spinner.run( + self.make_timeout(), reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1') + self.assertThat(spinner.get_junk(), Equals([port])) + + def test_will_not_run_with_previous_junk(self): + # If 'run' is called and there's still junk in the spinner's junk + # list, then the spinner will refuse to run. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1') + self.assertThat(lambda: spinner.run(timeout, lambda: None), + Raises(MatchesException(_spinner.StaleJunkError))) + + def test_clear_junk_clears_previous_junk(self): + # If 'run' is called and there's still junk in the spinner's junk + # list, then the spinner will refuse to run. + from twisted.internet.protocol import ServerFactory + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1') + junk = spinner.clear_junk() + self.assertThat(junk, Equals([port])) + self.assertThat(spinner.get_junk(), Equals([])) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_sigint_raises_no_result_error(self): + # If we get a SIGINT during a run, we raise _spinner.NoResultError. 
+ SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + self.skipTest("SIGINT not available") + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + reactor.callLater(timeout, os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred), + Raises(MatchesException(_spinner.NoResultError))) + self.assertEqual([], spinner._clean()) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_sigint_raises_no_result_error_second_time(self): + # If we get a SIGINT during a run, we raise _spinner.NoResultError. + # This test is exactly the same as test_sigint_raises_no_result_error, + # and exists to make sure we haven't futzed with state. + self.test_sigint_raises_no_result_error() + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_fast_sigint_raises_no_result_error(self): + # If we get a SIGINT during a run, we raise _spinner.NoResultError. 
+ SIGINT = getattr(signal, 'SIGINT', None) + if not SIGINT: + self.skipTest("SIGINT not available") + reactor = self.make_reactor() + spinner = self.make_spinner(reactor) + timeout = self.make_timeout() + reactor.callWhenRunning(os.kill, os.getpid(), SIGINT) + self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred), + Raises(MatchesException(_spinner.NoResultError))) + self.assertEqual([], spinner._clean()) + + @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only") + def test_fast_sigint_raises_no_result_error_second_time(self): + self.test_fast_sigint_raises_no_result_error() + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py new file mode 100644 index 00000000000..5010f9ac12c --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py @@ -0,0 +1,84 @@ +# Copyright (c) 2012 testtools developers. See LICENSE for details. + +"""Test tag support.""" + + +from testtools import TestCase +from testtools.tags import TagContext + + +class TestTags(TestCase): + + def test_no_tags(self): + # A tag context has no tags initially. + tag_context = TagContext() + self.assertEqual(set(), tag_context.get_current_tags()) + + def test_add_tag(self): + # A tag added with change_tags appears in get_current_tags. + tag_context = TagContext() + tag_context.change_tags(set(['foo']), set()) + self.assertEqual(set(['foo']), tag_context.get_current_tags()) + + def test_add_tag_twice(self): + # Calling change_tags twice to add tags adds both tags to the current + # tags. 
+ tag_context = TagContext() + tag_context.change_tags(set(['foo']), set()) + tag_context.change_tags(set(['bar']), set()) + self.assertEqual( + set(['foo', 'bar']), tag_context.get_current_tags()) + + def test_change_tags_returns_tags(self): + # change_tags returns the current tags. This is a convenience. + tag_context = TagContext() + tags = tag_context.change_tags(set(['foo']), set()) + self.assertEqual(set(['foo']), tags) + + def test_remove_tag(self): + # change_tags can remove tags from the context. + tag_context = TagContext() + tag_context.change_tags(set(['foo']), set()) + tag_context.change_tags(set(), set(['foo'])) + self.assertEqual(set(), tag_context.get_current_tags()) + + def test_child_context(self): + # A TagContext can have a parent. If so, its tags are the tags of the + # parent at the moment of construction. + parent = TagContext() + parent.change_tags(set(['foo']), set()) + child = TagContext(parent) + self.assertEqual( + parent.get_current_tags(), child.get_current_tags()) + + def test_add_to_child(self): + # Adding a tag to the child context doesn't affect the parent. + parent = TagContext() + parent.change_tags(set(['foo']), set()) + child = TagContext(parent) + child.change_tags(set(['bar']), set()) + self.assertEqual(set(['foo', 'bar']), child.get_current_tags()) + self.assertEqual(set(['foo']), parent.get_current_tags()) + + def test_remove_in_child(self): + # A tag that was in the parent context can be removed from the child + # context without affect the parent. + parent = TagContext() + parent.change_tags(set(['foo']), set()) + child = TagContext(parent) + child.change_tags(set(), set(['foo'])) + self.assertEqual(set(), child.get_current_tags()) + self.assertEqual(set(['foo']), parent.get_current_tags()) + + def test_parent(self): + # The parent can be retrieved from a child context. 
+ parent = TagContext() + parent.change_tags(set(['foo']), set()) + child = TagContext(parent) + child.change_tags(set(), set(['foo'])) + self.assertEqual(parent, child.parent) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py new file mode 100644 index 00000000000..680368db4a1 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py @@ -0,0 +1,1550 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. + +"""Tests for extensions to the base test library.""" + +from doctest import ELLIPSIS +from pprint import pformat +import sys +import unittest + +from testtools import ( + DecorateTestCaseResult, + ErrorHolder, + MultipleExceptions, + PlaceHolder, + TestCase, + clone_test_with_new_id, + content, + skip, + skipIf, + skipUnless, + testcase, + ) +from testtools.compat import ( + _b, + _u, + ) +from testtools.content import ( + text_content, + TracebackContent, + ) +from testtools.matchers import ( + Annotate, + DocTestMatches, + Equals, + HasLength, + MatchesException, + Raises, + ) +from testtools.testcase import ( + attr, + Nullary, + WithAttributes, + ) +from testtools.testresult.doubles import ( + Python26TestResult, + Python27TestResult, + ExtendedTestResult, + ) +from testtools.tests.helpers import ( + an_exc_info, + FullStackRunTest, + LoggingResult, + ) +try: + exec('from __future__ import with_statement') +except SyntaxError: + pass +else: + from testtools.tests.test_with_with import * + + +class TestPlaceHolder(TestCase): + + run_test_with = FullStackRunTest + + def makePlaceHolder(self, test_id="foo", short_description=None): + return PlaceHolder(test_id, short_description) + + def test_id_comes_from_constructor(self): + # The id() 
of a PlaceHolder is whatever you pass into the constructor. + test = PlaceHolder("test id") + self.assertEqual("test id", test.id()) + + def test_shortDescription_is_id(self): + # The shortDescription() of a PlaceHolder is the id, by default. + test = PlaceHolder("test id") + self.assertEqual(test.id(), test.shortDescription()) + + def test_shortDescription_specified(self): + # If a shortDescription is provided to the constructor, then + # shortDescription() returns that instead. + test = PlaceHolder("test id", "description") + self.assertEqual("description", test.shortDescription()) + + def test_repr_just_id(self): + # repr(placeholder) shows you how the object was constructed. + test = PlaceHolder("test id") + self.assertEqual( + "<testtools.testcase.PlaceHolder('addSuccess', %s, {})>" % repr( + test.id()), repr(test)) + + def test_repr_with_description(self): + # repr(placeholder) shows you how the object was constructed. + test = PlaceHolder("test id", "description") + self.assertEqual( + "<testtools.testcase.PlaceHolder('addSuccess', %r, {}, %r)>" % ( + test.id(), test.shortDescription()), repr(test)) + + def test_repr_custom_outcome(self): + test = PlaceHolder("test id", outcome='addSkip') + self.assertEqual( + "<testtools.testcase.PlaceHolder('addSkip', %r, {})>" % ( + test.id()), repr(test)) + + def test_counts_as_one_test(self): + # A placeholder test counts as one test. + test = self.makePlaceHolder() + self.assertEqual(1, test.countTestCases()) + + def test_str_is_id(self): + # str(placeholder) is always the id(). We are not barbarians. + test = self.makePlaceHolder() + self.assertEqual(test.id(), str(test)) + + def test_runs_as_success(self): + # When run, a PlaceHolder test records a success. 
+ test = self.makePlaceHolder() + log = [] + test.run(LoggingResult(log)) + self.assertEqual( + [('tags', set(), set()), ('startTest', test), ('addSuccess', test), + ('stopTest', test), ('tags', set(), set()),], + log) + + def test_supplies_details(self): + details = {'quux':None} + test = PlaceHolder('foo', details=details) + result = ExtendedTestResult() + test.run(result) + self.assertEqual( + [('tags', set(), set()), + ('startTest', test), + ('addSuccess', test, details), + ('stopTest', test), + ('tags', set(), set()), + ], + result._events) + + def test_supplies_timestamps(self): + test = PlaceHolder('foo', details={}, timestamps=["A", "B"]) + result = ExtendedTestResult() + test.run(result) + self.assertEqual( + [('time', "A"), + ('tags', set(), set()), + ('startTest', test), + ('time', "B"), + ('addSuccess', test), + ('stopTest', test), + ('tags', set(), set()), + ], + result._events) + + def test_call_is_run(self): + # A PlaceHolder can be called, in which case it behaves like run. + test = self.makePlaceHolder() + run_log = [] + test.run(LoggingResult(run_log)) + call_log = [] + test(LoggingResult(call_log)) + self.assertEqual(run_log, call_log) + + def test_runs_without_result(self): + # A PlaceHolder can be run without a result, in which case there's no + # way to actually get at the result. + self.makePlaceHolder().run() + + def test_debug(self): + # A PlaceHolder can be debugged. + self.makePlaceHolder().debug() + + def test_supports_tags(self): + result = ExtendedTestResult() + tags = set(['foo', 'bar']) + case = PlaceHolder("foo", tags=tags) + case.run(result) + self.assertEqual([ + ('tags', tags, set()), + ('startTest', case), + ('addSuccess', case), + ('stopTest', case), + ('tags', set(), tags), + ], result._events) + + +class TestErrorHolder(TestCase): + # Note that these tests exist because ErrorHolder exists - it could be + # deprecated and dropped at this point. 
+ + run_test_with = FullStackRunTest + + def makeException(self): + try: + raise RuntimeError("danger danger") + except: + return sys.exc_info() + + def makePlaceHolder(self, test_id="foo", error=None, + short_description=None): + if error is None: + error = self.makeException() + return ErrorHolder(test_id, error, short_description) + + def test_id_comes_from_constructor(self): + # The id() of a PlaceHolder is whatever you pass into the constructor. + test = ErrorHolder("test id", self.makeException()) + self.assertEqual("test id", test.id()) + + def test_shortDescription_is_id(self): + # The shortDescription() of a PlaceHolder is the id, by default. + test = ErrorHolder("test id", self.makeException()) + self.assertEqual(test.id(), test.shortDescription()) + + def test_shortDescription_specified(self): + # If a shortDescription is provided to the constructor, then + # shortDescription() returns that instead. + test = ErrorHolder("test id", self.makeException(), "description") + self.assertEqual("description", test.shortDescription()) + + def test_counts_as_one_test(self): + # A placeholder test counts as one test. + test = self.makePlaceHolder() + self.assertEqual(1, test.countTestCases()) + + def test_str_is_id(self): + # str(placeholder) is always the id(). We are not barbarians. + test = self.makePlaceHolder() + self.assertEqual(test.id(), str(test)) + + def test_runs_as_error(self): + # When run, an ErrorHolder test records an error. + error = self.makeException() + test = self.makePlaceHolder(error=error) + result = ExtendedTestResult() + log = result._events + test.run(result) + self.assertEqual( + [('tags', set(), set()), + ('startTest', test), + ('addError', test, test._details), + ('stopTest', test), + ('tags', set(), set())], log) + + def test_call_is_run(self): + # A PlaceHolder can be called, in which case it behaves like run. 
+ test = self.makePlaceHolder() + run_log = [] + test.run(LoggingResult(run_log)) + call_log = [] + test(LoggingResult(call_log)) + self.assertEqual(run_log, call_log) + + def test_runs_without_result(self): + # A PlaceHolder can be run without a result, in which case there's no + # way to actually get at the result. + self.makePlaceHolder().run() + + def test_debug(self): + # A PlaceHolder can be debugged. + self.makePlaceHolder().debug() + + +class TestEquality(TestCase): + """Test ``TestCase``'s equality implementation.""" + + run_test_with = FullStackRunTest + + def test_identicalIsEqual(self): + # TestCase's are equal if they are identical. + self.assertEqual(self, self) + + def test_nonIdenticalInUnequal(self): + # TestCase's are not equal if they are not identical. + self.assertNotEqual(TestCase(methodName='run'), + TestCase(methodName='skip')) + + +class TestAssertions(TestCase): + """Test assertions in TestCase.""" + + run_test_with = FullStackRunTest + + def raiseError(self, exceptionFactory, *args, **kwargs): + raise exceptionFactory(*args, **kwargs) + + def test_formatTypes_single(self): + # Given a single class, _formatTypes returns the name. + class Foo(object): + pass + self.assertEqual('Foo', self._formatTypes(Foo)) + + def test_formatTypes_multiple(self): + # Given multiple types, _formatTypes returns the names joined by + # commas. + class Foo(object): + pass + class Bar(object): + pass + self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar])) + + def test_assertRaises(self): + # assertRaises asserts that a callable raises a particular exception. 
+ self.assertRaises(RuntimeError, self.raiseError, RuntimeError) + + def test_assertRaises_exception_w_metaclass(self): + # assertRaises works when called for exceptions with custom metaclasses + class MyExMeta(type): + def __init__(cls, name, bases, dct): + """ Do some dummy metaclass stuff """ + dct.update({'answer': 42}) + type.__init__(cls, name, bases, dct) + + class MyEx(Exception): + __metaclass__ = MyExMeta + + self.assertRaises(MyEx, self.raiseError, MyEx) + + def test_assertRaises_fails_when_no_error_raised(self): + # assertRaises raises self.failureException when it's passed a + # callable that raises no error. + ret = ('orange', 42) + self.assertFails( + "<function ...<lambda> at ...> returned ('orange', 42)", + self.assertRaises, RuntimeError, lambda: ret) + + def test_assertRaises_fails_when_different_error_raised(self): + # assertRaises re-raises an exception that it didn't expect. + self.assertThat(lambda: self.assertRaises(RuntimeError, + self.raiseError, ZeroDivisionError), + Raises(MatchesException(ZeroDivisionError))) + + def test_assertRaises_returns_the_raised_exception(self): + # assertRaises returns the exception object that was raised. This is + # useful for testing that exceptions have the right message. + + # This contraption stores the raised exception, so we can compare it + # to the return value of assertRaises. + raisedExceptions = [] + def raiseError(): + try: + raise RuntimeError('Deliberate error') + except RuntimeError: + raisedExceptions.append(sys.exc_info()[1]) + raise + + exception = self.assertRaises(RuntimeError, raiseError) + self.assertEqual(1, len(raisedExceptions)) + self.assertTrue( + exception is raisedExceptions[0], + "%r is not %r" % (exception, raisedExceptions[0])) + + def test_assertRaises_with_multiple_exceptions(self): + # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that + # function raises one of ExceptionTwo or ExceptionOne. 
+ expectedExceptions = (RuntimeError, ZeroDivisionError) + self.assertRaises( + expectedExceptions, self.raiseError, expectedExceptions[0]) + self.assertRaises( + expectedExceptions, self.raiseError, expectedExceptions[1]) + + def test_assertRaises_with_multiple_exceptions_failure_mode(self): + # If assertRaises is called expecting one of a group of exceptions and + # a callable that doesn't raise an exception, then fail with an + # appropriate error message. + expectedExceptions = (RuntimeError, ZeroDivisionError) + self.assertRaises( + self.failureException, + self.assertRaises, expectedExceptions, lambda: None) + self.assertFails('<function ...<lambda> at ...> returned None', + self.assertRaises, expectedExceptions, lambda: None) + + def test_assertRaises_function_repr_in_exception(self): + # When assertRaises fails, it includes the repr of the invoked + # function in the error message, so it's easy to locate the problem. + def foo(): + """An arbitrary function.""" + pass + self.assertThat( + lambda: self.assertRaises(Exception, foo), + Raises( + MatchesException(self.failureException, '.*%r.*' % (foo,)))) + + def assertFails(self, message, function, *args, **kwargs): + """Assert that function raises a failure with the given message.""" + failure = self.assertRaises( + self.failureException, function, *args, **kwargs) + self.assertThat(failure, DocTestMatches(message, ELLIPSIS)) + + def test_assertIn_success(self): + # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'. + self.assertIn(3, range(10)) + self.assertIn('foo', 'foo bar baz') + self.assertIn('foo', 'foo bar baz'.split()) + + def test_assertIn_failure(self): + # assertIn(needle, haystack) fails the test when 'needle' is not in + # 'haystack'. 
+ self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2]) + self.assertFails( + '%r not in %r' % ('qux', 'foo bar baz'), + self.assertIn, 'qux', 'foo bar baz') + + def test_assertNotIn_success(self): + # assertNotIn(needle, haystack) asserts that 'needle' is not in + # 'haystack'. + self.assertNotIn(3, [0, 1, 2]) + self.assertNotIn('qux', 'foo bar baz') + + def test_assertNotIn_failure(self): + # assertNotIn(needle, haystack) fails the test when 'needle' is in + # 'haystack'. + self.assertFails('[1, 2, 3] matches Contains(3)', self.assertNotIn, + 3, [1, 2, 3]) + self.assertFails( + "'foo bar baz' matches Contains('foo')", + self.assertNotIn, 'foo', 'foo bar baz') + + def test_assertIsInstance(self): + # assertIsInstance asserts that an object is an instance of a class. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + foo = Foo() + self.assertIsInstance(foo, Foo) + + def test_assertIsInstance_multiple_classes(self): + # assertIsInstance asserts that an object is an instance of one of a + # group of classes. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + class Bar(object): + """Another simple class for testing assertIsInstance.""" + + foo = Foo() + self.assertIsInstance(foo, (Foo, Bar)) + self.assertIsInstance(Bar(), (Foo, Bar)) + + def test_assertIsInstance_failure(self): + # assertIsInstance(obj, klass) fails the test when obj is not an + # instance of klass. + + class Foo(object): + """Simple class for testing assertIsInstance.""" + + self.assertFails( + "'42' is not an instance of %s" % self._formatTypes(Foo), + self.assertIsInstance, 42, Foo) + + def test_assertIsInstance_failure_multiple_classes(self): + # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is + # not an instance of klass1 or klass2. 
+ + class Foo(object): + """Simple class for testing assertIsInstance.""" + + class Bar(object): + """Another simple class for testing assertIsInstance.""" + + self.assertFails( + "'42' is not an instance of any of (%s)" % self._formatTypes([Foo, Bar]), + self.assertIsInstance, 42, (Foo, Bar)) + + def test_assertIsInstance_overridden_message(self): + # assertIsInstance(obj, klass, msg) permits a custom message. + self.assertFails("'42' is not an instance of str: foo", + self.assertIsInstance, 42, str, "foo") + + def test_assertIs(self): + # assertIs asserts that an object is identical to another object. + self.assertIs(None, None) + some_list = [42] + self.assertIs(some_list, some_list) + some_object = object() + self.assertIs(some_object, some_object) + + def test_assertIs_fails(self): + # assertIs raises assertion errors if one object is not identical to + # another. + self.assertFails('None is not 42', self.assertIs, None, 42) + self.assertFails('[42] is not [42]', self.assertIs, [42], [42]) + + def test_assertIs_fails_with_message(self): + # assertIs raises assertion errors if one object is not identical to + # another, and includes a user-supplied message, if it's provided. + self.assertFails( + 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar') + + def test_assertIsNot(self): + # assertIsNot asserts that an object is not identical to another + # object. + self.assertIsNot(None, 42) + self.assertIsNot([42], [42]) + self.assertIsNot(object(), object()) + + def test_assertIsNot_fails(self): + # assertIsNot raises assertion errors if one object is identical to + # another. + self.assertFails('None matches Is(None)', self.assertIsNot, None, None) + some_list = [42] + self.assertFails( + '[42] matches Is([42])', self.assertIsNot, some_list, some_list) + + def test_assertIsNot_fails_with_message(self): + # assertIsNot raises assertion errors if one object is identical to + # another, and includes a user-supplied message if it's provided. 
+ self.assertFails( + 'None matches Is(None): foo bar', self.assertIsNot, None, None, + "foo bar") + + def test_assertThat_matches_clean(self): + class Matcher(object): + def match(self, foo): + return None + self.assertThat("foo", Matcher()) + + def test_assertThat_mismatch_raises_description(self): + calls = [] + class Mismatch(object): + def __init__(self, thing): + self.thing = thing + def describe(self): + calls.append(('describe_diff', self.thing)) + return "object is not a thing" + def get_details(self): + return {} + class Matcher(object): + def match(self, thing): + calls.append(('match', thing)) + return Mismatch(thing) + def __str__(self): + calls.append(('__str__',)) + return "a description" + class Test(TestCase): + def test(self): + self.assertThat("foo", Matcher()) + result = Test("test").run() + self.assertEqual([ + ('match', "foo"), + ('describe_diff', "foo"), + ], calls) + self.assertFalse(result.wasSuccessful()) + + def test_assertThat_output(self): + matchee = 'foo' + matcher = Equals('bar') + expected = matcher.match(matchee).describe() + self.assertFails(expected, self.assertThat, matchee, matcher) + + def test_assertThat_message_is_annotated(self): + matchee = 'foo' + matcher = Equals('bar') + expected = Annotate('woo', matcher).match(matchee).describe() + self.assertFails(expected, self.assertThat, matchee, matcher, 'woo') + + def test_assertThat_verbose_output(self): + matchee = 'foo' + matcher = Equals('bar') + expected = ( + 'Match failed. 
Matchee: %r\n' + 'Matcher: %s\n' + 'Difference: %s\n' % ( + matchee, + matcher, + matcher.match(matchee).describe(), + )) + self.assertFails( + expected, self.assertThat, matchee, matcher, verbose=True) + + def test__force_failure_fails_test(self): + class Test(TestCase): + def test_foo(self): + self.force_failure = True + self.remaining_code_run = True + test = Test('test_foo') + result = test.run() + self.assertFalse(result.wasSuccessful()) + self.assertTrue(test.remaining_code_run) + + def get_error_string(self, e): + """Get the string showing how 'e' would be formatted in test output. + + This is a little bit hacky, since it's designed to give consistent + output regardless of Python version. + + In testtools, TestResult._exc_info_to_unicode is the point of dispatch + between various different implementations of methods that format + exceptions, so that's what we have to call. However, that method cares + about stack traces and formats the exception class. We don't care + about either of these, so we take its output and parse it a little. + """ + error = TracebackContent((e.__class__, e, None), self).as_text() + # We aren't at all interested in the traceback. + if error.startswith('Traceback (most recent call last):\n'): + lines = error.splitlines(True)[1:] + for i, line in enumerate(lines): + if not line.startswith(' '): + break + error = ''.join(lines[i:]) + # We aren't interested in how the exception type is formatted. + exc_class, error = error.split(': ', 1) + return error + + def test_assertThat_verbose_unicode(self): + # When assertThat is given matchees or matchers that contain non-ASCII + # unicode strings, we can still provide a meaningful error. + matchee = _u('\xa7') + matcher = Equals(_u('a')) + expected = ( + 'Match failed. 
Matchee: %s\n' + 'Matcher: %s\n' + 'Difference: %s\n\n' % ( + repr(matchee).replace("\\xa7", matchee), + matcher, + matcher.match(matchee).describe(), + )) + e = self.assertRaises( + self.failureException, self.assertThat, matchee, matcher, + verbose=True) + self.assertEqual(expected, self.get_error_string(e)) + + def test_assertEqual_nice_formatting(self): + message = "These things ought not be equal." + a = ['apple', 'banana', 'cherry'] + b = {'Thatcher': 'One who mends roofs of straw', + 'Major': 'A military officer, ranked below colonel', + 'Blair': 'To shout loudly', + 'Brown': 'The colour of healthy human faeces'} + expected_error = '\n'.join([ + '!=:', + 'reference = %s' % pformat(a), + 'actual = %s' % pformat(b), + ': ' + message, + ]) + self.assertFails(expected_error, self.assertEqual, a, b, message) + self.assertFails(expected_error, self.assertEquals, a, b, message) + self.assertFails(expected_error, self.failUnlessEqual, a, b, message) + + def test_assertEqual_formatting_no_message(self): + a = "cat" + b = "dog" + expected_error = "'cat' != 'dog'" + self.assertFails(expected_error, self.assertEqual, a, b) + self.assertFails(expected_error, self.assertEquals, a, b) + self.assertFails(expected_error, self.failUnlessEqual, a, b) + + def test_assertEqual_non_ascii_str_with_newlines(self): + message = _u("Be careful mixing unicode and bytes") + a = "a\n\xa7\n" + b = "Just a longish string so the more verbose output form is used." 
+ expected_error = '\n'.join([ + '!=:', + "reference = '''\\", + 'a', + repr('\xa7')[1:-1], + "'''", + 'actual = %r' % (b,), + ': ' + message, + ]) + self.assertFails(expected_error, self.assertEqual, a, b, message) + + def test_assertIsNone(self): + self.assertIsNone(None) + + expected_error = 'None is not 0' + self.assertFails(expected_error, self.assertIsNone, 0) + + def test_assertIsNotNone(self): + self.assertIsNotNone(0) + self.assertIsNotNone("0") + + expected_error = 'None matches Is(None)' + self.assertFails(expected_error, self.assertIsNotNone, None) + + + def test_fail_preserves_traceback_detail(self): + class Test(TestCase): + def test(self): + self.addDetail('traceback', text_content('foo')) + self.fail('bar') + test = Test('test') + result = ExtendedTestResult() + test.run(result) + self.assertEqual(set(['traceback', 'traceback-1']), + set(result._events[1][2].keys())) + + +class TestAddCleanup(TestCase): + """Tests for TestCase.addCleanup.""" + + run_test_with = FullStackRunTest + + class LoggingTest(TestCase): + """A test that logs calls to setUp, runTest and tearDown.""" + + def setUp(self): + TestCase.setUp(self) + self._calls = ['setUp'] + + def brokenSetUp(self): + # A tearDown that deliberately fails. 
+ self._calls = ['brokenSetUp'] + raise RuntimeError('Deliberate Failure') + + def runTest(self): + self._calls.append('runTest') + + def brokenTest(self): + raise RuntimeError('Deliberate broken test') + + def tearDown(self): + self._calls.append('tearDown') + TestCase.tearDown(self) + + def setUp(self): + TestCase.setUp(self) + self._result_calls = [] + self.test = TestAddCleanup.LoggingTest('runTest') + self.logging_result = LoggingResult(self._result_calls) + + def assertErrorLogEqual(self, messages): + self.assertEqual(messages, [call[0] for call in self._result_calls]) + + def assertTestLogEqual(self, messages): + """Assert that the call log equals 'messages'.""" + case = self._result_calls[0][1] + self.assertEqual(messages, case._calls) + + def logAppender(self, message): + """A cleanup that appends 'message' to the tests log. + + Cleanups are callables that are added to a test by addCleanup. To + verify that our cleanups run in the right order, we add strings to a + list that acts as a log. This method returns a cleanup that will add + the given message to that log when run. + """ + self.test._calls.append(message) + + def test_fixture(self): + # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'. + # This test doesn't test addCleanup itself, it just sanity checks the + # fixture. + self.test.run(self.logging_result) + self.assertTestLogEqual(['setUp', 'runTest', 'tearDown']) + + def test_cleanup_run_before_tearDown(self): + # Cleanup functions added with 'addCleanup' are called before tearDown + # runs. + self.test.addCleanup(self.logAppender, 'cleanup') + self.test.run(self.logging_result) + self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup']) + + def test_add_cleanup_called_if_setUp_fails(self): + # Cleanup functions added with 'addCleanup' are called even if setUp + # fails. Note that tearDown has a different behavior: it is only + # called when setUp succeeds. 
+ self.test.setUp = self.test.brokenSetUp + self.test.addCleanup(self.logAppender, 'cleanup') + self.test.run(self.logging_result) + self.assertTestLogEqual(['brokenSetUp', 'cleanup']) + + def test_addCleanup_called_in_reverse_order(self): + # Cleanup functions added with 'addCleanup' are called in reverse + # order. + # + # One of the main uses of addCleanup is to dynamically create + # resources that need some sort of explicit tearDown. Often one + # resource will be created in terms of another, e.g., + # self.first = self.makeFirst() + # self.second = self.makeSecond(self.first) + # + # When this happens, we generally want to clean up the second resource + # before the first one, since the second depends on the first. + self.test.addCleanup(self.logAppender, 'first') + self.test.addCleanup(self.logAppender, 'second') + self.test.run(self.logging_result) + self.assertTestLogEqual( + ['setUp', 'runTest', 'tearDown', 'second', 'first']) + + def test_tearDown_runs_after_cleanup_failure(self): + # tearDown runs even if a cleanup function fails. + self.test.addCleanup(lambda: 1/0) + self.test.run(self.logging_result) + self.assertTestLogEqual(['setUp', 'runTest', 'tearDown']) + + def test_cleanups_continue_running_after_error(self): + # All cleanups are always run, even if one or two of them fail. + self.test.addCleanup(self.logAppender, 'first') + self.test.addCleanup(lambda: 1/0) + self.test.addCleanup(self.logAppender, 'second') + self.test.run(self.logging_result) + self.assertTestLogEqual( + ['setUp', 'runTest', 'tearDown', 'second', 'first']) + + def test_error_in_cleanups_are_captured(self): + # If a cleanup raises an error, we want to record it and fail the the + # test, even though we go on to run other cleanups. + self.test.addCleanup(lambda: 1/0) + self.test.run(self.logging_result) + self.assertErrorLogEqual(['startTest', 'addError', 'stopTest']) + + def test_keyboard_interrupt_not_caught(self): + # If a cleanup raises KeyboardInterrupt, it gets reraised. 
+ def raiseKeyboardInterrupt(): + raise KeyboardInterrupt() + self.test.addCleanup(raiseKeyboardInterrupt) + self.assertThat(lambda:self.test.run(self.logging_result), + Raises(MatchesException(KeyboardInterrupt))) + + def test_all_errors_from_MultipleExceptions_reported(self): + # When a MultipleExceptions exception is caught, all the errors are + # reported. + def raiseMany(): + try: + 1/0 + except Exception: + exc_info1 = sys.exc_info() + try: + 1/0 + except Exception: + exc_info2 = sys.exc_info() + raise MultipleExceptions(exc_info1, exc_info2) + self.test.addCleanup(raiseMany) + self.logging_result = ExtendedTestResult() + self.test.run(self.logging_result) + self.assertEqual(['startTest', 'addError', 'stopTest'], + [event[0] for event in self.logging_result._events]) + self.assertEqual(set(['traceback', 'traceback-1']), + set(self.logging_result._events[1][2].keys())) + + def test_multipleCleanupErrorsReported(self): + # Errors from all failing cleanups are reported as separate backtraces. + self.test.addCleanup(lambda: 1/0) + self.test.addCleanup(lambda: 1/0) + self.logging_result = ExtendedTestResult() + self.test.run(self.logging_result) + self.assertEqual(['startTest', 'addError', 'stopTest'], + [event[0] for event in self.logging_result._events]) + self.assertEqual(set(['traceback', 'traceback-1']), + set(self.logging_result._events[1][2].keys())) + + def test_multipleErrorsCoreAndCleanupReported(self): + # Errors from all failing cleanups are reported, with stopTest, + # startTest inserted. 
+ self.test = TestAddCleanup.LoggingTest('brokenTest') + self.test.addCleanup(lambda: 1/0) + self.test.addCleanup(lambda: 1/0) + self.logging_result = ExtendedTestResult() + self.test.run(self.logging_result) + self.assertEqual(['startTest', 'addError', 'stopTest'], + [event[0] for event in self.logging_result._events]) + self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']), + set(self.logging_result._events[1][2].keys())) + + +class TestWithDetails(TestCase): + + run_test_with = FullStackRunTest + + def assertDetailsProvided(self, case, expected_outcome, expected_keys): + """Assert that when case is run, details are provided to the result. + + :param case: A TestCase to run. + :param expected_outcome: The call that should be made. + :param expected_keys: The keys to look for. + """ + result = ExtendedTestResult() + case.run(result) + case = result._events[0][1] + expected = [ + ('startTest', case), + (expected_outcome, case), + ('stopTest', case), + ] + self.assertEqual(3, len(result._events)) + self.assertEqual(expected[0], result._events[0]) + self.assertEqual(expected[1], result._events[1][0:2]) + # Checking the TB is right is rather tricky. doctest line matching + # would help, but 'meh'. 
+ self.assertEqual(sorted(expected_keys), + sorted(result._events[1][2].keys())) + self.assertEqual(expected[-1], result._events[-1]) + + def get_content(self): + return content.Content( + content.ContentType("text", "foo"), lambda: [_b('foo')]) + + +class TestExpectedFailure(TestWithDetails): + """Tests for expected failures and unexpected successess.""" + + run_test_with = FullStackRunTest + + def make_unexpected_case(self): + class Case(TestCase): + def test(self): + raise testcase._UnexpectedSuccess + case = Case('test') + return case + + def test_raising__UnexpectedSuccess_py27(self): + case = self.make_unexpected_case() + result = Python27TestResult() + case.run(result) + case = result._events[0][1] + self.assertEqual([ + ('startTest', case), + ('addUnexpectedSuccess', case), + ('stopTest', case), + ], result._events) + + def test_raising__UnexpectedSuccess_extended(self): + case = self.make_unexpected_case() + result = ExtendedTestResult() + case.run(result) + case = result._events[0][1] + self.assertEqual([ + ('startTest', case), + ('addUnexpectedSuccess', case, {}), + ('stopTest', case), + ], result._events) + + def make_xfail_case_xfails(self): + content = self.get_content() + class Case(TestCase): + def test(self): + self.addDetail("foo", content) + self.expectFailure("we are sad", self.assertEqual, + 1, 0) + case = Case('test') + return case + + def make_xfail_case_succeeds(self): + content = self.get_content() + class Case(TestCase): + def test(self): + self.addDetail("foo", content) + self.expectFailure("we are sad", self.assertEqual, + 1, 1) + case = Case('test') + return case + + def test_expectFailure_KnownFailure_extended(self): + case = self.make_xfail_case_xfails() + self.assertDetailsProvided(case, "addExpectedFailure", + ["foo", "traceback", "reason"]) + + def test_expectFailure_KnownFailure_unexpected_success(self): + case = self.make_xfail_case_succeeds() + self.assertDetailsProvided(case, "addUnexpectedSuccess", + ["foo", "reason"]) + + 
+class TestUniqueFactories(TestCase): + """Tests for getUniqueString and getUniqueInteger.""" + + run_test_with = FullStackRunTest + + def test_getUniqueInteger(self): + # getUniqueInteger returns an integer that increments each time you + # call it. + one = self.getUniqueInteger() + self.assertEqual(1, one) + two = self.getUniqueInteger() + self.assertEqual(2, two) + + def test_getUniqueString(self): + # getUniqueString returns the current test id followed by a unique + # integer. + name_one = self.getUniqueString() + self.assertEqual('%s-%d' % (self.id(), 1), name_one) + name_two = self.getUniqueString() + self.assertEqual('%s-%d' % (self.id(), 2), name_two) + + def test_getUniqueString_prefix(self): + # If getUniqueString is given an argument, it uses that argument as + # the prefix of the unique string, rather than the test id. + name_one = self.getUniqueString('foo') + self.assertThat(name_one, Equals('foo-1')) + name_two = self.getUniqueString('bar') + self.assertThat(name_two, Equals('bar-2')) + + +class TestCloneTestWithNewId(TestCase): + """Tests for clone_test_with_new_id.""" + + run_test_with = FullStackRunTest + + def test_clone_test_with_new_id(self): + class FooTestCase(TestCase): + def test_foo(self): + pass + test = FooTestCase('test_foo') + oldName = test.id() + newName = self.getUniqueString() + newTest = clone_test_with_new_id(test, newName) + self.assertEqual(newName, newTest.id()) + self.assertEqual(oldName, test.id(), + "the original test instance should be unchanged.") + + def test_cloned_testcase_does_not_share_details(self): + """A cloned TestCase does not share the details dict.""" + class Test(TestCase): + def test_foo(self): + self.addDetail( + 'foo', content.Content('text/plain', lambda: 'foo')) + orig_test = Test('test_foo') + cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString()) + orig_test.run(unittest.TestResult()) + self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes()) + self.assertEqual(None, 
cloned_test.getDetails().get('foo')) + + +class TestDetailsProvided(TestWithDetails): + + run_test_with = FullStackRunTest + + def test_addDetail(self): + mycontent = self.get_content() + self.addDetail("foo", mycontent) + details = self.getDetails() + self.assertEqual({"foo": mycontent}, details) + + def test_addError(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + 1/0 + self.assertDetailsProvided(Case("test"), "addError", + ["foo", "traceback"]) + + def test_addFailure(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + self.fail('yo') + self.assertDetailsProvided(Case("test"), "addFailure", + ["foo", "traceback"]) + + def test_addSkip(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + self.skip('yo') + self.assertDetailsProvided(Case("test"), "addSkip", + ["foo", "reason"]) + + def test_addSucccess(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + self.assertDetailsProvided(Case("test"), "addSuccess", + ["foo"]) + + def test_addUnexpectedSuccess(self): + class Case(TestCase): + def test(this): + this.addDetail("foo", self.get_content()) + raise testcase._UnexpectedSuccess() + self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess", + ["foo"]) + + def test_addDetails_from_Mismatch(self): + content = self.get_content() + class Mismatch(object): + def describe(self): + return "Mismatch" + def get_details(self): + return {"foo": content} + class Matcher(object): + def match(self, thing): + return Mismatch() + def __str__(self): + return "a description" + class Case(TestCase): + def test(self): + self.assertThat("foo", Matcher()) + self.assertDetailsProvided(Case("test"), "addFailure", + ["foo", "traceback"]) + + def test_multiple_addDetails_from_Mismatch(self): + content = self.get_content() + class Mismatch(object): + def describe(self): + return "Mismatch" + def get_details(self): 
+ return {"foo": content, "bar": content} + class Matcher(object): + def match(self, thing): + return Mismatch() + def __str__(self): + return "a description" + class Case(TestCase): + def test(self): + self.assertThat("foo", Matcher()) + self.assertDetailsProvided(Case("test"), "addFailure", + ["bar", "foo", "traceback"]) + + def test_addDetails_with_same_name_as_key_from_get_details(self): + content = self.get_content() + class Mismatch(object): + def describe(self): + return "Mismatch" + def get_details(self): + return {"foo": content} + class Matcher(object): + def match(self, thing): + return Mismatch() + def __str__(self): + return "a description" + class Case(TestCase): + def test(self): + self.addDetail("foo", content) + self.assertThat("foo", Matcher()) + self.assertDetailsProvided(Case("test"), "addFailure", + ["foo", "foo-1", "traceback"]) + + def test_addDetailUniqueName_works(self): + content = self.get_content() + class Case(TestCase): + def test(self): + self.addDetailUniqueName("foo", content) + self.addDetailUniqueName("foo", content) + self.assertDetailsProvided(Case("test"), "addSuccess", + ["foo", "foo-1"]) + + +class TestSetupTearDown(TestCase): + + run_test_with = FullStackRunTest + + def test_setUpNotCalled(self): + class DoesnotcallsetUp(TestCase): + def setUp(self): + pass + def test_method(self): + pass + result = unittest.TestResult() + DoesnotcallsetUp('test_method').run(result) + self.assertThat(result.errors, HasLength(1)) + self.assertThat(result.errors[0][1], + DocTestMatches( + "...ValueError...File...testtools/tests/test_testcase.py...", + ELLIPSIS)) + + def test_tearDownNotCalled(self): + class DoesnotcalltearDown(TestCase): + def test_method(self): + pass + def tearDown(self): + pass + result = unittest.TestResult() + DoesnotcalltearDown('test_method').run(result) + self.assertThat(result.errors, HasLength(1)) + self.assertThat(result.errors[0][1], + DocTestMatches( + "...ValueError...File...testtools/tests/test_testcase.py...", 
+ ELLIPSIS)) + + +class TestSkipping(TestCase): + """Tests for skipping of tests functionality.""" + + run_test_with = FullStackRunTest + + def test_skip_causes_skipException(self): + self.assertThat(lambda:self.skip("Skip this test"), + Raises(MatchesException(self.skipException))) + + def test_can_use_skipTest(self): + self.assertThat(lambda:self.skipTest("Skip this test"), + Raises(MatchesException(self.skipException))) + + def test_skip_without_reason_works(self): + class Test(TestCase): + def test(self): + raise self.skipException() + case = Test("test") + result = ExtendedTestResult() + case.run(result) + self.assertEqual('addSkip', result._events[1][0]) + self.assertEqual('no reason given.', + result._events[1][2]['reason'].as_text()) + + def test_skipException_in_setup_calls_result_addSkip(self): + class TestThatRaisesInSetUp(TestCase): + def setUp(self): + TestCase.setUp(self) + self.skip("skipping this test") + def test_that_passes(self): + pass + calls = [] + result = LoggingResult(calls) + test = TestThatRaisesInSetUp("test_that_passes") + test.run(result) + case = result._events[0][1] + self.assertEqual([('startTest', case), + ('addSkip', case, "skipping this test"), ('stopTest', case)], + calls) + + def test_skipException_in_test_method_calls_result_addSkip(self): + class SkippingTest(TestCase): + def test_that_raises_skipException(self): + self.skip("skipping this test") + result = Python27TestResult() + test = SkippingTest("test_that_raises_skipException") + test.run(result) + case = result._events[0][1] + self.assertEqual([('startTest', case), + ('addSkip', case, "skipping this test"), ('stopTest', case)], + result._events) + + def test_skip__in_setup_with_old_result_object_calls_addSuccess(self): + class SkippingTest(TestCase): + def setUp(self): + TestCase.setUp(self) + raise self.skipException("skipping this test") + def test_that_raises_skipException(self): + pass + result = Python26TestResult() + test = 
SkippingTest("test_that_raises_skipException") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skip_with_old_result_object_calls_addError(self): + class SkippingTest(TestCase): + def test_that_raises_skipException(self): + raise self.skipException("skipping this test") + result = Python26TestResult() + test = SkippingTest("test_that_raises_skipException") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skip_decorator(self): + class SkippingTest(TestCase): + @skip("skipping this test") + def test_that_is_decorated_with_skip(self): + self.fail() + result = Python26TestResult() + test = SkippingTest("test_that_is_decorated_with_skip") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skipIf_decorator(self): + class SkippingTest(TestCase): + @skipIf(True, "skipping this test") + def test_that_is_decorated_with_skipIf(self): + self.fail() + result = Python26TestResult() + test = SkippingTest("test_that_is_decorated_with_skipIf") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + def test_skipUnless_decorator(self): + class SkippingTest(TestCase): + @skipUnless(False, "skipping this test") + def test_that_is_decorated_with_skipUnless(self): + self.fail() + result = Python26TestResult() + test = SkippingTest("test_that_is_decorated_with_skipUnless") + test.run(result) + self.assertEqual('addSuccess', result._events[1][0]) + + +class TestOnException(TestCase): + + run_test_with = FullStackRunTest + + def test_default_works(self): + events = [] + class Case(TestCase): + def method(self): + self.onException(an_exc_info) + events.append(True) + case = Case("method") + case.run() + self.assertThat(events, Equals([True])) + + def test_added_handler_works(self): + events = [] + class Case(TestCase): + def method(self): + self.addOnException(events.append) + self.onException(an_exc_info) + case = Case("method") + case.run() + 
self.assertThat(events, Equals([an_exc_info])) + + def test_handler_that_raises_is_not_caught(self): + events = [] + class Case(TestCase): + def method(self): + self.addOnException(events.index) + self.assertThat(lambda: self.onException(an_exc_info), + Raises(MatchesException(ValueError))) + case = Case("method") + case.run() + self.assertThat(events, Equals([])) + + +class TestPatchSupport(TestCase): + + run_test_with = FullStackRunTest + + class Case(TestCase): + def test(self): + pass + + def test_patch(self): + # TestCase.patch masks obj.attribute with the new value. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + self.assertEqual('patched', self.foo) + + def test_patch_restored_after_run(self): + # TestCase.patch masks obj.attribute with the new value, but restores + # the original value after the test is finished. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + test.run() + self.assertEqual('original', self.foo) + + def test_successive_patches_apply(self): + # TestCase.patch can be called multiple times per test. Each time you + # call it, it overrides the original value. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + test.patch(self, 'foo', 'second') + self.assertEqual('second', self.foo) + + def test_successive_patches_restored_after_run(self): + # TestCase.patch restores the original value, no matter how many times + # it was called. + self.foo = 'original' + test = self.Case('test') + test.patch(self, 'foo', 'patched') + test.patch(self, 'foo', 'second') + test.run() + self.assertEqual('original', self.foo) + + def test_patch_nonexistent_attribute(self): + # TestCase.patch can be used to patch a non-existent attribute. 
+ test = self.Case('test') + test.patch(self, 'doesntexist', 'patched') + self.assertEqual('patched', self.doesntexist) + + def test_restore_nonexistent_attribute(self): + # TestCase.patch can be used to patch a non-existent attribute, after + # the test run, the attribute is then removed from the object. + test = self.Case('test') + test.patch(self, 'doesntexist', 'patched') + test.run() + marker = object() + value = getattr(self, 'doesntexist', marker) + self.assertIs(marker, value) + + +class TestTestCaseSuper(TestCase): + + run_test_with = FullStackRunTest + + def test_setup_uses_super(self): + class OtherBaseCase(unittest.TestCase): + setup_called = False + def setUp(self): + self.setup_called = True + super(OtherBaseCase, self).setUp() + class OurCase(TestCase, OtherBaseCase): + def runTest(self): + pass + test = OurCase() + test.setUp() + test.tearDown() + self.assertTrue(test.setup_called) + + def test_teardown_uses_super(self): + class OtherBaseCase(unittest.TestCase): + teardown_called = False + def tearDown(self): + self.teardown_called = True + super(OtherBaseCase, self).tearDown() + class OurCase(TestCase, OtherBaseCase): + def runTest(self): + pass + test = OurCase() + test.setUp() + test.tearDown() + self.assertTrue(test.teardown_called) + + +class TestNullary(TestCase): + + def test_repr(self): + # The repr() of nullary is the same as the repr() of the wrapped + # function. + def foo(): + pass + wrapped = Nullary(foo) + self.assertEqual(repr(wrapped), repr(foo)) + + def test_called_with_arguments(self): + # The function is called with the arguments given to Nullary's + # constructor. + l = [] + def foo(*args, **kwargs): + l.append((args, kwargs)) + wrapped = Nullary(foo, 1, 2, a="b") + wrapped() + self.assertEqual(l, [((1, 2), {'a': 'b'})]) + + def test_returns_wrapped(self): + # Calling Nullary returns whatever the function returns. 
+ ret = object() + wrapped = Nullary(lambda: ret) + self.assertIs(ret, wrapped()) + + def test_raises(self): + # If the function raises, so does Nullary when called. + wrapped = Nullary(lambda: 1/0) + self.assertRaises(ZeroDivisionError, wrapped) + + +class TestAttributes(TestCase): + + def test_simple_attr(self): + # Adding an attr to a test changes its id(). + class MyTest(WithAttributes, TestCase): + @attr('foo') + def test_bar(self): + pass + case = MyTest('test_bar') + self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]', + case.id()) + + def test_multiple_attributes(self): + class MyTest(WithAttributes, TestCase): + # Not sorted here, forward or backwards. + @attr('foo', 'quux', 'bar') + def test_bar(self): + pass + case = MyTest('test_bar') + self.assertEqual( + 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]', + case.id()) + + def test_multiple_attr_decorators(self): + class MyTest(WithAttributes, TestCase): + # Not sorted here, forward or backwards. 
+ @attr('bar') + @attr('quux') + @attr('foo') + def test_bar(self): + pass + case = MyTest('test_bar') + self.assertEqual( + 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]', + case.id()) + + +class TestDecorateTestCaseResult(TestCase): + + def setUp(self): + super(TestDecorateTestCaseResult, self).setUp() + self.log = [] + + def make_result(self, result): + self.log.append(('result', result)) + return LoggingResult(self.log) + + def test___call__(self): + case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result) + case(None) + case('something') + self.assertEqual([('result', None), + ('tags', set(), set()), + ('startTest', case.decorated), + ('addSuccess', case.decorated), + ('stopTest', case.decorated), + ('tags', set(), set()), + ('result', 'something'), + ('tags', set(), set()), + ('startTest', case.decorated), + ('addSuccess', case.decorated), + ('stopTest', case.decorated), + ('tags', set(), set()) + ], self.log) + + def test_run(self): + case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result) + case.run(None) + case.run('something') + self.assertEqual([('result', None), + ('tags', set(), set()), + ('startTest', case.decorated), + ('addSuccess', case.decorated), + ('stopTest', case.decorated), + ('tags', set(), set()), + ('result', 'something'), + ('tags', set(), set()), + ('startTest', case.decorated), + ('addSuccess', case.decorated), + ('stopTest', case.decorated), + ('tags', set(), set()) + ], self.log) + + def test_before_after_hooks(self): + case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result, + before_run=lambda result: self.log.append('before'), + after_run=lambda result: self.log.append('after')) + case.run(None) + case(None) + self.assertEqual([ + ('result', None), + 'before', + ('tags', set(), set()), + ('startTest', case.decorated), + ('addSuccess', case.decorated), + ('stopTest', case.decorated), + ('tags', set(), set()), + 'after', + ('result', None), + 'before', + ('tags', set(), set()), + 
('startTest', case.decorated), + ('addSuccess', case.decorated), + ('stopTest', case.decorated), + ('tags', set(), set()), + 'after', + ], self.log) + + def test_other_attribute(self): + orig = PlaceHolder('foo') + orig.thing = 'fred' + case = DecorateTestCaseResult(orig, self.make_result) + self.assertEqual('fred', case.thing) + self.assertRaises(AttributeError, getattr, case, 'other') + case.other = 'barbara' + self.assertEqual('barbara', orig.other) + del case.thing + self.assertRaises(AttributeError, getattr, orig, 'thing') + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py new file mode 100644 index 00000000000..04aa0873ccd --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py @@ -0,0 +1,2919 @@ +# Copyright (c) 2008-2012 testtools developers. See LICENSE for details. 
+ +"""Test TestResults and related things.""" + +__metaclass__ = type + +import codecs +import datetime +import doctest +from itertools import chain, combinations +import os +import shutil +import sys +import tempfile +import threading +from unittest import TestSuite +import warnings + +from extras import safe_hasattr, try_imports + +Queue = try_imports(['Queue.Queue', 'queue.Queue']) + +from testtools import ( + CopyStreamResult, + ExtendedToOriginalDecorator, + ExtendedToStreamDecorator, + MultiTestResult, + PlaceHolder, + StreamFailFast, + StreamResult, + StreamResultRouter, + StreamSummary, + StreamTagger, + StreamToDict, + StreamToExtendedDecorator, + StreamToQueue, + Tagger, + TestCase, + TestControl, + TestResult, + TestResultDecorator, + TestByTestResult, + TextTestResult, + ThreadsafeForwardingResult, + TimestampingStreamResult, + testresult, + ) +from testtools.compat import ( + _b, + _get_exception_encoding, + _r, + _u, + advance_iterator, + str_is_unicode, + StringIO, + ) +from testtools.content import ( + Content, + content_from_stream, + text_content, + TracebackContent, + ) +from testtools.content_type import ContentType, UTF8_TEXT +from testtools.matchers import ( + AllMatch, + Contains, + DocTestMatches, + Equals, + HasLength, + MatchesAny, + MatchesException, + Raises, + ) +from testtools.tests.helpers import ( + an_exc_info, + FullStackRunTest, + LoggingResult, + run_with_stack_hidden, + ) +from testtools.testresult.doubles import ( + Python26TestResult, + Python27TestResult, + ExtendedTestResult, + StreamResult as LoggingStreamResult, + ) +from testtools.testresult.real import ( + _details_to_str, + _merge_tags, + utc, + ) + + +def make_erroring_test(): + class Test(TestCase): + def error(self): + 1/0 + return Test("error") + + +def make_failing_test(): + class Test(TestCase): + def failed(self): + self.fail("yo!") + return Test("failed") + + +def make_mismatching_test(): + class Test(TestCase): + def mismatch(self): + self.assertEqual(1, 2) + 
return Test("mismatch") + + +def make_unexpectedly_successful_test(): + class Test(TestCase): + def succeeded(self): + self.expectFailure("yo!", lambda: None) + return Test("succeeded") + + +def make_test(): + class Test(TestCase): + def test(self): + pass + return Test("test") + + +def make_exception_info(exceptionFactory, *args, **kwargs): + try: + raise exceptionFactory(*args, **kwargs) + except: + return sys.exc_info() + + +class Python26Contract(object): + + def test_fresh_result_is_successful(self): + # A result is considered successful before any tests are run. + result = self.makeResult() + self.assertTrue(result.wasSuccessful()) + + def test_addError_is_failure(self): + # addError fails the test run. + result = self.makeResult() + result.startTest(self) + result.addError(self, an_exc_info) + result.stopTest(self) + self.assertFalse(result.wasSuccessful()) + + def test_addFailure_is_failure(self): + # addFailure fails the test run. + result = self.makeResult() + result.startTest(self) + result.addFailure(self, an_exc_info) + result.stopTest(self) + self.assertFalse(result.wasSuccessful()) + + def test_addSuccess_is_success(self): + # addSuccess does not fail the test run. + result = self.makeResult() + result.startTest(self) + result.addSuccess(self) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_stop_sets_shouldStop(self): + result = self.makeResult() + result.stop() + self.assertTrue(result.shouldStop) + + +class Python27Contract(Python26Contract): + + def test_addExpectedFailure(self): + # Calling addExpectedFailure(test, exc_info) completes ok. + result = self.makeResult() + result.startTest(self) + result.addExpectedFailure(self, an_exc_info) + + def test_addExpectedFailure_is_success(self): + # addExpectedFailure does not fail the test run. 
+ result = self.makeResult() + result.startTest(self) + result.addExpectedFailure(self, an_exc_info) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_addSkipped(self): + # Calling addSkip(test, reason) completes ok. + result = self.makeResult() + result.startTest(self) + result.addSkip(self, _u("Skipped for some reason")) + + def test_addSkip_is_success(self): + # addSkip does not fail the test run. + result = self.makeResult() + result.startTest(self) + result.addSkip(self, _u("Skipped for some reason")) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_addUnexpectedSuccess(self): + # Calling addUnexpectedSuccess(test) completes ok. + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + + def test_addUnexpectedSuccess_was_successful(self): + # addUnexpectedSuccess does not fail the test run in Python 2.7. + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + result.stopTest(self) + self.assertTrue(result.wasSuccessful()) + + def test_startStopTestRun(self): + # Calling startTestRun completes ok. + result = self.makeResult() + result.startTestRun() + result.stopTestRun() + + def test_failfast(self): + result = self.makeResult() + result.failfast = True + class Failing(TestCase): + def test_a(self): + self.fail('a') + def test_b(self): + self.fail('b') + TestSuite([Failing('test_a'), Failing('test_b')]).run(result) + self.assertEqual(1, result.testsRun) + + +class TagsContract(Python27Contract): + """Tests to ensure correct tagging behaviour. + + See the subunit docs for guidelines on how this is supposed to work. + """ + + def test_no_tags_by_default(self): + # Results initially have no tags. + result = self.makeResult() + result.startTestRun() + self.assertEqual(frozenset(), result.current_tags) + + def test_adding_tags(self): + # Tags are added using 'tags' and thus become visible in + # 'current_tags'. 
+ result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + self.assertEqual(set(['foo']), result.current_tags) + + def test_removing_tags(self): + # Tags are removed using 'tags'. + result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + result.tags(set(), set(['foo'])) + self.assertEqual(set(), result.current_tags) + + def test_startTestRun_resets_tags(self): + # startTestRun makes a new test run, and thus clears all the tags. + result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + result.startTestRun() + self.assertEqual(set(), result.current_tags) + + def test_add_tags_within_test(self): + # Tags can be added after a test has run. + result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + result.startTest(self) + result.tags(set(['bar']), set()) + self.assertEqual(set(['foo', 'bar']), result.current_tags) + + def test_tags_added_in_test_are_reverted(self): + # Tags added during a test run are then reverted once that test has + # finished. + result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + result.startTest(self) + result.tags(set(['bar']), set()) + result.addSuccess(self) + result.stopTest(self) + self.assertEqual(set(['foo']), result.current_tags) + + def test_tags_removed_in_test(self): + # Tags can be removed during tests. + result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + result.startTest(self) + result.tags(set(), set(['foo'])) + self.assertEqual(set(), result.current_tags) + + def test_tags_removed_in_test_are_restored(self): + # Tags removed during tests are restored once that test has finished. 
+ result = self.makeResult() + result.startTestRun() + result.tags(set(['foo']), set()) + result.startTest(self) + result.tags(set(), set(['foo'])) + result.addSuccess(self) + result.stopTest(self) + self.assertEqual(set(['foo']), result.current_tags) + + +class DetailsContract(TagsContract): + """Tests for the details API of TestResults.""" + + def test_addExpectedFailure_details(self): + # Calling addExpectedFailure(test, details=xxx) completes ok. + result = self.makeResult() + result.startTest(self) + result.addExpectedFailure(self, details={}) + + def test_addError_details(self): + # Calling addError(test, details=xxx) completes ok. + result = self.makeResult() + result.startTest(self) + result.addError(self, details={}) + + def test_addFailure_details(self): + # Calling addFailure(test, details=xxx) completes ok. + result = self.makeResult() + result.startTest(self) + result.addFailure(self, details={}) + + def test_addSkipped_details(self): + # Calling addSkip(test, reason) completes ok. + result = self.makeResult() + result.startTest(self) + result.addSkip(self, details={}) + + def test_addUnexpectedSuccess_details(self): + # Calling addUnexpectedSuccess(test) completes ok. + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self, details={}) + + def test_addSuccess_details(self): + # Calling addSuccess(test) completes ok. + result = self.makeResult() + result.startTest(self) + result.addSuccess(self, details={}) + + +class FallbackContract(DetailsContract): + """When we fallback we take our policy choice to map calls. + + For instance, we map unexpectedSuccess to an error code, not to success. + """ + + def test_addUnexpectedSuccess_was_successful(self): + # addUnexpectedSuccess fails test run in testtools. 
+ result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + result.stopTest(self) + self.assertFalse(result.wasSuccessful()) + + +class StartTestRunContract(FallbackContract): + """Defines the contract for testtools policy choices. + + That is things which are not simply extensions to unittest but choices we + have made differently. + """ + + def test_startTestRun_resets_unexpected_success(self): + result = self.makeResult() + result.startTest(self) + result.addUnexpectedSuccess(self) + result.stopTest(self) + result.startTestRun() + self.assertTrue(result.wasSuccessful()) + + def test_startTestRun_resets_failure(self): + result = self.makeResult() + result.startTest(self) + result.addFailure(self, an_exc_info) + result.stopTest(self) + result.startTestRun() + self.assertTrue(result.wasSuccessful()) + + def test_startTestRun_resets_errors(self): + result = self.makeResult() + result.startTest(self) + result.addError(self, an_exc_info) + result.stopTest(self) + result.startTestRun() + self.assertTrue(result.wasSuccessful()) + + +class TestTestResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + return TestResult() + + +class TestMultiTestResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + return MultiTestResult(TestResult(), TestResult()) + + +class TestTextTestResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + return TextTestResult(StringIO()) + + +class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract): + + run_test_with = FullStackRunTest + + def makeResult(self): + result_semaphore = threading.Semaphore(1) + target = TestResult() + return ThreadsafeForwardingResult(target, result_semaphore) + + +class TestExtendedTestResultContract(TestCase, StartTestRunContract): + + def makeResult(self): + return ExtendedTestResult() + + +class 
class TestPython26TestResultContract(TestCase, Python26Contract):

    def makeResult(self):
        return Python26TestResult()


class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):

    def makeResult(self):
        return ExtendedToOriginalDecorator(Python26TestResult())


class TestPython27TestResultContract(TestCase, Python27Contract):

    def makeResult(self):
        return Python27TestResult()


class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):

    def makeResult(self):
        return ExtendedToOriginalDecorator(Python27TestResult())


class TestAdaptedStreamResult(TestCase, DetailsContract):

    def makeResult(self):
        return ExtendedToStreamDecorator(StreamResult())


class TestTestResultDecoratorContract(TestCase, StartTestRunContract):

    run_test_with = FullStackRunTest

    def makeResult(self):
        return TestResultDecorator(TestResult())


# DetailsContract because ExtendedToStreamDecorator follows Python for
# uxsuccess handling.
class TestStreamToExtendedContract(TestCase, DetailsContract):

    def makeResult(self):
        return ExtendedToStreamDecorator(
            StreamToExtendedDecorator(ExtendedTestResult()))


class TestStreamResultContract(object):
    """Mixin describing behaviour every StreamResult implementation shares."""

    def _make_result(self):
        # Concrete subclasses supply the result object under test.
        raise NotImplementedError(self._make_result)

    def test_startTestRun(self):
        # A bare start/stop bracket must be accepted without error.
        result = self._make_result()
        result.startTestRun()
        result.stopTestRun()

    def test_files(self):
        # Exercise every combination of optional parameters for file events.
        result = self._make_result()
        result.startTestRun()
        self.addCleanup(result.stopTestRun)
        now = datetime.datetime.now(utc)
        optional = list(dict(
            eof=True,
            mime_type="text/plain",
            route_code=_u("1234"),
            test_id=_u("foo"),
            timestamp=now,
            ).items())
        for kwargs in self._power_set(optional):
            result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
            result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)

    def test_test_status(self):
        # Exercise every combination of optional parameters for non-file
        # (test status) events.
        result = self._make_result()
        result.startTestRun()
        self.addCleanup(result.stopTestRun)
        now = datetime.datetime.now(utc)
        statuses = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
            'uxsuccess', 'success', 'fail', 'skip']]
        optional = list(dict(
            runnable=False,
            test_tags=set(['quux']),
            route_code=_u("1234"),
            timestamp=now,
            ).items())
        for kwargs in self._power_set(optional):
            for test_id, test_status in statuses:
                result.status(
                    test_id=test_id, test_status=test_status, **kwargs)

    def _power_set(self, iterable):
        "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
        pool = list(iterable)
        subsets = chain.from_iterable(
            combinations(pool, r) for r in range(len(pool) + 1))
        return [dict(subset) for subset in subsets]


class TestBaseStreamResultContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamResult()


class TestCopyStreamResultContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return CopyStreamResult([StreamResult(), StreamResult()])


class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return LoggingStreamResult()


class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return ExtendedToStreamDecorator(StreamResult())


class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamSummary()


class TestStreamTaggerContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamTagger([StreamResult()], add=set(), discard=set())


class TestStreamToDictContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamToDict(lambda x: None)


class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamToExtendedDecorator(ExtendedTestResult())


class TestStreamToQueueContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        queue = Queue()
        return StreamToQueue(queue, "foo")


class TestStreamFailFastContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamFailFast(lambda: None)


class TestStreamResultRouterContract(TestCase, TestStreamResultContract):

    def _make_result(self):
        return StreamResultRouter(StreamResult())


class TestDoubleStreamResultEvents(TestCase):
    """LoggingStreamResult faithfully records every call it receives."""

    def test_startTestRun(self):
        log = LoggingStreamResult()
        log.startTestRun()
        self.assertEqual([('startTestRun',)], log._events)

    def test_stopTestRun(self):
        log = LoggingStreamResult()
        log.startTestRun()
        log.stopTestRun()
        self.assertEqual([('startTestRun',), ('stopTestRun',)], log._events)

    def test_file(self):
        # A file event is recorded with all its parameters in order.
        log = LoggingStreamResult()
        log.startTestRun()
        now = datetime.datetime.now(utc)
        log.status(file_name="foo", file_bytes="bar", eof=True,
            mime_type="text/json", test_id="id", route_code='abc',
            timestamp=now)
        self.assertEqual(
            [('startTestRun',),
             ('status', 'id', None, None, True, 'foo', 'bar', True,
              'text/json', 'abc', now)],
            log._events)

    def test_status(self):
        # A test-status event is recorded with all its parameters in order.
        log = LoggingStreamResult()
        log.startTestRun()
        now = datetime.datetime.now(utc)
        log.status("foo", "success", test_tags=set(['tag']),
            runnable=False, route_code='abc', timestamp=now)
        self.assertEqual(
            [('startTestRun',),
             ('status', 'foo', 'success', set(['tag']), False, None, None,
              False, None, 'abc', now)],
            log._events)


class TestCopyStreamResultCopies(TestCase):
    """CopyStreamResult forwards every event to all of its targets."""

    def setUp(self):
        super(TestCopyStreamResultCopies, self).setUp()
        self.target1 = LoggingStreamResult()
        self.target2 = LoggingStreamResult()
        self.targets = [self.target1._events, self.target2._events]
        self.result = CopyStreamResult([self.target1, self.target2])

    def test_startTestRun(self):
        self.result.startTestRun()
        self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))

    def test_stopTestRun(self):
        self.result.startTestRun()
        self.result.stopTestRun()
        self.assertThat(self.targets,
            AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))

    def test_status(self):
        self.result.startTestRun()
        now = datetime.datetime.now(utc)
        self.result.status("foo", "success", test_tags=set(['tag']),
            runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
            mime_type="text/json", route_code='abc', timestamp=now)
        self.assertThat(self.targets,
            AllMatch(Equals([('startTestRun',),
                ('status', 'foo', 'success', set(['tag']), False, "foo",
                 b'bar', True, "text/json", 'abc', now)
                ])))


class TestStreamTagger(TestCase):
    """StreamTagger adds or discards tags on every forwarded event."""

    def test_adding(self):
        # Added tags appear on every event, merged with any incoming tags.
        log = LoggingStreamResult()
        result = StreamTagger([log], add=['foo'])
        result.startTestRun()
        result.status()
        result.status(test_tags=set(['bar']))
        result.status(test_tags=None)
        result.stopTestRun()
        self.assertEqual([
            ('startTestRun',),
            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
            ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
            ('stopTestRun',),
            ], log._events)

    def test_discarding(self):
        # Discarded tags are removed; an empty remaining set becomes None.
        log = LoggingStreamResult()
        result = StreamTagger([log], discard=['foo'])
        result.startTestRun()
        result.status()
        result.status(test_tags=None)
        result.status(test_tags=set(['foo']))
        result.status(test_tags=set(['bar']))
        result.status(test_tags=set(['foo', 'bar']))
        result.stopTestRun()
        self.assertEqual([
            ('startTestRun',),
            ('status', None, None, None, True, None, None, False, None, None, None),
            ('status', None, None, None, True, None, None, False, None, None, None),
            ('status', None, None, None, True, None, None, False, None, None, None),
            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
            ('stopTestRun',),
            ], log._events)
class TestStreamToDict(TestCase):
    """StreamToDict aggregates stream events into per-test dicts."""

    def test_hung_test(self):
        # A test still inprogress at stopTestRun is reported as such.
        tests = []
        result = StreamToDict(tests.append)
        result.startTestRun()
        result.status('foo', 'inprogress')
        self.assertEqual([], tests)
        result.stopTestRun()
        self.assertEqual([
            {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
             'timestamps': [None, None]}
            ], tests)

    def test_all_terminal_states_reported(self):
        # Each terminal status emits the test immediately, not at run end.
        tests = []
        result = StreamToDict(tests.append)
        result.startTestRun()
        result.status('success', 'success')
        result.status('skip', 'skip')
        result.status('exists', 'exists')
        result.status('fail', 'fail')
        result.status('xfail', 'xfail')
        result.status('uxsuccess', 'uxsuccess')
        self.assertThat(tests, HasLength(6))
        self.assertEqual(
            ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
            [test['id'] for test in tests])
        result.stopTestRun()
        self.assertThat(tests, HasLength(6))

    def test_files_reported(self):
        # File attachments become entries in the test's details dict; a
        # missing mime type falls back to application/octet-stream.
        tests = []
        result = StreamToDict(tests.append)
        result.startTestRun()
        result.status(file_name="some log.txt",
            file_bytes=_b("1234 log message"), eof=True,
            mime_type="text/plain; charset=utf8", test_id="foo.bar")
        result.status(file_name="another file",
            file_bytes=_b("""Traceback..."""), test_id="foo.bar")
        result.stopTestRun()
        self.assertThat(tests, HasLength(1))
        test = tests[0]
        self.assertEqual("foo.bar", test['id'])
        self.assertEqual("unknown", test['status'])
        details = test['details']
        self.assertEqual(
            _u("1234 log message"), details['some log.txt'].as_text())
        self.assertEqual(
            _b("Traceback..."),
            _b('').join(details['another file'].iter_bytes()))
        self.assertEqual(
            "application/octet-stream", repr(details['another file'].content_type))

    def test_bad_mime(self):
        # Testtools was making bad mime types, this tests that the specific
        # corruption is catered for.
        tests = []
        result = StreamToDict(tests.append)
        result.startTestRun()
        result.status(file_name="file", file_bytes=b'a',
            mime_type='text/plain; charset=utf8, language=python',
            test_id='id')
        result.stopTestRun()
        self.assertThat(tests, HasLength(1))
        test = tests[0]
        self.assertEqual("id", test['id'])
        details = test['details']
        self.assertEqual(_u("a"), details['file'].as_text())
        self.assertEqual(
            "text/plain; charset=\"utf8\"",
            repr(details['file'].content_type))

    def test_timestamps(self):
        # First and last timestamps seen for a test are both captured.
        tests = []
        result = StreamToDict(tests.append)
        result.startTestRun()
        result.status(test_id='foo', test_status='inprogress', timestamp="A")
        result.status(test_id='foo', test_status='success', timestamp="B")
        result.status(test_id='bar', test_status='inprogress', timestamp="C")
        result.stopTestRun()
        self.assertThat(tests, HasLength(2))
        self.assertEqual(["A", "B"], tests[0]['timestamps'])
        self.assertEqual(["C", None], tests[1]['timestamps'])


class TestExtendedToStreamDecorator(TestCase):

    def test_explicit_time(self):
        # A time() call pins the timestamp used for subsequent events.
        log = LoggingStreamResult()
        result = ExtendedToStreamDecorator(log)
        result.startTestRun()
        now = datetime.datetime.now(utc)
        result.time(now)
        result.startTest(self)
        result.addSuccess(self)
        result.stopTest(self)
        result.stopTestRun()
        self.assertEqual([
            ('startTestRun',),
            ('status',
             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
             'inprogress',
             None,
             True,
             None,
             None,
             False,
             None,
             None,
             now),
            ('status',
             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
             'success',
             set(),
             True,
             None,
             None,
             False,
             None,
             None,
             now),
            ('stopTestRun',)], log._events)

    def test_wasSuccessful_after_stopTestRun(self):
        log = LoggingStreamResult()
        result = ExtendedToStreamDecorator(log)
        result.startTestRun()
        result.status(test_id='foo', test_status='fail')
        result.stopTestRun()
        self.assertEqual(False, result.wasSuccessful())


class TestStreamFailFast(TestCase):
    """StreamFailFast only fires its callback for fail/uxsuccess events."""

    def test_inprogress(self):
        result = StreamFailFast(self.fail)
        result.status('foo', 'inprogress')

    def test_exists(self):
        result = StreamFailFast(self.fail)
        result.status('foo', 'exists')

    def test_xfail(self):
        result = StreamFailFast(self.fail)
        result.status('foo', 'xfail')

    def test_uxsuccess(self):
        calls = []
        def hook():
            calls.append("called")
        result = StreamFailFast(hook)
        result.status('foo', 'uxsuccess')
        result.status('foo', 'uxsuccess')
        self.assertEqual(['called', 'called'], calls)

    def test_success(self):
        result = StreamFailFast(self.fail)
        result.status('foo', 'success')

    def test_fail(self):
        calls = []
        def hook():
            calls.append("called")
        result = StreamFailFast(hook)
        result.status('foo', 'fail')
        result.status('foo', 'fail')
        self.assertEqual(['called', 'called'], calls)

    def test_skip(self):
        result = StreamFailFast(self.fail)
        result.status('foo', 'skip')


class TestStreamSummary(TestCase):
    """StreamSummary aggregates a stream into unittest-style summary lists."""

    def test_attributes(self):
        result = StreamSummary()
        result.startTestRun()
        self.assertEqual([], result.failures)
        self.assertEqual([], result.errors)
        self.assertEqual([], result.skipped)
        self.assertEqual([], result.expectedFailures)
        self.assertEqual([], result.unexpectedSuccesses)
        self.assertEqual(0, result.testsRun)

    def test_startTestRun(self):
        # startTestRun resets all accumulated state.
        result = StreamSummary()
        result.startTestRun()
        result.failures.append('x')
        result.errors.append('x')
        result.skipped.append('x')
        result.expectedFailures.append('x')
        result.unexpectedSuccesses.append('x')
        result.testsRun = 1
        result.startTestRun()
        self.assertEqual([], result.failures)
        self.assertEqual([], result.errors)
        self.assertEqual([], result.skipped)
        self.assertEqual([], result.expectedFailures)
        self.assertEqual([], result.unexpectedSuccesses)
        self.assertEqual(0, result.testsRun)

    def test_wasSuccessful(self):
        # wasSuccessful returns False if any of
        # failures/errors is non-empty.
        result = StreamSummary()
        result.startTestRun()
        self.assertEqual(True, result.wasSuccessful())
        result.failures.append('x')
        self.assertEqual(False, result.wasSuccessful())
        result.startTestRun()
        result.errors.append('x')
        self.assertEqual(False, result.wasSuccessful())
        result.startTestRun()
        result.skipped.append('x')
        self.assertEqual(True, result.wasSuccessful())
        result.startTestRun()
        result.expectedFailures.append('x')
        self.assertEqual(True, result.wasSuccessful())
        result.startTestRun()
        result.unexpectedSuccesses.append('x')
        self.assertEqual(True, result.wasSuccessful())

    def test_stopTestRun(self):
        result = StreamSummary()
        # terminal successful codes.
        result.startTestRun()
        result.status("foo", "inprogress")
        result.status("foo", "success")
        result.status("bar", "skip")
        result.status("baz", "exists")
        result.stopTestRun()
        self.assertEqual(True, result.wasSuccessful())
        # Existence is terminal but doesn't count as 'running' a test.
        self.assertEqual(2, result.testsRun)

    def test_stopTestRun_inprogress_test_fails(self):
        # Tests inprogress at stopTestRun trigger a failure.
        result = StreamSummary()
        result.startTestRun()
        result.status("foo", "inprogress")
        result.stopTestRun()
        self.assertEqual(False, result.wasSuccessful())
        self.assertThat(result.errors, HasLength(1))
        self.assertEqual("foo", result.errors[0][0].id())
        self.assertEqual("Test did not complete", result.errors[0][1])
        # interim state detection handles route codes - while duplicate ids in
        # one run is undesirable, it may happen (e.g. with repeated tests).
        result.startTestRun()
        result.status("foo", "inprogress")
        result.status("foo", "inprogress", route_code="A")
        result.status("foo", "success", route_code="A")
        result.stopTestRun()
        self.assertEqual(False, result.wasSuccessful())

    def test_status_skip(self):
        # when skip is seen, a synthetic test is reported with reason captured
        # from the 'reason' file attachment if any.
        result = StreamSummary()
        result.startTestRun()
        result.status(file_name="reason",
            file_bytes=_b("Missing dependency"), eof=True,
            mime_type="text/plain; charset=utf8", test_id="foo.bar")
        result.status("foo.bar", "skip")
        self.assertThat(result.skipped, HasLength(1))
        self.assertEqual("foo.bar", result.skipped[0][0].id())
        self.assertEqual(_u("Missing dependency"), result.skipped[0][1])

    def _report_files(self, result):
        # Attach two files to test 'foo.bar': a plain log and a traceback.
        result.status(file_name="some log.txt",
            file_bytes=_b("1234 log message"), eof=True,
            mime_type="text/plain; charset=utf8", test_id="foo.bar")
        result.status(file_name="traceback",
            file_bytes=_b("""Traceback (most recent call last):
  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
    AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
testtools.matchers._impl.MismatchError: Differences: [
[('startTestRun',), ('stopTestRun',)] != []
[('startTestRun',), ('stopTestRun',)] != []
]
"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")

    files_message = Equals(_u("""some log.txt: {{{1234 log message}}}

Traceback (most recent call last):
  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
    AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
testtools.matchers._impl.MismatchError: Differences: [
[('startTestRun',), ('stopTestRun',)] != []
[('startTestRun',), ('stopTestRun',)] != []
]
"""))

    def test_status_fail(self):
        # when fail is seen, a synthetic test is reported with all files
        # attached shown as the message.
        result = StreamSummary()
        result.startTestRun()
        self._report_files(result)
        result.status("foo.bar", "fail")
        self.assertThat(result.errors, HasLength(1))
        self.assertEqual("foo.bar", result.errors[0][0].id())
        self.assertThat(result.errors[0][1], self.files_message)

    def test_status_xfail(self):
        # when xfail is seen, a synthetic test is reported with all files
        # attached shown as the message.
        result = StreamSummary()
        result.startTestRun()
        self._report_files(result)
        result.status("foo.bar", "xfail")
        self.assertThat(result.expectedFailures, HasLength(1))
        self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
        self.assertThat(result.expectedFailures[0][1], self.files_message)

    def test_status_uxsuccess(self):
        # when uxsuccess is seen, a synthetic test is reported.
        result = StreamSummary()
        result.startTestRun()
        result.status("foo.bar", "uxsuccess")
        self.assertThat(result.unexpectedSuccesses, HasLength(1))
        self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
class TestTestControl(TestCase):

    def test_default(self):
        self.assertEqual(False, TestControl().shouldStop)

    def test_stop(self):
        control = TestControl()
        control.stop()
        self.assertEqual(True, control.shouldStop)


class TestTestResult(TestCase):
    """Tests for 'TestResult'."""

    run_tests_with = FullStackRunTest

    def makeResult(self):
        """Make an arbitrary result for testing."""
        return TestResult()

    def test_addSkipped(self):
        # Calling addSkip on a TestResult records the test that was skipped in
        # its skip_reasons dict.
        result = self.makeResult()
        result.addSkip(self, _u("Skipped for some reason"))
        self.assertEqual({_u("Skipped for some reason"): [self]},
            result.skip_reasons)
        result.addSkip(self, _u("Skipped for some reason"))
        self.assertEqual({_u("Skipped for some reason"): [self, self]},
            result.skip_reasons)
        result.addSkip(self, _u("Skipped for another reason"))
        self.assertEqual({_u("Skipped for some reason"): [self, self],
            _u("Skipped for another reason"): [self]},
            result.skip_reasons)

    def test_now_datetime_now(self):
        # _now() consults datetime.now until an explicit time() is set,
        # and goes back to it when time(None) is called.
        result = self.makeResult()
        olddatetime = testresult.real.datetime
        def restore():
            testresult.real.datetime = olddatetime
        self.addCleanup(restore)
        class Module:
            pass
        now = datetime.datetime.now(utc)
        stubdatetime = Module()
        stubdatetime.datetime = Module()
        stubdatetime.datetime.now = lambda tz: now
        testresult.real.datetime = stubdatetime
        # Calling _now() looks up the time.
        self.assertEqual(now, result._now())
        then = now + datetime.timedelta(0, 1)
        # Set an explicit datetime, which gets returned from then on.
        result.time(then)
        self.assertNotEqual(now, result._now())
        self.assertEqual(then, result._now())
        # go back to looking it up.
        result.time(None)
        self.assertEqual(now, result._now())

    def test_now_datetime_time(self):
        result = self.makeResult()
        now = datetime.datetime.now(utc)
        result.time(now)
        self.assertEqual(now, result._now())

    def test_traceback_formatting_without_stack_hidden(self):
        # During the testtools test run, we show our levels of the stack,
        # because we want to be able to use our test suite to debug our own
        # code.
        result = self.makeResult()
        test = make_erroring_test()
        test.run(result)
        self.assertThat(
            result.errors[0][1],
            DocTestMatches(
                'Traceback (most recent call last):\n'
                '  File "...testtools...runtest.py", line ..., in _run_user\n'
                '    return fn(*args, **kwargs)\n'
                '  File "...testtools...testcase.py", line ..., in _run_test_method\n'
                '    return self._get_test_method()()\n'
                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
                '    1/0\n'
                'ZeroDivisionError: ...\n',
                doctest.ELLIPSIS | doctest.REPORT_UDIFF))

    def test_traceback_formatting_with_stack_hidden(self):
        result = self.makeResult()
        test = make_erroring_test()
        run_with_stack_hidden(True, test.run, result)
        self.assertThat(
            result.errors[0][1],
            DocTestMatches(
                'Traceback (most recent call last):\n'
                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
                '    1/0\n'
                'ZeroDivisionError: ...\n',
                doctest.ELLIPSIS))

    def test_traceback_formatting_with_stack_hidden_mismatch(self):
        result = self.makeResult()
        test = make_mismatching_test()
        run_with_stack_hidden(True, test.run, result)
        self.assertThat(
            result.failures[0][1],
            DocTestMatches(
                'Traceback (most recent call last):\n'
                '  File "...testtools...tests...test_testresult.py", line ..., in mismatch\n'
                '    self.assertEqual(1, 2)\n'
                '...MismatchError: 1 != 2\n',
                doctest.ELLIPSIS))

    def test_exc_info_to_unicode(self):
        # subunit upcalls to TestResult._exc_info_to_unicode, so we need to
        # make sure that it's there.
        #
        # See <https://bugs.launchpad.net/testtools/+bug/929063>.
        test = make_erroring_test()
        exc_info = make_exception_info(RuntimeError, "foo")
        result = self.makeResult()
        text_traceback = result._exc_info_to_unicode(exc_info, test)
        self.assertEqual(
            TracebackContent(exc_info, test).as_text(), text_traceback)


class TestMultiTestResult(TestCase):
    """Tests for 'MultiTestResult'."""

    def setUp(self):
        super(TestMultiTestResult, self).setUp()
        self.result1 = LoggingResult([])
        self.result2 = LoggingResult([])
        self.multiResult = MultiTestResult(self.result1, self.result2)

    def assertResultLogsEqual(self, expectedEvents):
        """Assert that our test results have received the expected events."""
        self.assertEqual(expectedEvents, self.result1._events)
        self.assertEqual(expectedEvents, self.result2._events)

    def test_repr(self):
        self.assertEqual(
            '<MultiTestResult (%r, %r)>' % (
                ExtendedToOriginalDecorator(self.result1),
                ExtendedToOriginalDecorator(self.result2)),
            repr(self.multiResult))

    def test_empty(self):
        # Initializing a `MultiTestResult` doesn't do anything to its
        # `TestResult`s.
        self.assertResultLogsEqual([])

    def test_failfast_get(self):
        # Reading reads from the first one - arbitrary choice.
        self.assertEqual(False, self.multiResult.failfast)
        self.result1.failfast = True
        self.assertEqual(True, self.multiResult.failfast)

    def test_failfast_set(self):
        # Writing writes to all.
        self.multiResult.failfast = True
        self.assertEqual(True, self.result1.failfast)
        self.assertEqual(True, self.result2.failfast)

    def test_shouldStop(self):
        self.assertFalse(self.multiResult.shouldStop)
        self.result2.stop()
        # NB: result1 is not stopped: MultiTestResult has to combine the
        # values.
        self.assertTrue(self.multiResult.shouldStop)

    def test_startTest(self):
        # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
        # its `TestResult`s.
        self.multiResult.startTest(self)
        self.assertResultLogsEqual([('startTest', self)])

    def test_stop(self):
        self.assertFalse(self.multiResult.shouldStop)
        self.multiResult.stop()
        self.assertResultLogsEqual(['stop'])

    def test_stopTest(self):
        # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
        # its `TestResult`s.
        self.multiResult.stopTest(self)
        self.assertResultLogsEqual([('stopTest', self)])

    def test_addSkipped(self):
        # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
        # results.
        reason = _u("Skipped for some reason")
        self.multiResult.addSkip(self, reason)
        self.assertResultLogsEqual([('addSkip', self, reason)])

    def test_addSuccess(self):
        # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
        # all its `TestResult`s.
        self.multiResult.addSuccess(self)
        self.assertResultLogsEqual([('addSuccess', self)])

    def test_done(self):
        # Calling `done` on a `MultiTestResult` calls `done` on all its
        # `TestResult`s.
        self.multiResult.done()
        self.assertResultLogsEqual(['done'])

    def test_addFailure(self):
        # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
        # all its `TestResult`s.
        exc_info = make_exception_info(AssertionError, 'failure')
        self.multiResult.addFailure(self, exc_info)
        self.assertResultLogsEqual([('addFailure', self, exc_info)])

    def test_addError(self):
        # Calling `addError` on a `MultiTestResult` calls `addError` on all
        # its `TestResult`s.
        exc_info = make_exception_info(RuntimeError, 'error')
        self.multiResult.addError(self, exc_info)
        self.assertResultLogsEqual([('addError', self, exc_info)])

    def test_startTestRun(self):
        # Calling `startTestRun` on a `MultiTestResult` forwards to all its
        # `TestResult`s.
        self.multiResult.startTestRun()
        self.assertResultLogsEqual(['startTestRun'])

    def test_stopTestRun(self):
        # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
        # `TestResult`s.
        self.multiResult.stopTestRun()
        self.assertResultLogsEqual(['stopTestRun'])

    def test_stopTestRun_returns_results(self):
        # `MultiTestResult.stopTestRun` returns a tuple of all of the return
        # values the `stopTestRun`s that it forwards to.
        class Result(LoggingResult):
            def stopTestRun(self):
                super(Result, self).stopTestRun()
                return 'foo'
        multi_result = MultiTestResult(Result([]), Result([]))
        result = multi_result.stopTestRun()
        self.assertEqual(('foo', 'foo'), result)

    def test_tags(self):
        # Calling `tags` on a `MultiTestResult` calls `tags` on all its
        # `TestResult`s.
        added_tags = set(['foo', 'bar'])
        removed_tags = set(['eggs'])
        self.multiResult.tags(added_tags, removed_tags)
        self.assertResultLogsEqual([('tags', added_tags, removed_tags)])

    def test_time(self):
        # the time call is dispatched, not eaten by the base class
        self.multiResult.time('foo')
        self.assertResultLogsEqual([('time', 'foo')])


class TestTextTestResult(TestCase):
    """Tests for 'TextTestResult'."""

    def setUp(self):
        super(TestTextTestResult, self).setUp()
        self.result = TextTestResult(StringIO())

    def getvalue(self):
        return self.result.stream.getvalue()

    def test__init_sets_stream(self):
        result = TextTestResult("fp")
        self.assertEqual("fp", result.stream)

    def reset_output(self):
        self.result.stream = StringIO()

    def test_startTestRun(self):
        self.result.startTestRun()
        self.assertEqual("Tests running...\n", self.getvalue())

    def test_stopTestRun_count_many(self):
        test = make_test()
        self.result.startTestRun()
        self.result.startTest(test)
        self.result.stopTest(test)
        self.result.startTest(test)
        self.result.stopTest(test)
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))

    def test_stopTestRun_count_single(self):
        test = make_test()
        self.result.startTestRun()
        self.result.startTest(test)
        self.result.stopTest(test)
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))

    def test_stopTestRun_count_zero(self):
        self.result.startTestRun()
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))

    def test_stopTestRun_current_time(self):
        test = make_test()
        now = datetime.datetime.now(utc)
        self.result.time(now)
        self.result.startTestRun()
        self.result.startTest(test)
        now = now + datetime.timedelta(0, 0, 0, 1)
        self.result.time(now)
        self.result.stopTest(test)
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))

    def test_stopTestRun_successful(self):
        self.result.startTestRun()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\nOK\n", doctest.ELLIPSIS))

    def test_stopTestRun_not_successful_failure(self):
        test = make_failing_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))

    def test_stopTestRun_not_successful_error(self):
        test = make_erroring_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))

    def test_stopTestRun_not_successful_unexpected_success(self):
        test = make_unexpectedly_successful_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))

    def test_stopTestRun_shows_details(self):
        self.skip("Disabled per bug 1188420")
        def run_tests():
            self.result.startTestRun()
            make_erroring_test().run(self.result)
            make_unexpectedly_successful_test().run(self.result)
            make_failing_test().run(self.result)
            self.reset_output()
            self.result.stopTestRun()
        run_with_stack_hidden(True, run_tests)
        self.assertThat(self.getvalue(),
            DocTestMatches("""...======================================================================
ERROR: testtools.tests.test_testresult.Test.error
----------------------------------------------------------------------
Traceback (most recent call last):
  File "...testtools...tests...test_testresult.py", line ..., in error
    1/0
ZeroDivisionError:... divi... by zero...
======================================================================
FAIL: testtools.tests.test_testresult.Test.failed
----------------------------------------------------------------------
Traceback (most recent call last):
  File "...testtools...tests...test_testresult.py", line ..., in failed
    self.fail("yo!")
AssertionError: yo!
======================================================================
UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
----------------------------------------------------------------------
...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
make_erroring_test().run(self.result) + make_unexpectedly_successful_test().run(self.result) + make_failing_test().run(self.result) + self.reset_output() + self.result.stopTestRun() + run_with_stack_hidden(True, run_tests) + self.assertThat(self.getvalue(), + DocTestMatches("""...====================================================================== +ERROR: testtools.tests.test_testresult.Test.error +---------------------------------------------------------------------- +Traceback (most recent call last): + File "...testtools...tests...test_testresult.py", line ..., in error + 1/0 +ZeroDivisionError:... divi... by zero... +====================================================================== +FAIL: testtools.tests.test_testresult.Test.failed +---------------------------------------------------------------------- +Traceback (most recent call last): + File "...testtools...tests...test_testresult.py", line ..., in failed + self.fail("yo!") +AssertionError: yo! +====================================================================== +UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded +---------------------------------------------------------------------- +...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF)) + + +class TestThreadSafeForwardingResult(TestCase): + """Tests for `TestThreadSafeForwardingResult`.""" + + def make_results(self, n): + events = [] + target = LoggingResult(events) + semaphore = threading.Semaphore(1) + return [ + ThreadsafeForwardingResult(target, semaphore) + for i in range(n)], events + + def test_nonforwarding_methods(self): + # startTest and stopTest are not forwarded because they need to be + # batched. + [result], events = self.make_results(1) + result.startTest(self) + result.stopTest(self) + self.assertEqual([], events) + + def test_tags_not_forwarded(self): + # Tags need to be batched for each test, so they aren't forwarded + # until a test runs. 
+ [result], events = self.make_results(1) + result.tags(set(['foo']), set(['bar'])) + self.assertEqual([], events) + + def test_global_tags_simple(self): + # Tags specified outside of a test result are global. When a test's + # results are finally forwarded, we send through these global tags + # *as* test specific tags, because as a multiplexer there should be no + # way for a global tag on an input stream to affect tests from other + # streams - we can just always issue test local tags. + [result], events = self.make_results(1) + result.tags(set(['foo']), set()) + result.time(1) + result.startTest(self) + result.time(2) + result.addSuccess(self) + self.assertEqual( + [('time', 1), + ('startTest', self), + ('time', 2), + ('tags', set(['foo']), set()), + ('addSuccess', self), + ('stopTest', self), + ], events) + + def test_global_tags_complex(self): + # Multiple calls to tags() in a global context are buffered until the + # next test completes and are issued as part of of the test context, + # because they cannot be issued until the output result is locked. + # The sample data shows them being merged together, this is, strictly + # speaking incidental - they could be issued separately (in-order) and + # still be legitimate. + [result], events = self.make_results(1) + result.tags(set(['foo', 'bar']), set(['baz', 'qux'])) + result.tags(set(['cat', 'qux']), set(['bar', 'dog'])) + result.time(1) + result.startTest(self) + result.time(2) + result.addSuccess(self) + self.assertEqual( + [('time', 1), + ('startTest', self), + ('time', 2), + ('tags', set(['cat', 'foo', 'qux']), set(['dog', 'bar', 'baz'])), + ('addSuccess', self), + ('stopTest', self), + ], events) + + def test_local_tags(self): + # Any tags set within a test context are forwarded in that test + # context when the result is finally forwarded. This means that the + # tags for the test are part of the atomic message communicating + # everything about that test. 
+ [result], events = self.make_results(1) + result.time(1) + result.startTest(self) + result.tags(set(['foo']), set([])) + result.tags(set(), set(['bar'])) + result.time(2) + result.addSuccess(self) + self.assertEqual( + [('time', 1), + ('startTest', self), + ('time', 2), + ('tags', set(['foo']), set(['bar'])), + ('addSuccess', self), + ('stopTest', self), + ], events) + + def test_local_tags_dont_leak(self): + # A tag set during a test is local to that test and is not set during + # the tests that follow. + [result], events = self.make_results(1) + a, b = PlaceHolder('a'), PlaceHolder('b') + result.time(1) + result.startTest(a) + result.tags(set(['foo']), set([])) + result.time(2) + result.addSuccess(a) + result.stopTest(a) + result.time(3) + result.startTest(b) + result.time(4) + result.addSuccess(b) + result.stopTest(b) + self.assertEqual( + [('time', 1), + ('startTest', a), + ('time', 2), + ('tags', set(['foo']), set()), + ('addSuccess', a), + ('stopTest', a), + ('time', 3), + ('startTest', b), + ('time', 4), + ('addSuccess', b), + ('stopTest', b), + ], events) + + def test_startTestRun(self): + # Calls to startTestRun are not batched, because we are only + # interested in sending tests atomically, not the whole run. + [result1, result2], events = self.make_results(2) + result1.startTestRun() + result2.startTestRun() + self.assertEqual(["startTestRun", "startTestRun"], events) + + def test_stopTestRun(self): + # Calls to stopTestRun are not batched, because we are only + # interested in sending tests atomically, not the whole run. + [result1, result2], events = self.make_results(2) + result1.stopTestRun() + result2.stopTestRun() + self.assertEqual(["stopTestRun", "stopTestRun"], events) + + def test_forward_addError(self): + # Once we receive an addError event, we forward all of the events for + # that test, as we now know that test is complete. 
+ [result], events = self.make_results(1) + exc_info = make_exception_info(RuntimeError, 'error') + start_time = datetime.datetime.utcfromtimestamp(1.489) + end_time = datetime.datetime.utcfromtimestamp(51.476) + result.time(start_time) + result.startTest(self) + result.time(end_time) + result.addError(self, exc_info) + self.assertEqual([ + ('time', start_time), + ('startTest', self), + ('time', end_time), + ('addError', self, exc_info), + ('stopTest', self), + ], events) + + def test_forward_addFailure(self): + # Once we receive an addFailure event, we forward all of the events + # for that test, as we now know that test is complete. + [result], events = self.make_results(1) + exc_info = make_exception_info(AssertionError, 'failure') + start_time = datetime.datetime.utcfromtimestamp(2.489) + end_time = datetime.datetime.utcfromtimestamp(3.476) + result.time(start_time) + result.startTest(self) + result.time(end_time) + result.addFailure(self, exc_info) + self.assertEqual([ + ('time', start_time), + ('startTest', self), + ('time', end_time), + ('addFailure', self, exc_info), + ('stopTest', self), + ], events) + + def test_forward_addSkip(self): + # Once we receive an addSkip event, we forward all of the events for + # that test, as we now know that test is complete. + [result], events = self.make_results(1) + reason = _u("Skipped for some reason") + start_time = datetime.datetime.utcfromtimestamp(4.489) + end_time = datetime.datetime.utcfromtimestamp(5.476) + result.time(start_time) + result.startTest(self) + result.time(end_time) + result.addSkip(self, reason) + self.assertEqual([ + ('time', start_time), + ('startTest', self), + ('time', end_time), + ('addSkip', self, reason), + ('stopTest', self), + ], events) + + def test_forward_addSuccess(self): + # Once we receive an addSuccess event, we forward all of the events + # for that test, as we now know that test is complete. 
+ [result], events = self.make_results(1) + start_time = datetime.datetime.utcfromtimestamp(6.489) + end_time = datetime.datetime.utcfromtimestamp(7.476) + result.time(start_time) + result.startTest(self) + result.time(end_time) + result.addSuccess(self) + self.assertEqual([ + ('time', start_time), + ('startTest', self), + ('time', end_time), + ('addSuccess', self), + ('stopTest', self), + ], events) + + def test_only_one_test_at_a_time(self): + # Even if there are multiple ThreadsafeForwardingResults forwarding to + # the same target result, the target result only receives the complete + # events for one test at a time. + [result1, result2], events = self.make_results(2) + test1, test2 = self, make_test() + start_time1 = datetime.datetime.utcfromtimestamp(1.489) + end_time1 = datetime.datetime.utcfromtimestamp(2.476) + start_time2 = datetime.datetime.utcfromtimestamp(3.489) + end_time2 = datetime.datetime.utcfromtimestamp(4.489) + result1.time(start_time1) + result2.time(start_time2) + result1.startTest(test1) + result2.startTest(test2) + result1.time(end_time1) + result2.time(end_time2) + result2.addSuccess(test2) + result1.addSuccess(test1) + self.assertEqual([ + # test2 finishes first, and so is flushed first. + ('time', start_time2), + ('startTest', test2), + ('time', end_time2), + ('addSuccess', test2), + ('stopTest', test2), + # test1 finishes next, and thus follows. + ('time', start_time1), + ('startTest', test1), + ('time', end_time1), + ('addSuccess', test1), + ('stopTest', test1), + ], events) + + +class TestMergeTags(TestCase): + + def test_merge_unseen_gone_tag(self): + # If an incoming "gone" tag isn't currently tagged one way or the + # other, add it to the "gone" tags. 
+ current_tags = set(['present']), set(['missing']) + changing_tags = set(), set(['going']) + expected = set(['present']), set(['missing', 'going']) + self.assertEqual( + expected, _merge_tags(current_tags, changing_tags)) + + def test_merge_incoming_gone_tag_with_current_new_tag(self): + # If one of the incoming "gone" tags is one of the existing "new" + # tags, then it overrides the "new" tag, leaving it marked as "gone". + current_tags = set(['present', 'going']), set(['missing']) + changing_tags = set(), set(['going']) + expected = set(['present']), set(['missing', 'going']) + self.assertEqual( + expected, _merge_tags(current_tags, changing_tags)) + + def test_merge_unseen_new_tag(self): + current_tags = set(['present']), set(['missing']) + changing_tags = set(['coming']), set() + expected = set(['coming', 'present']), set(['missing']) + self.assertEqual( + expected, _merge_tags(current_tags, changing_tags)) + + def test_merge_incoming_new_tag_with_current_gone_tag(self): + # If one of the incoming "new" tags is currently marked as "gone", + # then it overrides the "gone" tag, leaving it marked as "new". 
+ current_tags = set(['present']), set(['coming', 'missing']) + changing_tags = set(['coming']), set() + expected = set(['coming', 'present']), set(['missing']) + self.assertEqual( + expected, _merge_tags(current_tags, changing_tags)) + + +class TestStreamResultRouter(TestCase): + + def test_start_stop_test_run_no_fallback(self): + result = StreamResultRouter() + result.startTestRun() + result.stopTestRun() + + def test_no_fallback_errors(self): + self.assertRaises(Exception, StreamResultRouter().status, test_id='f') + + def test_fallback_calls(self): + fallback = LoggingStreamResult() + result = StreamResultRouter(fallback) + result.startTestRun() + result.status(test_id='foo') + result.stopTestRun() + self.assertEqual([ + ('startTestRun',), + ('status', 'foo', None, None, True, None, None, False, None, None, + None), + ('stopTestRun',), + ], + fallback._events) + + def test_fallback_no_do_start_stop_run(self): + fallback = LoggingStreamResult() + result = StreamResultRouter(fallback, do_start_stop_run=False) + result.startTestRun() + result.status(test_id='foo') + result.stopTestRun() + self.assertEqual([ + ('status', 'foo', None, None, True, None, None, False, None, None, + None) + ], + fallback._events) + + def test_add_rule_bad_policy(self): + router = StreamResultRouter() + target = LoggingStreamResult() + self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa', + route_prefix='0') + + def test_add_rule_extra_policy_arg(self): + router = StreamResultRouter() + target = LoggingStreamResult() + self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix', + route_prefix='0', foo=1) + + def test_add_rule_missing_prefix(self): + router = StreamResultRouter() + target = LoggingStreamResult() + self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix') + + def test_add_rule_slash_in_prefix(self): + router = StreamResultRouter() + target = LoggingStreamResult() + self.assertRaises(TypeError, router.add_rule, target, 
'route_code_prefix', + route_prefix='0/') + + def test_add_rule_route_code_consume_False(self): + fallback = LoggingStreamResult() + target = LoggingStreamResult() + router = StreamResultRouter(fallback) + router.add_rule(target, 'route_code_prefix', route_prefix='0') + router.status(test_id='foo', route_code='0') + router.status(test_id='foo', route_code='0/1') + router.status(test_id='foo') + self.assertEqual([ + ('status', 'foo', None, None, True, None, None, False, None, '0', + None), + ('status', 'foo', None, None, True, None, None, False, None, '0/1', + None), + ], + target._events) + self.assertEqual([ + ('status', 'foo', None, None, True, None, None, False, None, None, + None), + ], + fallback._events) + + def test_add_rule_route_code_consume_True(self): + fallback = LoggingStreamResult() + target = LoggingStreamResult() + router = StreamResultRouter(fallback) + router.add_rule( + target, 'route_code_prefix', route_prefix='0', consume_route=True) + router.status(test_id='foo', route_code='0') # -> None + router.status(test_id='foo', route_code='0/1') # -> 1 + router.status(test_id='foo', route_code='1') # -> fallback as-is. 
+ self.assertEqual([ + ('status', 'foo', None, None, True, None, None, False, None, None, + None), + ('status', 'foo', None, None, True, None, None, False, None, '1', + None), + ], + target._events) + self.assertEqual([ + ('status', 'foo', None, None, True, None, None, False, None, '1', + None), + ], + fallback._events) + + def test_add_rule_test_id(self): + nontest = LoggingStreamResult() + test = LoggingStreamResult() + router = StreamResultRouter(test) + router.add_rule(nontest, 'test_id', test_id=None) + router.status(test_id='foo', file_name="bar", file_bytes=b'') + router.status(file_name="bar", file_bytes=b'') + self.assertEqual([ + ('status', 'foo', None, None, True, 'bar', b'', False, None, None, + None),], test._events) + self.assertEqual([ + ('status', None, None, None, True, 'bar', b'', False, None, None, + None),], nontest._events) + + def test_add_rule_do_start_stop_run(self): + nontest = LoggingStreamResult() + router = StreamResultRouter() + router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True) + router.startTestRun() + router.stopTestRun() + self.assertEqual([ + ('startTestRun',), + ('stopTestRun',), + ], nontest._events) + + def test_add_rule_do_start_stop_run_after_startTestRun(self): + nontest = LoggingStreamResult() + router = StreamResultRouter() + router.startTestRun() + router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True) + router.stopTestRun() + self.assertEqual([ + ('startTestRun',), + ('stopTestRun',), + ], nontest._events) + + +class TestStreamToQueue(TestCase): + + def make_result(self): + queue = Queue() + return queue, StreamToQueue(queue, "foo") + + def test_status(self): + def check_event(event_dict, route=None, time=None): + self.assertEqual("status", event_dict['event']) + self.assertEqual("test", event_dict['test_id']) + self.assertEqual("fail", event_dict['test_status']) + self.assertEqual(set(["quux"]), event_dict['test_tags']) + self.assertEqual(False, event_dict['runnable']) + 
self.assertEqual("file", event_dict['file_name']) + self.assertEqual(_b("content"), event_dict['file_bytes']) + self.assertEqual(True, event_dict['eof']) + self.assertEqual("quux", event_dict['mime_type']) + self.assertEqual("test", event_dict['test_id']) + self.assertEqual(route, event_dict['route_code']) + self.assertEqual(time, event_dict['timestamp']) + queue, result = self.make_result() + result.status("test", "fail", test_tags=set(["quux"]), runnable=False, + file_name="file", file_bytes=_b("content"), eof=True, + mime_type="quux", route_code=None, timestamp=None) + self.assertEqual(1, queue.qsize()) + a_time = datetime.datetime.now(utc) + result.status("test", "fail", test_tags=set(["quux"]), runnable=False, + file_name="file", file_bytes=_b("content"), eof=True, + mime_type="quux", route_code="bar", timestamp=a_time) + self.assertEqual(2, queue.qsize()) + check_event(queue.get(False), route="foo", time=None) + check_event(queue.get(False), route="foo/bar", time=a_time) + + def testStartTestRun(self): + queue, result = self.make_result() + result.startTestRun() + self.assertEqual( + {'event':'startTestRun', 'result':result}, queue.get(False)) + self.assertTrue(queue.empty()) + + def testStopTestRun(self): + queue, result = self.make_result() + result.stopTestRun() + self.assertEqual( + {'event':'stopTestRun', 'result':result}, queue.get(False)) + self.assertTrue(queue.empty()) + + +class TestExtendedToOriginalResultDecoratorBase(TestCase): + + def make_26_result(self): + self.result = Python26TestResult() + self.make_converter() + + def make_27_result(self): + self.result = Python27TestResult() + self.make_converter() + + def make_converter(self): + self.converter = ExtendedToOriginalDecorator(self.result) + + def make_extended_result(self): + self.result = ExtendedTestResult() + self.make_converter() + + def check_outcome_details(self, outcome): + """Call an outcome with a details dict to be passed through.""" + # This dict is /not/ convertible - thats 
deliberate, as it should + # not hit the conversion code path. + details = {'foo': 'bar'} + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(outcome, self, details)], self.result._events) + + def get_details_and_string(self): + """Get a details dict and expected string.""" + text1 = lambda: [_b("1\n2\n")] + text2 = lambda: [_b("3\n4\n")] + bin1 = lambda: [_b("5\n")] + details = {'text 1': Content(ContentType('text', 'plain'), text1), + 'text 2': Content(ContentType('text', 'strange'), text2), + 'bin 1': Content(ContentType('application', 'binary'), bin1)} + return (details, + ("Binary content:\n" + " bin 1 (application/binary)\n" + "\n" + "text 1: {{{\n" + "1\n" + "2\n" + "}}}\n" + "\n" + "text 2: {{{\n" + "3\n" + "4\n" + "}}}\n")) + + def check_outcome_details_to_exec_info(self, outcome, expected=None): + """Call an outcome with a details dict to be made into exc_info.""" + # The conversion is a done using RemoteError and the string contents + # of the text types in the details dict. 
+ if not expected: + expected = outcome + details, err_str = self.get_details_and_string() + getattr(self.converter, outcome)(self, details=details) + err = self.converter._details_to_exc_info(details) + self.assertEqual([(expected, self, err)], self.result._events) + + def check_outcome_details_to_nothing(self, outcome, expected=None): + """Call an outcome with a details dict to be swallowed.""" + if not expected: + expected = outcome + details = {'foo': 'bar'} + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_details_to_string(self, outcome): + """Call an outcome with a details dict to be stringified.""" + details, err_str = self.get_details_and_string() + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(outcome, self, err_str)], self.result._events) + + def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None): + """Call an outcome with a details dict to have an arg extracted.""" + details, _ = self.get_details_and_string() + if extra_detail: + details.update(extra_detail) + getattr(self.converter, outcome)(self, details=details) + self.assertEqual([(outcome, self, arg)], self.result._events) + + def check_outcome_exc_info(self, outcome, expected=None): + """Check that calling a legacy outcome still works.""" + # calling some outcome with the legacy exc_info style api (no keyword + # parameters) gets passed through. + if not expected: + expected = outcome + err = sys.exc_info() + getattr(self.converter, outcome)(self, err) + self.assertEqual([(expected, self, err)], self.result._events) + + def check_outcome_exc_info_to_nothing(self, outcome, expected=None): + """Check that calling a legacy outcome on a fallback works.""" + # calling some outcome with the legacy exc_info style api (no keyword + # parameters) gets passed through. 
+ if not expected: + expected = outcome + err = sys.exc_info() + getattr(self.converter, outcome)(self, err) + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_nothing(self, outcome, expected=None): + """Check that calling a legacy outcome still works.""" + if not expected: + expected = outcome + getattr(self.converter, outcome)(self) + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_string_nothing(self, outcome, expected): + """Check that calling outcome with a string calls expected.""" + getattr(self.converter, outcome)(self, "foo") + self.assertEqual([(expected, self)], self.result._events) + + def check_outcome_string(self, outcome): + """Check that calling outcome with a string works.""" + getattr(self.converter, outcome)(self, "foo") + self.assertEqual([(outcome, self, "foo")], self.result._events) + + +class TestExtendedToOriginalResultDecorator( + TestExtendedToOriginalResultDecoratorBase): + + def test_failfast_py26(self): + self.make_26_result() + self.assertEqual(False, self.converter.failfast) + self.converter.failfast = True + self.assertFalse(safe_hasattr(self.converter.decorated, 'failfast')) + + def test_failfast_py27(self): + self.make_27_result() + self.assertEqual(False, self.converter.failfast) + # setting it should write it to the backing result + self.converter.failfast = True + self.assertEqual(True, self.converter.decorated.failfast) + + def test_progress_py26(self): + self.make_26_result() + self.converter.progress(1, 2) + + def test_progress_py27(self): + self.make_27_result() + self.converter.progress(1, 2) + + def test_progress_pyextended(self): + self.make_extended_result() + self.converter.progress(1, 2) + self.assertEqual([('progress', 1, 2)], self.result._events) + + def test_shouldStop(self): + self.make_26_result() + self.assertEqual(False, self.converter.shouldStop) + self.converter.decorated.stop() + self.assertEqual(True, self.converter.shouldStop) + + def 
test_startTest_py26(self): + self.make_26_result() + self.converter.startTest(self) + self.assertEqual([('startTest', self)], self.result._events) + + def test_startTest_py27(self): + self.make_27_result() + self.converter.startTest(self) + self.assertEqual([('startTest', self)], self.result._events) + + def test_startTest_pyextended(self): + self.make_extended_result() + self.converter.startTest(self) + self.assertEqual([('startTest', self)], self.result._events) + + def test_startTestRun_py26(self): + self.make_26_result() + self.converter.startTestRun() + self.assertEqual([], self.result._events) + + def test_startTestRun_py27(self): + self.make_27_result() + self.converter.startTestRun() + self.assertEqual([('startTestRun',)], self.result._events) + + def test_startTestRun_pyextended(self): + self.make_extended_result() + self.converter.startTestRun() + self.assertEqual([('startTestRun',)], self.result._events) + + def test_stopTest_py26(self): + self.make_26_result() + self.converter.stopTest(self) + self.assertEqual([('stopTest', self)], self.result._events) + + def test_stopTest_py27(self): + self.make_27_result() + self.converter.stopTest(self) + self.assertEqual([('stopTest', self)], self.result._events) + + def test_stopTest_pyextended(self): + self.make_extended_result() + self.converter.stopTest(self) + self.assertEqual([('stopTest', self)], self.result._events) + + def test_stopTestRun_py26(self): + self.make_26_result() + self.converter.stopTestRun() + self.assertEqual([], self.result._events) + + def test_stopTestRun_py27(self): + self.make_27_result() + self.converter.stopTestRun() + self.assertEqual([('stopTestRun',)], self.result._events) + + def test_stopTestRun_pyextended(self): + self.make_extended_result() + self.converter.stopTestRun() + self.assertEqual([('stopTestRun',)], self.result._events) + + def test_tags_py26(self): + self.make_26_result() + self.converter.tags(set([1]), set([2])) + + def test_tags_py27(self): + self.make_27_result() 
+ self.converter.tags(set([1]), set([2])) + + def test_tags_pyextended(self): + self.make_extended_result() + self.converter.tags(set([1]), set([2])) + self.assertEqual([('tags', set([1]), set([2]))], self.result._events) + + def test_time_py26(self): + self.make_26_result() + self.converter.time(1) + + def test_time_py27(self): + self.make_27_result() + self.converter.time(1) + + def test_time_pyextended(self): + self.make_extended_result() + self.converter.time(1) + self.assertEqual([('time', 1)], self.result._events) + + +class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addError' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_exc_info(self.outcome) + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_exc_info(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_exc_info(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_details_to_exec_info(self.outcome) + + def test_outcome_Extended_py27(self): + self.make_27_result() + self.check_outcome_details_to_exec_info(self.outcome) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + def test_outcome__no_details(self): + self.make_extended_result() + self.assertThat( + lambda: getattr(self.converter, self.outcome)(self), + Raises(MatchesException(ValueError))) + + +class TestExtendedToOriginalAddFailure( + TestExtendedToOriginalAddError): + + outcome = 'addFailure' + + +class TestExtendedToOriginalAddExpectedFailure( + TestExtendedToOriginalAddError): + + outcome = 'addExpectedFailure' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess') + + def test_outcome_Extended_py26(self): + self.make_26_result() + 
self.check_outcome_details_to_nothing(self.outcome, 'addSuccess') + + + +class TestExtendedToOriginalAddSkip( + TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addSkip' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_string_nothing(self.outcome, 'addSuccess') + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_string(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_string(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_string_nothing(self.outcome, 'addSuccess') + + def test_outcome_Extended_py27_no_reason(self): + self.make_27_result() + self.check_outcome_details_to_string(self.outcome) + + def test_outcome_Extended_py27_reason(self): + self.make_27_result() + self.check_outcome_details_to_arg(self.outcome, 'foo', + {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])}) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + def test_outcome__no_details(self): + self.make_extended_result() + self.assertThat( + lambda: getattr(self.converter, self.outcome)(self), + Raises(MatchesException(ValueError))) + + +class TestExtendedToOriginalAddSuccess( + TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addSuccess' + expected = 'addSuccess' + + def test_outcome_Original_py26(self): + self.make_26_result() + self.check_outcome_nothing(self.outcome, self.expected) + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + self.check_outcome_details_to_nothing(self.outcome, self.expected) + + def test_outcome_Extended_py27(self): + self.make_27_result() + 
self.check_outcome_details_to_nothing(self.outcome) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + +class TestExtendedToOriginalAddUnexpectedSuccess( + TestExtendedToOriginalResultDecoratorBase): + + outcome = 'addUnexpectedSuccess' + expected = 'addFailure' + + def test_outcome_Original_py26(self): + self.make_26_result() + getattr(self.converter, self.outcome)(self) + [event] = self.result._events + self.assertEqual((self.expected, self), event[:2]) + + def test_outcome_Original_py27(self): + self.make_27_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Original_pyextended(self): + self.make_extended_result() + self.check_outcome_nothing(self.outcome) + + def test_outcome_Extended_py26(self): + self.make_26_result() + getattr(self.converter, self.outcome)(self) + [event] = self.result._events + self.assertEqual((self.expected, self), event[:2]) + + def test_outcome_Extended_py27(self): + self.make_27_result() + self.check_outcome_details_to_nothing(self.outcome) + + def test_outcome_Extended_pyextended(self): + self.make_extended_result() + self.check_outcome_details(self.outcome) + + +class TestExtendedToOriginalResultOtherAttributes( + TestExtendedToOriginalResultDecoratorBase): + + def test_other_attribute(self): + class OtherExtendedResult: + def foo(self): + return 2 + bar = 1 + self.result = OtherExtendedResult() + self.make_converter() + self.assertEqual(1, self.converter.bar) + self.assertEqual(2, self.converter.foo()) + + +class TestNonAsciiResults(TestCase): + """Test all kinds of tracebacks are cleanly interpreted as unicode + + Currently only uses weak "contains" assertions, would be good to be much + stricter about the expected output. This would add a few failures for the + current release of IronPython for instance, which gets some traceback + lines muddled. 
+ """ + + _sample_texts = ( + _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only + _u("\u5357\u7121"), # In ISO 2022 encodings + _u("\xa7\xa7\xa7"), # In ISO 8859 encodings + ) + + _is_pypy = "__pypy__" in sys.builtin_module_names + # Everything but Jython shows syntax errors on the current character + _error_on_character = os.name != "java" and not _is_pypy + + def _run(self, stream, test): + """Run the test, the same as in testtools.run but not to stdout""" + result = TextTestResult(stream) + result.startTestRun() + try: + return test.run(result) + finally: + result.stopTestRun() + + def _write_module(self, name, encoding, contents): + """Create Python module on disk with contents in given encoding""" + try: + # Need to pre-check that the coding is valid or codecs.open drops + # the file without closing it which breaks non-refcounted pythons + codecs.lookup(encoding) + except LookupError: + self.skip("Encoding unsupported by implementation: %r" % encoding) + f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding) + try: + f.write(contents) + finally: + f.close() + + def _test_external_case(self, testline, coding="ascii", modulelevel="", + suffix=""): + """Create and run a test case in a seperate module""" + self._setup_external_case(testline, coding, modulelevel, suffix) + return self._run_external_case() + + def _setup_external_case(self, testline, coding="ascii", modulelevel="", + suffix=""): + """Create a test case in a seperate module""" + _, prefix, self.modname = self.id().rsplit(".", 2) + self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix) + self.addCleanup(shutil.rmtree, self.dir) + self._write_module(self.modname, coding, + # Older Python 2 versions don't see a coding declaration in a + # docstring so it has to be in a comment, but then we can't + # workaround bug: <http://ironpython.codeplex.com/workitem/26940> + "# coding: %s\n" + "import testtools\n" + "%s\n" + "class Test(testtools.TestCase):\n" + " def runTest(self):\n" + " 
%s\n" % (coding, modulelevel, testline)) + + def _run_external_case(self): + """Run the prepared test case in a seperate module""" + sys.path.insert(0, self.dir) + self.addCleanup(sys.path.remove, self.dir) + module = __import__(self.modname) + self.addCleanup(sys.modules.pop, self.modname) + stream = StringIO() + self._run(stream, module.Test()) + return stream.getvalue() + + def _silence_deprecation_warnings(self): + """Shut up DeprecationWarning for this test only""" + warnings.simplefilter("ignore", DeprecationWarning) + self.addCleanup(warnings.filters.remove, warnings.filters[0]) + + def _get_sample_text(self, encoding="unicode_internal"): + if encoding is None and str_is_unicode: + encoding = "unicode_internal" + for u in self._sample_texts: + try: + b = u.encode(encoding) + if u == b.decode(encoding): + if str_is_unicode: + return u, u + return u, b + except (LookupError, UnicodeError): + pass + self.skip("Could not find a sample text for encoding: %r" % encoding) + + def _as_output(self, text): + return text + + def test_non_ascii_failure_string(self): + """Assertion contents can be non-ascii and should get decoded""" + text, raw = self._get_sample_text(_get_exception_encoding()) + textoutput = self._test_external_case("self.fail(%s)" % _r(raw)) + self.assertIn(self._as_output(text), textoutput) + + def test_non_ascii_failure_string_via_exec(self): + """Assertion via exec can be non-ascii and still gets decoded""" + text, raw = self._get_sample_text(_get_exception_encoding()) + textoutput = self._test_external_case( + testline='exec ("self.fail(%s)")' % _r(raw)) + self.assertIn(self._as_output(text), textoutput) + + def test_control_characters_in_failure_string(self): + """Control characters in assertions should be escaped""" + textoutput = self._test_external_case("self.fail('\\a\\a\\a')") + self.expectFailure("Defense against the beeping horror unimplemented", + self.assertNotIn, self._as_output("\a\a\a"), textoutput) + 
self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput) + + def _local_os_error_matcher(self): + if sys.version_info > (3, 3): + return MatchesAny(Contains("FileExistsError: "), + Contains("PermissionError: ")) + elif os.name != "nt" or sys.version_info < (2, 5): + return Contains(self._as_output("OSError: ")) + else: + return Contains(self._as_output("WindowsError: ")) + + def test_os_error(self): + """Locale error messages from the OS shouldn't break anything""" + textoutput = self._test_external_case( + modulelevel="import os", + testline="os.mkdir('/')") + self.assertThat(textoutput, self._local_os_error_matcher()) + + def test_assertion_text_shift_jis(self): + """A terminal raw backslash in an encoded string is weird but fine""" + example_text = _u("\u5341") + textoutput = self._test_external_case( + coding="shift_jis", + testline="self.fail('%s')" % example_text) + if str_is_unicode: + output_text = example_text + else: + output_text = example_text.encode("shift_jis").decode( + _get_exception_encoding(), "replace") + self.assertIn(self._as_output("AssertionError: %s" % output_text), + textoutput) + + def test_file_comment_iso2022_jp(self): + """Control character escapes must be preserved if valid encoding""" + example_text, _ = self._get_sample_text("iso2022_jp") + textoutput = self._test_external_case( + coding="iso2022_jp", + testline="self.fail('Simple') # %s" % example_text) + self.assertIn(self._as_output(example_text), textoutput) + + def test_unicode_exception(self): + """Exceptions that can be formated losslessly as unicode should be""" + example_text, _ = self._get_sample_text() + exception_class = ( + "class FancyError(Exception):\n" + # A __unicode__ method does nothing on py3k but the default works + " def __unicode__(self):\n" + " return self.args[0]\n") + textoutput = self._test_external_case( + modulelevel=exception_class, + testline="raise FancyError(%s)" % _r(example_text)) + self.assertIn(self._as_output(example_text), 
textoutput) + + def test_unprintable_exception(self): + """A totally useless exception instance still prints something""" + exception_class = ( + "class UnprintableError(Exception):\n" + " def __str__(self):\n" + " raise RuntimeError\n" + " def __unicode__(self):\n" + " raise RuntimeError\n" + " def __repr__(self):\n" + " raise RuntimeError\n") + textoutput = self._test_external_case( + modulelevel=exception_class, + testline="raise UnprintableError") + self.assertIn(self._as_output( + "UnprintableError: <unprintable UnprintableError object>\n"), + textoutput) + + def test_string_exception(self): + """Raise a string rather than an exception instance if supported""" + if sys.version_info > (2, 6): + self.skip("No string exceptions in Python 2.6 or later") + elif sys.version_info > (2, 5): + self._silence_deprecation_warnings() + textoutput = self._test_external_case(testline="raise 'plain str'") + self.assertIn(self._as_output("\nplain str\n"), textoutput) + + def test_non_ascii_dirname(self): + """Script paths in the traceback can be non-ascii""" + text, raw = self._get_sample_text(sys.getfilesystemencoding()) + textoutput = self._test_external_case( + # Avoid bug in Python 3 by giving a unicode source encoding rather + # than just ascii which raises a SyntaxError with no other details + coding="utf-8", + testline="self.fail('Simple')", + suffix=raw) + self.assertIn(self._as_output(text), textoutput) + + def test_syntax_error(self): + """Syntax errors should still have fancy special-case formatting""" + textoutput = self._test_external_case("exec ('f(a, b c)')") + self.assertIn(self._as_output( + ' File "<string>", line 1\n' + ' f(a, b c)\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ' + ), textoutput) + + def test_syntax_error_malformed(self): + """Syntax errors with bogus parameters should break anything""" + textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)") + self.assertIn(self._as_output("\nSyntaxError: "), textoutput) + + 
def test_syntax_error_import_binary(self): + """Importing a binary file shouldn't break SyntaxError formatting""" + if sys.version_info < (2, 5): + # Python 2.4 assumes the file is latin-1 and tells you off + self._silence_deprecation_warnings() + self._setup_external_case("import bad") + f = open(os.path.join(self.dir, "bad.py"), "wb") + try: + f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9")) + finally: + f.close() + textoutput = self._run_external_case() + matches_error = MatchesAny( + Contains('\nTypeError: '), Contains('\nSyntaxError: ')) + self.assertThat(textoutput, matches_error) + + def test_syntax_error_line_iso_8859_1(self): + """Syntax error on a latin-1 line shows the line decoded""" + text, raw = self._get_sample_text("iso-8859-1") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "iso-8859-1", + "# coding: iso-8859-1\n! = 0 # %s\n" % text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + #'bad.py", line 2\n' + ' ! 
= 0 # %s\n' + ' ^\n' + 'SyntaxError: ') % + (text,)), textoutput) + + def test_syntax_error_line_iso_8859_5(self): + """Syntax error on a iso-8859-5 line shows the line decoded""" + text, raw = self._get_sample_text("iso-8859-5") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "iso-8859-5", + "# coding: iso-8859-5\n%% = 0 # %s\n" % text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + #'bad.py", line 2\n' + ' %% = 0 # %s\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ') % + (text,)), textoutput) + + def test_syntax_error_line_euc_jp(self): + """Syntax error on a euc_jp line shows the line decoded""" + text, raw = self._get_sample_text("euc_jp") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "euc_jp", + "# coding: euc_jp\n$ = 0 # %s\n" % text) + textoutput = self._run_external_case() + # pypy uses cpython's multibyte codecs so has their behavior here + if self._is_pypy: + self._error_on_character = True + self.assertIn(self._as_output(_u( + #'bad.py", line 2\n' + ' $ = 0 # %s\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ') % + (text,)), textoutput) + + def test_syntax_error_line_utf_8(self): + """Syntax error on a utf-8 line shows the line decoded""" + text, raw = self._get_sample_text("utf-8") + textoutput = self._setup_external_case("import bad") + self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text) + textoutput = self._run_external_case() + self.assertIn(self._as_output(_u( + 'bad.py", line 1\n' + ' ^ = 0 # %s\n' + + ' ' * self._error_on_character + + ' ^\n' + 'SyntaxError: ') % + text), textoutput) + + +class TestNonAsciiResultsWithUnittest(TestNonAsciiResults): + """Test that running under unittest produces clean ascii strings""" + + def _run(self, stream, test): + from unittest import TextTestRunner as _Runner + return _Runner(stream).run(test) + + def _as_output(self, text): + if str_is_unicode: + return 
text + return text.encode("utf-8") + + +class TestDetailsToStr(TestCase): + + def test_no_details(self): + string = _details_to_str({}) + self.assertThat(string, Equals('')) + + def test_binary_content(self): + content = content_from_stream( + StringIO('foo'), content_type=ContentType('image', 'jpeg')) + string = _details_to_str({'attachment': content}) + self.assertThat( + string, Equals("""\ +Binary content: + attachment (image/jpeg) +""")) + + def test_single_line_content(self): + content = text_content('foo') + string = _details_to_str({'attachment': content}) + self.assertThat(string, Equals('attachment: {{{foo}}}\n')) + + def test_multi_line_text_content(self): + content = text_content('foo\nbar\nbaz') + string = _details_to_str({'attachment': content}) + self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n')) + + def test_special_text_content(self): + content = text_content('foo') + string = _details_to_str({'attachment': content}, special='attachment') + self.assertThat(string, Equals('foo\n')) + + def test_multiple_text_content(self): + string = _details_to_str( + {'attachment': text_content('foo\nfoo'), + 'attachment-1': text_content('bar\nbar')}) + self.assertThat( + string, Equals('attachment: {{{\n' + 'foo\n' + 'foo\n' + '}}}\n' + '\n' + 'attachment-1: {{{\n' + 'bar\n' + 'bar\n' + '}}}\n')) + + def test_empty_attachment(self): + string = _details_to_str({'attachment': text_content('')}) + self.assertThat( + string, Equals("""\ +Empty attachments: + attachment +""")) + + def test_lots_of_different_attachments(self): + jpg = lambda x: content_from_stream( + StringIO(x), ContentType('image', 'jpeg')) + attachments = { + 'attachment': text_content('foo'), + 'attachment-1': text_content('traceback'), + 'attachment-2': jpg('pic1'), + 'attachment-3': text_content('bar'), + 'attachment-4': text_content(''), + 'attachment-5': jpg('pic2'), + } + string = _details_to_str(attachments, special='attachment-1') + self.assertThat( + string, 
Equals("""\ +Binary content: + attachment-2 (image/jpeg) + attachment-5 (image/jpeg) +Empty attachments: + attachment-4 + +attachment: {{{foo}}} +attachment-3: {{{bar}}} + +traceback +""")) + + +class TestByTestResultTests(TestCase): + + def setUp(self): + super(TestByTestResultTests, self).setUp() + self.log = [] + self.result = TestByTestResult(self.on_test) + now = iter(range(5)) + self.result._now = lambda: advance_iterator(now) + + def assertCalled(self, **kwargs): + defaults = { + 'test': self, + 'tags': set(), + 'details': None, + 'start_time': 0, + 'stop_time': 1, + } + defaults.update(kwargs) + self.assertEqual([defaults], self.log) + + def on_test(self, **kwargs): + self.log.append(kwargs) + + def test_no_tests_nothing_reported(self): + self.result.startTestRun() + self.result.stopTestRun() + self.assertEqual([], self.log) + + def test_add_success(self): + self.result.startTest(self) + self.result.addSuccess(self) + self.result.stopTest(self) + self.assertCalled(status='success') + + def test_add_success_details(self): + self.result.startTest(self) + details = {'foo': 'bar'} + self.result.addSuccess(self, details=details) + self.result.stopTest(self) + self.assertCalled(status='success', details=details) + + def test_global_tags(self): + self.result.tags(['foo'], []) + self.result.startTest(self) + self.result.addSuccess(self) + self.result.stopTest(self) + self.assertCalled(status='success', tags=set(['foo'])) + + def test_local_tags(self): + self.result.tags(['foo'], []) + self.result.startTest(self) + self.result.tags(['bar'], []) + self.result.addSuccess(self) + self.result.stopTest(self) + self.assertCalled(status='success', tags=set(['foo', 'bar'])) + + def test_add_error(self): + self.result.startTest(self) + try: + 1/0 + except ZeroDivisionError: + error = sys.exc_info() + self.result.addError(self, error) + self.result.stopTest(self) + self.assertCalled( + status='error', + details={'traceback': TracebackContent(error, self)}) + + def 
test_add_error_details(self): + self.result.startTest(self) + details = {"foo": text_content("bar")} + self.result.addError(self, details=details) + self.result.stopTest(self) + self.assertCalled(status='error', details=details) + + def test_add_failure(self): + self.result.startTest(self) + try: + self.fail("intentional failure") + except self.failureException: + failure = sys.exc_info() + self.result.addFailure(self, failure) + self.result.stopTest(self) + self.assertCalled( + status='failure', + details={'traceback': TracebackContent(failure, self)}) + + def test_add_failure_details(self): + self.result.startTest(self) + details = {"foo": text_content("bar")} + self.result.addFailure(self, details=details) + self.result.stopTest(self) + self.assertCalled(status='failure', details=details) + + def test_add_xfail(self): + self.result.startTest(self) + try: + 1/0 + except ZeroDivisionError: + error = sys.exc_info() + self.result.addExpectedFailure(self, error) + self.result.stopTest(self) + self.assertCalled( + status='xfail', + details={'traceback': TracebackContent(error, self)}) + + def test_add_xfail_details(self): + self.result.startTest(self) + details = {"foo": text_content("bar")} + self.result.addExpectedFailure(self, details=details) + self.result.stopTest(self) + self.assertCalled(status='xfail', details=details) + + def test_add_unexpected_success(self): + self.result.startTest(self) + details = {'foo': 'bar'} + self.result.addUnexpectedSuccess(self, details=details) + self.result.stopTest(self) + self.assertCalled(status='success', details=details) + + def test_add_skip_reason(self): + self.result.startTest(self) + reason = self.getUniqueString() + self.result.addSkip(self, reason) + self.result.stopTest(self) + self.assertCalled( + status='skip', details={'reason': text_content(reason)}) + + def test_add_skip_details(self): + self.result.startTest(self) + details = {'foo': 'bar'} + self.result.addSkip(self, details=details) + 
self.result.stopTest(self) + self.assertCalled(status='skip', details=details) + + def test_twice(self): + self.result.startTest(self) + self.result.addSuccess(self, details={'foo': 'bar'}) + self.result.stopTest(self) + self.result.startTest(self) + self.result.addSuccess(self) + self.result.stopTest(self) + self.assertEqual( + [{'test': self, + 'status': 'success', + 'start_time': 0, + 'stop_time': 1, + 'tags': set(), + 'details': {'foo': 'bar'}}, + {'test': self, + 'status': 'success', + 'start_time': 2, + 'stop_time': 3, + 'tags': set(), + 'details': None}, + ], + self.log) + + +class TestTagger(TestCase): + + def test_tags_tests(self): + result = ExtendedTestResult() + tagger = Tagger(result, set(['foo']), set(['bar'])) + test1, test2 = self, make_test() + tagger.startTest(test1) + tagger.addSuccess(test1) + tagger.stopTest(test1) + tagger.startTest(test2) + tagger.addSuccess(test2) + tagger.stopTest(test2) + self.assertEqual( + [('startTest', test1), + ('tags', set(['foo']), set(['bar'])), + ('addSuccess', test1), + ('stopTest', test1), + ('startTest', test2), + ('tags', set(['foo']), set(['bar'])), + ('addSuccess', test2), + ('stopTest', test2), + ], result._events) + + +class TestTimestampingStreamResult(TestCase): + + def test_startTestRun(self): + result = TimestampingStreamResult(LoggingStreamResult()) + result.startTestRun() + self.assertEqual([('startTestRun',)], result.targets[0]._events) + + def test_stopTestRun(self): + result = TimestampingStreamResult(LoggingStreamResult()) + result.stopTestRun() + self.assertEqual([('stopTestRun',)], result.targets[0]._events) + + def test_status_no_timestamp(self): + result = TimestampingStreamResult(LoggingStreamResult()) + result.status(test_id="A", test_status="B", test_tags="C", + runnable="D", file_name="E", file_bytes=b"F", eof=True, + mime_type="G", route_code="H") + events = result.targets[0]._events + self.assertThat(events, HasLength(1)) + self.assertThat(events[0], HasLength(11)) + self.assertEqual( + 
("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"), + events[0][:10]) + self.assertNotEqual(None, events[0][10]) + self.assertIsInstance(events[0][10], datetime.datetime) + + def test_status_timestamp(self): + result = TimestampingStreamResult(LoggingStreamResult()) + result.status(timestamp="F") + self.assertEqual("F", result.targets[0]._events[0][10]) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py new file mode 100644 index 00000000000..e2c33062b2d --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py @@ -0,0 +1,279 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. + +"""Test ConcurrentTestSuite and related things.""" + +__metaclass__ = type + +import doctest +from functools import partial +import sys +import unittest + +from extras import try_import + +from testtools import ( + ConcurrentTestSuite, + ConcurrentStreamTestSuite, + iterate_tests, + PlaceHolder, + TestByTestResult, + TestCase, + ) +from testtools.compat import _b, _u +from testtools.matchers import DocTestMatches +from testtools.testsuite import FixtureSuite, iterate_tests, sorted_tests +from testtools.tests.helpers import LoggingResult +from testtools.testresult.doubles import StreamResult as LoggingStream + +FunctionFixture = try_import('fixtures.FunctionFixture') + +class Sample(TestCase): + def __hash__(self): + return id(self) + def test_method1(self): + pass + def test_method2(self): + pass + + +class TestConcurrentTestSuiteRun(TestCase): + + def test_broken_test(self): + log = [] + def on_test(test, status, start_time, stop_time, tags, details): + log.append((test.id(), status, set(details.keys()))) + class BrokenTest(object): + # Simple break - no 
result parameter to run() + def __call__(self): + pass + run = __call__ + original_suite = unittest.TestSuite([BrokenTest()]) + suite = ConcurrentTestSuite(original_suite, self.split_suite) + suite.run(TestByTestResult(on_test)) + self.assertEqual([('broken-runner', 'error', set(['traceback']))], log) + + def test_trivial(self): + log = [] + result = LoggingResult(log) + test1 = Sample('test_method1') + test2 = Sample('test_method2') + original_suite = unittest.TestSuite([test1, test2]) + suite = ConcurrentTestSuite(original_suite, self.split_suite) + suite.run(result) + # log[0] is the timestamp for the first test starting. + test1 = log[1][1] + test2 = log[-1][1] + self.assertIsInstance(test1, Sample) + self.assertIsInstance(test2, Sample) + self.assertNotEqual(test1.id(), test2.id()) + + def test_wrap_result(self): + # ConcurrentTestSuite has a hook for wrapping the per-thread result. + wrap_log = [] + + def wrap_result(thread_safe_result, thread_number): + wrap_log.append( + (thread_safe_result.result.decorated, thread_number)) + return thread_safe_result + + result_log = [] + result = LoggingResult(result_log) + test1 = Sample('test_method1') + test2 = Sample('test_method2') + original_suite = unittest.TestSuite([test1, test2]) + suite = ConcurrentTestSuite( + original_suite, self.split_suite, wrap_result=wrap_result) + suite.run(result) + self.assertEqual( + [(result, 0), + (result, 1), + ], wrap_log) + # Smoke test to make sure everything ran OK. 
+ self.assertNotEqual([], result_log) + + def split_suite(self, suite): + return list(iterate_tests(suite)) + + +class TestConcurrentStreamTestSuiteRun(TestCase): + + def test_trivial(self): + result = LoggingStream() + test1 = Sample('test_method1') + test2 = Sample('test_method2') + cases = lambda:[(test1, '0'), (test2, '1')] + suite = ConcurrentStreamTestSuite(cases) + suite.run(result) + def freeze(set_or_none): + if set_or_none is None: + return set_or_none + return frozenset(set_or_none) + # Ignore event order: we're testing the code is all glued together, + # which just means we can pump events through and they get route codes + # added appropriately. + self.assertEqual(set([ + ('status', + 'testtools.tests.test_testsuite.Sample.test_method1', + 'inprogress', + None, + True, + None, + None, + False, + None, + '0', + None, + ), + ('status', + 'testtools.tests.test_testsuite.Sample.test_method1', + 'success', + frozenset(), + True, + None, + None, + False, + None, + '0', + None, + ), + ('status', + 'testtools.tests.test_testsuite.Sample.test_method2', + 'inprogress', + None, + True, + None, + None, + False, + None, + '1', + None, + ), + ('status', + 'testtools.tests.test_testsuite.Sample.test_method2', + 'success', + frozenset(), + True, + None, + None, + False, + None, + '1', + None, + ), + ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,) + for event in result._events)) + + def test_broken_runner(self): + # If the object called breaks, the stream is informed about it + # regardless. + class BrokenTest(object): + # broken - no result parameter! + def __call__(self): + pass + def run(self): + pass + result = LoggingStream() + cases = lambda:[(BrokenTest(), '0')] + suite = ConcurrentStreamTestSuite(cases) + suite.run(result) + events = result._events + # Check the traceback loosely. 
+ self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\ +Traceback (most recent call last): + File "...testtools/testsuite.py", line ..., in _run_test + test.run(process_result) +TypeError: run() takes ...1 ...argument...2...given... +""", doctest.ELLIPSIS)) + events = [event[0:10] + (None,) for event in events] + events[1] = events[1][:6] + (None,) + events[1][7:] + self.assertEqual([ + ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None), + ('status', "broken-runner-'0'", None, None, True, 'traceback', None, + False, + 'text/x-traceback; charset="utf8"; language="python"', + '0', + None), + ('status', "broken-runner-'0'", None, None, True, 'traceback', b'', True, + 'text/x-traceback; charset="utf8"; language="python"', '0', None), + ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None) + ], events) + + def split_suite(self, suite): + tests = list(enumerate(iterate_tests(suite))) + return [(test, _u(str(pos))) for pos, test in tests] + + +class TestFixtureSuite(TestCase): + + def setUp(self): + super(TestFixtureSuite, self).setUp() + if FunctionFixture is None: + self.skip("Need fixtures") + + def test_fixture_suite(self): + log = [] + class Sample(TestCase): + def test_one(self): + log.append(1) + def test_two(self): + log.append(2) + fixture = FunctionFixture( + lambda: log.append('setUp'), + lambda fixture: log.append('tearDown')) + suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')]) + suite.run(LoggingResult([])) + self.assertEqual(['setUp', 1, 2, 'tearDown'], log) + + def test_fixture_suite_sort(self): + log = [] + class Sample(TestCase): + def test_one(self): + log.append(1) + def test_two(self): + log.append(2) + fixture = FunctionFixture( + lambda: log.append('setUp'), + lambda fixture: log.append('tearDown')) + suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')]) + self.assertRaises(ValueError, suite.sort_tests) + + +class 
TestSortedTests(TestCase): + + def test_sorts_custom_suites(self): + a = PlaceHolder('a') + b = PlaceHolder('b') + class Subclass(unittest.TestSuite): + def sort_tests(self): + self._tests = sorted_tests(self, True) + input_suite = Subclass([b, a]) + suite = sorted_tests(input_suite) + self.assertEqual([a, b], list(iterate_tests(suite))) + self.assertEqual([input_suite], list(iter(suite))) + + def test_custom_suite_without_sort_tests_works(self): + a = PlaceHolder('a') + b = PlaceHolder('b') + class Subclass(unittest.TestSuite):pass + input_suite = Subclass([b, a]) + suite = sorted_tests(input_suite) + self.assertEqual([b, a], list(iterate_tests(suite))) + self.assertEqual([input_suite], list(iter(suite))) + + def test_sorts_simple_suites(self): + a = PlaceHolder('a') + b = PlaceHolder('b') + suite = sorted_tests(unittest.TestSuite([b, a])) + self.assertEqual([a, b], list(iterate_tests(suite))) + + def test_duplicate_simple_suites(self): + a = PlaceHolder('a') + b = PlaceHolder('b') + c = PlaceHolder('a') + self.assertRaises( + ValueError, sorted_tests, unittest.TestSuite([a, b, c])) + + +def test_suite(): + from unittest import TestLoader + return TestLoader().loadTestsFromName(__name__) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py new file mode 100644 index 00000000000..4305c624a86 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py @@ -0,0 +1,88 @@ +# Copyright (c) 2011 testtools developers. See LICENSE for details. 
+ +from __future__ import with_statement + +import sys + +from testtools import ( + ExpectedException, + TestCase, + ) +from testtools.matchers import ( + AfterPreprocessing, + Equals, + EndsWith, + ) + + +class TestExpectedException(TestCase): + """Test the ExpectedException context manager.""" + + def test_pass_on_raise(self): + with ExpectedException(ValueError, 'tes.'): + raise ValueError('test') + + def test_pass_on_raise_matcher(self): + with ExpectedException( + ValueError, AfterPreprocessing(str, Equals('test'))): + raise ValueError('test') + + def test_raise_on_text_mismatch(self): + try: + with ExpectedException(ValueError, 'tes.'): + raise ValueError('mismatch') + except AssertionError: + e = sys.exc_info()[1] + self.assertEqual("'mismatch' does not match /tes./", str(e)) + else: + self.fail('AssertionError not raised.') + + def test_raise_on_general_mismatch(self): + matcher = AfterPreprocessing(str, Equals('test')) + value_error = ValueError('mismatch') + try: + with ExpectedException(ValueError, matcher): + raise value_error + except AssertionError: + e = sys.exc_info()[1] + self.assertEqual(matcher.match(value_error).describe(), str(e)) + else: + self.fail('AssertionError not raised.') + + def test_raise_on_error_mismatch(self): + try: + with ExpectedException(TypeError, 'tes.'): + raise ValueError('mismatch') + except ValueError: + e = sys.exc_info()[1] + self.assertEqual('mismatch', str(e)) + else: + self.fail('ValueError not raised.') + + def test_raise_if_no_exception(self): + try: + with ExpectedException(TypeError, 'tes.'): + pass + except AssertionError: + e = sys.exc_info()[1] + self.assertEqual('TypeError not raised.', str(e)) + else: + self.fail('AssertionError not raised.') + + def test_pass_on_raise_any_message(self): + with ExpectedException(ValueError): + raise ValueError('whatever') + + def test_annotate(self): + def die(): + with ExpectedException(ValueError, msg="foo"): + pass + exc = self.assertRaises(AssertionError, die) + 
self.assertThat(exc.args[0], EndsWith(': foo')) + + def test_annotated_matcher(self): + def die(): + with ExpectedException(ValueError, 'bar', msg="foo"): + pass + exc = self.assertRaises(AssertionError, die) + self.assertThat(exc.args[0], EndsWith(': foo')) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py new file mode 100644 index 00000000000..9e92e0cb8b1 --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py @@ -0,0 +1,317 @@ +# Copyright (c) 2009-2011 testtools developers. See LICENSE for details. + +"""Test suites and related things.""" + +__metaclass__ = type +__all__ = [ + 'ConcurrentTestSuite', + 'ConcurrentStreamTestSuite', + 'filter_by_ids', + 'iterate_tests', + 'sorted_tests', + ] + +import sys +import threading +import unittest + +from extras import safe_hasattr, try_imports + +Queue = try_imports(['Queue.Queue', 'queue.Queue']) + +import testtools + + +def iterate_tests(test_suite_or_case): + """Iterate through all of the test cases in 'test_suite_or_case'.""" + try: + suite = iter(test_suite_or_case) + except TypeError: + yield test_suite_or_case + else: + for test in suite: + for subtest in iterate_tests(test): + yield subtest + + +class ConcurrentTestSuite(unittest.TestSuite): + """A TestSuite whose run() calls out to a concurrency strategy.""" + + def __init__(self, suite, make_tests, wrap_result=None): + """Create a ConcurrentTestSuite to execute suite. + + :param suite: A suite to run concurrently. + :param make_tests: A helper function to split the tests in the + ConcurrentTestSuite into some number of concurrently executing + sub-suites. make_tests must take a suite, and return an iterable + of TestCase-like object, each of which must have a run(result) + method. 
+ :param wrap_result: An optional function that takes a thread-safe + result and a thread number and must return a ``TestResult`` + object. If not provided, then ``ConcurrentTestSuite`` will just + use a ``ThreadsafeForwardingResult`` wrapped around the result + passed to ``run()``. + """ + super(ConcurrentTestSuite, self).__init__([suite]) + self.make_tests = make_tests + if wrap_result: + self._wrap_result = wrap_result + + def _wrap_result(self, thread_safe_result, thread_number): + """Wrap a thread-safe result before sending it test results. + + You can either override this in a subclass or pass your own + ``wrap_result`` in to the constructor. The latter is preferred. + """ + return thread_safe_result + + def run(self, result): + """Run the tests concurrently. + + This calls out to the provided make_tests helper, and then serialises + the results so that result only sees activity from one TestCase at + a time. + + ConcurrentTestSuite provides no special mechanism to stop the tests + returned by make_tests, it is up to the make_tests to honour the + shouldStop attribute on the result object they are run with, which will + be set if an exception is raised in the thread which + ConcurrentTestSuite.run is called in. 
+ """ + tests = self.make_tests(self) + try: + threads = {} + queue = Queue() + semaphore = threading.Semaphore(1) + for i, test in enumerate(tests): + process_result = self._wrap_result( + testtools.ThreadsafeForwardingResult(result, semaphore), i) + reader_thread = threading.Thread( + target=self._run_test, args=(test, process_result, queue)) + threads[test] = reader_thread, process_result + reader_thread.start() + while threads: + finished_test = queue.get() + threads[finished_test][0].join() + del threads[finished_test] + except: + for thread, process_result in threads.values(): + process_result.stop() + raise + + def _run_test(self, test, process_result, queue): + try: + try: + test.run(process_result) + except Exception as e: + # The run logic itself failed. + case = testtools.ErrorHolder( + "broken-runner", + error=sys.exc_info()) + case.run(process_result) + finally: + queue.put(test) + + +class ConcurrentStreamTestSuite(object): + """A TestSuite whose run() parallelises.""" + + def __init__(self, make_tests): + """Create a ConcurrentTestSuite to execute tests returned by make_tests. + + :param make_tests: A helper function that should return some number + of concurrently executable test suite / test case objects. + make_tests must take no parameters and return an iterable of + tuples. Each tuple must be of the form (case, route_code), where + case is a TestCase-like object with a run(result) method, and + route_code is either None or a unicode string. + """ + super(ConcurrentStreamTestSuite, self).__init__() + self.make_tests = make_tests + + def run(self, result): + """Run the tests concurrently. + + This calls out to the provided make_tests helper to determine the + concurrency to use and to assign routing codes to each worker. 
+ + ConcurrentTestSuite provides no special mechanism to stop the tests + returned by make_tests, it is up to the made tests to honour the + shouldStop attribute on the result object they are run with, which will + be set if the test run is to be aborted. + + The tests are run with an ExtendedToStreamDecorator wrapped around a + StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from + the queue and forwards them to result. Tests can therefore be either + original unittest tests (or compatible tests), or new tests that emit + StreamResult events directly. + + :param result: A StreamResult instance. The caller is responsible for + calling startTestRun on this instance prior to invoking suite.run, + and stopTestRun subsequent to the run method returning. + """ + tests = self.make_tests() + try: + threads = {} + queue = Queue() + for test, route_code in tests: + to_queue = testtools.StreamToQueue(queue, route_code) + process_result = testtools.ExtendedToStreamDecorator( + testtools.TimestampingStreamResult(to_queue)) + runner_thread = threading.Thread( + target=self._run_test, + args=(test, process_result, route_code)) + threads[to_queue] = runner_thread, process_result + runner_thread.start() + while threads: + event_dict = queue.get() + event = event_dict.pop('event') + if event == 'status': + result.status(**event_dict) + elif event == 'stopTestRun': + thread = threads.pop(event_dict['result'])[0] + thread.join() + elif event == 'startTestRun': + pass + else: + raise ValueError('unknown event type %r' % (event,)) + except: + for thread, process_result in threads.values(): + # Signal to each TestControl in the ExtendedToStreamDecorator + # that the thread should stop running tests and cleanup + process_result.stop() + raise + + def _run_test(self, test, process_result, route_code): + process_result.startTestRun() + try: + try: + test.run(process_result) + except Exception as e: + # The run logic itself failed. 
+ case = testtools.ErrorHolder( + "broken-runner-'%s'" % (route_code,), + error=sys.exc_info()) + case.run(process_result) + finally: + process_result.stopTestRun() + + +class FixtureSuite(unittest.TestSuite): + + def __init__(self, fixture, tests): + super(FixtureSuite, self).__init__(tests) + self._fixture = fixture + + def run(self, result): + self._fixture.setUp() + try: + super(FixtureSuite, self).run(result) + finally: + self._fixture.cleanUp() + + def sort_tests(self): + self._tests = sorted_tests(self, True) + + +def _flatten_tests(suite_or_case, unpack_outer=False): + try: + tests = iter(suite_or_case) + except TypeError: + # Not iterable, assume it's a test case. + return [(suite_or_case.id(), suite_or_case)] + if (type(suite_or_case) in (unittest.TestSuite,) or + unpack_outer): + # Plain old test suite (or any others we may add). + result = [] + for test in tests: + # Recurse to flatten. + result.extend(_flatten_tests(test)) + return result + else: + # Find any old actual test and grab its id. + suite_id = None + tests = iterate_tests(suite_or_case) + for test in tests: + suite_id = test.id() + break + # If it has a sort_tests method, call that. + if safe_hasattr(suite_or_case, 'sort_tests'): + suite_or_case.sort_tests() + return [(suite_id, suite_or_case)] + + +def filter_by_ids(suite_or_case, test_ids): + """Remove tests from suite_or_case where their id is not in test_ids. + + :param suite_or_case: A test suite or test case. + :param test_ids: Something that supports the __contains__ protocol. + :return: suite_or_case, unless suite_or_case was a case that itself + fails the predicate when it will return a new unittest.TestSuite with + no contents. + + This helper exists to provide backwards compatability with older versions + of Python (currently all versions :)) that don't have a native + filter_by_ids() method on Test(Case|Suite). 
+ + For subclasses of TestSuite, filtering is done by: + - attempting to call suite.filter_by_ids(test_ids) + - if there is no method, iterating the suite and identifying tests to + remove, then removing them from _tests, manually recursing into + each entry. + + For objects with an id() method - TestCases, filtering is done by: + - attempting to return case.filter_by_ids(test_ids) + - if there is no such method, checking for case.id() in test_ids + and returning case if it is, or TestSuite() if it is not. + + For anything else, it is not filtered - it is returned as-is. + + To provide compatability with this routine for a custom TestSuite, just + define a filter_by_ids() method that will return a TestSuite equivalent to + the original minus any tests not in test_ids. + Similarly to provide compatability for a custom TestCase that does + something unusual define filter_by_ids to return a new TestCase object + that will only run test_ids that are in the provided container. If none + would run, return an empty TestSuite(). + + The contract for this function does not require mutation - each filtered + object can choose to return a new object with the filtered tests. However + because existing custom TestSuite classes in the wild do not have this + method, we need a way to copy their state correctly which is tricky: + thus the backwards-compatible code paths attempt to mutate in place rather + than guessing how to reconstruct a new suite. + """ + # Compatible objects + if safe_hasattr(suite_or_case, 'filter_by_ids'): + return suite_or_case.filter_by_ids(test_ids) + # TestCase objects. + if safe_hasattr(suite_or_case, 'id'): + if suite_or_case.id() in test_ids: + return suite_or_case + else: + return unittest.TestSuite() + # Standard TestSuites or derived classes [assumed to be mutable]. 
+ if isinstance(suite_or_case, unittest.TestSuite): + filtered = [] + for item in suite_or_case: + filtered.append(filter_by_ids(item, test_ids)) + suite_or_case._tests[:] = filtered + # Everything else: + return suite_or_case + + +def sorted_tests(suite_or_case, unpack_outer=False): + """Sort suite_or_case while preserving non-vanilla TestSuites.""" + # Duplicate test id can induce TypeError in Python 3.3. + # Detect the duplicate test id, raise exception when found. + seen = set() + for test_case in iterate_tests(suite_or_case): + test_id = test_case.id() + if test_id not in seen: + seen.add(test_id) + else: + raise ValueError('Duplicate test id detected: %s' % (test_id,)) + tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer) + tests.sort() + return unittest.TestSuite([test for (sort_key, test) in tests]) diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py new file mode 100644 index 00000000000..0f39d8f5b6e --- /dev/null +++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py @@ -0,0 +1,13 @@ +# Copyright (c) 2008-2010 testtools developers. See LICENSE for details. + +"""Utilities for dealing with stuff in unittest. + +Legacy - deprecated - use testtools.testsuite.iterate_tests +""" + +import warnings +warnings.warn("Please import iterate_tests from testtools.testsuite - " + "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2) + +from testtools.testsuite import iterate_tests + |