Diffstat (limited to 'src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests')
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py  47
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py  108
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py  29
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py  42
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py  396
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py  209
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py  227
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py  82
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py  192
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py  243
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py  254
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py  132
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py  603
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py  349
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py  66
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py  767
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py  100
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py  118
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py  30
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py  167
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py  248
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py  303
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py  333
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py  84
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py  1550
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py  2919
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py  279
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py  88
28 files changed, 9965 insertions, 0 deletions
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py
new file mode 100644
index 00000000000..db215ff12f8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
+"""Tests for testtools itself."""
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+ from testtools.tests import (
+ matchers,
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_distutilscmd,
+ test_fixturesupport,
+ test_helpers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_tags,
+ test_testcase,
+ test_testresult,
+ test_testsuite,
+ )
+ modules = [
+ matchers,
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_distutilscmd,
+ test_fixturesupport,
+ test_helpers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_tags,
+ test_testcase,
+ test_testresult,
+ test_testsuite,
+ ]
+ suites = map(lambda x: x.test_suite(), modules)
+ return TestSuite(suites)
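A minimal usage sketch for the aggregated suite above (illustrative only, not part of this diff): the TestSuite returned by test_suite() can be handed straight to the standard library's unittest text runner.

    import unittest

    from testtools.tests import test_suite

    if __name__ == '__main__':
        # Run every module-level test_suite() collected above; verbosity=2
        # prints one line per test as it executes.
        unittest.TextTestRunner(verbosity=2).run(test_suite())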
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py
new file mode 100644
index 00000000000..f766da33c9f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Helpers for tests."""
+
+__all__ = [
+ 'LoggingResult',
+ ]
+
+import sys
+
+from extras import safe_hasattr
+
+from testtools import TestResult
+from testtools.content import StackLinesContent
+from testtools import runtest
+
+
+# Importing to preserve compatibility.
+safe_hasattr
+
+# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
+try:
+ raise Exception
+except Exception:
+ an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard-to-predict tests (because Python upstream keeps changing things).
+class LoggingResult(TestResult):
+ """TestResult that logs its event to a list."""
+
+ def __init__(self, log):
+ self._events = log
+ super(LoggingResult, self).__init__()
+
+ def startTest(self, test):
+ self._events.append(('startTest', test))
+ super(LoggingResult, self).startTest(test)
+
+ def stop(self):
+ self._events.append('stop')
+ super(LoggingResult, self).stop()
+
+ def stopTest(self, test):
+ self._events.append(('stopTest', test))
+ super(LoggingResult, self).stopTest(test)
+
+ def addFailure(self, test, error):
+ self._events.append(('addFailure', test, error))
+ super(LoggingResult, self).addFailure(test, error)
+
+ def addError(self, test, error):
+ self._events.append(('addError', test, error))
+ super(LoggingResult, self).addError(test, error)
+
+ def addSkip(self, test, reason):
+ self._events.append(('addSkip', test, reason))
+ super(LoggingResult, self).addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self._events.append(('addSuccess', test))
+ super(LoggingResult, self).addSuccess(test)
+
+ def startTestRun(self):
+ self._events.append('startTestRun')
+ super(LoggingResult, self).startTestRun()
+
+ def stopTestRun(self):
+ self._events.append('stopTestRun')
+ super(LoggingResult, self).stopTestRun()
+
+ def done(self):
+ self._events.append('done')
+ super(LoggingResult, self).done()
+
+ def tags(self, new_tags, gone_tags):
+ self._events.append(('tags', new_tags, gone_tags))
+ super(LoggingResult, self).tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ self._events.append(('time', a_datetime))
+ super(LoggingResult, self).time(a_datetime)
+
+
+def is_stack_hidden():
+ return StackLinesContent.HIDE_INTERNAL_STACK
+
+
+def hide_testtools_stack(should_hide=True):
+ result = StackLinesContent.HIDE_INTERNAL_STACK
+ StackLinesContent.HIDE_INTERNAL_STACK = should_hide
+ return result
+
+
+def run_with_stack_hidden(should_hide, f, *args, **kwargs):
+ old_should_hide = hide_testtools_stack(should_hide)
+ try:
+ return f(*args, **kwargs)
+ finally:
+ hide_testtools_stack(old_should_hide)
+
+
+class FullStackRunTest(runtest.RunTest):
+
+ def _run_user(self, fn, *args, **kwargs):
+ return run_with_stack_hidden(
+ False,
+ super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)
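A brief sketch of how the stack-hiding helpers above compose (illustrative only, not part of this diff): run_with_stack_hidden() forces StackLinesContent.HIDE_INTERNAL_STACK to the requested value for the duration of one call and restores the previous setting afterwards.

    from testtools.tests.helpers import is_stack_hidden, run_with_stack_hidden

    def probe():
        # Observes the flag while the wrapper has it forced to False.
        return is_stack_hidden()

    # Whatever the flag was beforehand, probe() sees False, and the original
    # value is restored once run_with_stack_hidden() returns.
    assert run_with_stack_hidden(False, probe) is False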
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py
new file mode 100644
index 00000000000..ebab308e77c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+ from testtools.tests.matchers import (
+ test_basic,
+ test_datastructures,
+ test_dict,
+ test_doctest,
+ test_exception,
+ test_filesystem,
+ test_higherorder,
+ test_impl,
+ )
+ modules = [
+ test_basic,
+ test_datastructures,
+ test_dict,
+ test_doctest,
+ test_exception,
+ test_filesystem,
+ test_higherorder,
+ test_impl,
+ ]
+ suites = map(lambda x: x.test_suite(), modules)
+ return TestSuite(suites)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py
new file mode 100644
index 00000000000..3ff87278dae
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestMatchersInterface(object):
+
+ run_tests_with = FullStackRunTest
+
+ def test_matches_match(self):
+ matcher = self.matches_matcher
+ matches = self.matches_matches
+ mismatches = self.matches_mismatches
+ for candidate in matches:
+ self.assertEqual(None, matcher.match(candidate))
+ for candidate in mismatches:
+ mismatch = matcher.match(candidate)
+ self.assertNotEqual(None, mismatch)
+ self.assertNotEqual(None, getattr(mismatch, 'describe', None))
+
+ def test__str__(self):
+ # [(expected, object to __str__)].
+ from testtools.matchers._doctest import DocTestMatches
+ examples = self.str_examples
+ for expected, matcher in examples:
+ self.assertThat(matcher, DocTestMatches(expected))
+
+ def test_describe_difference(self):
+ # [(expected, matchee, matcher), ...]
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ self.assertEqual(difference, mismatch.describe())
+
+ def test_mismatch_details(self):
+ # The mismatch object must provide get_details, which must return a
+ # dictionary mapping names to Content objects.
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ details = mismatch.get_details()
+ self.assertEqual(dict(details), details)
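For reference, a minimal concrete use of the mix-in above looks like the sketch below; it mirrors TestEqualsInterface later in this diff, and the class name here is a hypothetical stand-in.

    from testtools import TestCase
    from testtools.matchers import Equals
    from testtools.tests.matchers.helpers import TestMatchersInterface

    class TestEqualsSketch(TestCase, TestMatchersInterface):
        # Attributes consumed by the mix-in's test_* methods:
        matches_matcher = Equals(1)                     # matcher under test
        matches_matches = [1]                           # values it must match
        matches_mismatches = [2]                        # values it must reject
        str_examples = [("Equals(1)", Equals(1))]       # expected str() output
        describe_examples = [("1 != 2", 2, Equals(1))]  # expected mismatch text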
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py
new file mode 100644
index 00000000000..c53bc9e9c42
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py
@@ -0,0 +1,396 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import re
+
+from testtools import TestCase
+from testtools.compat import (
+ text_repr,
+ _b,
+ _u,
+ )
+from testtools.matchers._basic import (
+ _BinaryMismatch,
+ Contains,
+ DoesNotEndWith,
+ DoesNotStartWith,
+ EndsWith,
+ Equals,
+ Is,
+ IsInstance,
+ LessThan,
+ GreaterThan,
+ HasLength,
+ MatchesRegex,
+ NotEquals,
+ SameMembers,
+ StartsWith,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class Test_BinaryMismatch(TestCase):
+ """Mismatches from binary comparisons need useful describe output"""
+
+ _long_string = "This is a longish multiline non-ascii string\n\xa7"
+ _long_b = _b(_long_string)
+ _long_u = _u(_long_string)
+
+ class CustomRepr(object):
+ def __init__(self, repr_string):
+ self._repr_string = repr_string
+ def __repr__(self):
+ return _u('<object ') + _u(self._repr_string) + _u('>')
+
+ def test_short_objects(self):
+ o1, o2 = self.CustomRepr('a'), self.CustomRepr('b')
+ mismatch = _BinaryMismatch(o1, "!~", o2)
+ self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
+
+ def test_short_mixed_strings(self):
+ b, u = _b("\xa7"), _u("\xa7")
+ mismatch = _BinaryMismatch(b, "!~", u)
+ self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
+
+ def test_long_bytes(self):
+ one_line_b = self._long_b.replace(_b("\n"), _b(" "))
+ mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(one_line_b),
+ text_repr(self._long_b, multiline=True)))
+
+ def test_long_unicode(self):
+ one_line_u = self._long_u.replace("\n", " ")
+ mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(one_line_u),
+ text_repr(self._long_u, multiline=True)))
+
+ def test_long_mixed_strings(self):
+ mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_b, multiline=True),
+ text_repr(self._long_u, multiline=True)))
+
+ def test_long_bytes_and_object(self):
+ obj = object()
+ mismatch = _BinaryMismatch(self._long_b, "!~", obj)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_b, multiline=True),
+ repr(obj)))
+
+ def test_long_unicode_and_object(self):
+ obj = object()
+ mismatch = _BinaryMismatch(self._long_u, "!~", obj)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_u, multiline=True),
+ repr(obj)))
+
+
+class TestEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Equals(1)
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
+
+ describe_examples = [("1 != 2", 2, Equals(1))]
+
+
+class TestNotEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = NotEquals(1)
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
+
+ describe_examples = [("1 == 1", 1, NotEquals(1))]
+
+
+class TestIsInterface(TestCase, TestMatchersInterface):
+
+ foo = object()
+ bar = object()
+
+ matches_matcher = Is(foo)
+ matches_matches = [foo]
+ matches_mismatches = [bar, 1]
+
+ str_examples = [("Is(2)", Is(2))]
+
+ describe_examples = [("1 is not 2", 2, Is(1))]
+
+
+class TestIsInstanceInterface(TestCase, TestMatchersInterface):
+
+ class Foo:pass
+
+ matches_matcher = IsInstance(Foo)
+ matches_matches = [Foo()]
+ matches_mismatches = [object(), 1, Foo]
+
+ str_examples = [
+ ("IsInstance(str)", IsInstance(str)),
+ ("IsInstance(str, int)", IsInstance(str, int)),
+ ]
+
+ describe_examples = [
+ ("'foo' is not an instance of int", 'foo', IsInstance(int)),
+ ("'foo' is not an instance of any of (int, type)", 'foo',
+ IsInstance(int, type)),
+ ]
+
+
+class TestLessThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = LessThan(4)
+ matches_matches = [-5, 3]
+ matches_mismatches = [4, 5, 5000]
+
+ str_examples = [
+ ("LessThan(12)", LessThan(12)),
+ ]
+
+ describe_examples = [
+ ('4 is not > 5', 5, LessThan(4)),
+ ('4 is not > 4', 4, LessThan(4)),
+ ]
+
+
+class TestGreaterThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = GreaterThan(4)
+ matches_matches = [5, 8]
+ matches_mismatches = [-2, 0, 4]
+
+ str_examples = [
+ ("GreaterThan(12)", GreaterThan(12)),
+ ]
+
+ describe_examples = [
+ ('5 is not < 4', 4, GreaterThan(5)),
+ ('4 is not < 4', 4, GreaterThan(4)),
+ ]
+
+
+class TestContainsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Contains('foo')
+ matches_matches = ['foo', 'afoo', 'fooa']
+ matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
+
+ str_examples = [
+ ("Contains(1)", Contains(1)),
+ ("Contains('foo')", Contains('foo')),
+ ]
+
+ describe_examples = [("1 not in 2", 2, Contains(1))]
+
+
+class DoesNotStartWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_describe(self):
+ mismatch = DoesNotStartWith("fo", "bo")
+ self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
+
+ def test_describe_non_ascii_unicode(self):
+ string = _u("A\xA7")
+ suffix = _u("B\xA7")
+ mismatch = DoesNotStartWith(string, suffix)
+ self.assertEqual("%s does not start with %s." % (
+ text_repr(string), text_repr(suffix)),
+ mismatch.describe())
+
+ def test_describe_non_ascii_bytes(self):
+ string = _b("A\xA7")
+ suffix = _b("B\xA7")
+ mismatch = DoesNotStartWith(string, suffix)
+ self.assertEqual("%r does not start with %r." % (string, suffix),
+ mismatch.describe())
+
+
+class StartsWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_str(self):
+ matcher = StartsWith("bar")
+ self.assertEqual("StartsWith('bar')", str(matcher))
+
+ def test_str_with_bytes(self):
+ b = _b("\xA7")
+ matcher = StartsWith(b)
+ self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
+
+ def test_str_with_unicode(self):
+ u = _u("\xA7")
+ matcher = StartsWith(u)
+ self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
+
+ def test_match(self):
+ matcher = StartsWith("bar")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_start_with(self):
+ matcher = StartsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class DoesNotEndWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_describe(self):
+ mismatch = DoesNotEndWith("fo", "bo")
+ self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
+
+ def test_describe_non_ascii_unicode(self):
+ string = _u("A\xA7")
+ suffix = _u("B\xA7")
+ mismatch = DoesNotEndWith(string, suffix)
+ self.assertEqual("%s does not end with %s." % (
+ text_repr(string), text_repr(suffix)),
+ mismatch.describe())
+
+ def test_describe_non_ascii_bytes(self):
+ string = _b("A\xA7")
+ suffix = _b("B\xA7")
+ mismatch = DoesNotEndWith(string, suffix)
+ self.assertEqual("%r does not end with %r." % (string, suffix),
+ mismatch.describe())
+
+
+class EndsWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_str(self):
+ matcher = EndsWith("bar")
+ self.assertEqual("EndsWith('bar')", str(matcher))
+
+ def test_str_with_bytes(self):
+ b = _b("\xA7")
+ matcher = EndsWith(b)
+ self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
+
+ def test_str_with_unicode(self):
+ u = _u("\xA7")
+ matcher = EndsWith(u)
+ self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
+
+ def test_match(self):
+ matcher = EndsWith("arf")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_end_with(self):
+ matcher = EndsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class TestSameMembers(TestCase, TestMatchersInterface):
+
+ matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}])
+ matches_matches = [
+ [1, 1, 2, 3, {'foo': 'bar'}],
+ [3, {'foo': 'bar'}, 1, 2, 1],
+ [3, 2, 1, {'foo': 'bar'}, 1],
+ (2, {'foo': 'bar'}, 3, 1, 1),
+ ]
+ matches_mismatches = [
+ set([1, 2, 3]),
+ [1, 1, 2, 3, 5],
+ [1, 2, 3, {'foo': 'bar'}],
+ 'foo',
+ ]
+
+ describe_examples = [
+ (("elements differ:\n"
+ "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n"
+ "actual = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n"
+ ": \n"
+ "missing: ['watermelon']\n"
+ "extra: ['sparrow']"
+ ),
+ ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',],
+ SameMembers(
+ ['apple', 'orange', 'canteloupe', 'watermelon',
+ 'lemon', 'banana',])),
+ ]
+
+ str_examples = [
+ ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])),
+ ]
+
+
+class TestMatchesRegex(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesRegex('a|b')
+ matches_matches = ['a', 'b']
+ matches_mismatches = ['c']
+
+ str_examples = [
+ ("MatchesRegex('a|b')", MatchesRegex('a|b')),
+ ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
+ ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
+ ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
+ ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
+ ]
+
+ describe_examples = [
+ ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
+ ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
+ ("%r does not match /\\s+\\xa7/" % (_b('c'),),
+ _b('c'), MatchesRegex(_b("\\s+\xA7"))),
+ ("%r does not match /\\s+\\xa7/" % (_u('c'),),
+ _u('c'), MatchesRegex(_u("\\s+\xA7"))),
+ ]
+
+
+class TestHasLength(TestCase, TestMatchersInterface):
+
+ matches_matcher = HasLength(2)
+ matches_matches = [[1, 2]]
+ matches_mismatches = [[], [1], [3, 2, 1]]
+
+ str_examples = [
+ ("HasLength(2)", HasLength(2)),
+ ]
+
+ describe_examples = [
+ ("len([]) != 1", [], HasLength(1)),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py
new file mode 100644
index 00000000000..f6d9d8658c8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py
@@ -0,0 +1,209 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+import re
+import sys
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.matchers import (
+ Annotate,
+ Equals,
+ LessThan,
+ MatchesRegex,
+ NotEquals,
+ )
+from testtools.matchers._datastructures import (
+ ContainsAll,
+ MatchesListwise,
+ MatchesStructure,
+ MatchesSetwise,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def run_doctest(obj, name):
+ p = doctest.DocTestParser()
+ t = p.get_doctest(
+ obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
+ r = doctest.DocTestRunner()
+ output = StringIO()
+ r.run(t, out=output.write)
+ return r.failures, output.getvalue()
+
+
+class TestMatchesListwise(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_docstring(self):
+ failure_count, output = run_doctest(
+ MatchesListwise, "MatchesListwise")
+ if failure_count:
+ self.fail("Doctest failed with %s" % output)
+
+
+class TestMatchesStructure(TestCase, TestMatchersInterface):
+
+ class SimpleClass:
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+ matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
+ matches_matches = [SimpleClass(1, 2)]
+ matches_mismatches = [
+ SimpleClass(2, 2),
+ SimpleClass(1, 1),
+ SimpleClass(3, 3),
+ ]
+
+ str_examples = [
+ ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
+ ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
+ ("MatchesStructure(x=Equals(1), y=Equals(2))",
+ MatchesStructure(x=Equals(1), y=Equals(2))),
+ ]
+
+ describe_examples = [
+ ("""\
+Differences: [
+3 != 1: x
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
+ ("""\
+Differences: [
+3 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
+ ("""\
+Differences: [
+0 != 1: x
+0 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
+ ]
+
+ def test_fromExample(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
+
+ def test_byEquality(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.byEquality(x=1))
+
+ def test_withStructure(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.byMatcher(LessThan, x=2))
+
+ def test_update(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
+
+ def test_update_none(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
+ z=None))
+
+
+class TestMatchesSetwise(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def assertMismatchWithDescriptionMatching(self, value, matcher,
+ description_matcher):
+ mismatch = matcher.match(value)
+ if mismatch is None:
+ self.fail("%s matched %s" % (matcher, value))
+ actual_description = mismatch.describe()
+ self.assertThat(
+ actual_description,
+ Annotate(
+ "%s matching %s" % (matcher, value),
+ description_matcher))
+
+ def test_matches(self):
+ self.assertIs(
+ None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
+
+ def test_mismatches(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex('.*There was 1 mismatch$', re.S))
+
+ def test_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+ Equals('There was 1 matcher left over: Equals(1)'))
+
+ def test_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
+ Equals('There was 1 value left over: [3]'))
+
+ def test_two_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+ MatchesRegex(
+ 'There were 2 matchers left over: Equals\([12]\), '
+ 'Equals\([12]\)'))
+
+ def test_two_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ 'There were 2 values left over: \[[34], [34]\]'))
+
+ def test_mismatch_and_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
+ re.S))
+
+ def test_mismatch_and_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 1 extra value: \[[34]\]',
+ re.S))
+
+ def test_mismatch_and_two_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [3, 4], MatchesSetwise(
+ Equals(0), Equals(1), Equals(2), Equals(3)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 2 extra matchers: '
+ 'Equals\([012]\), Equals\([012]\)', re.S))
+
+ def test_mismatch_and_two_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
+ re.S))
+
+
+class TestContainsAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainsAll(['foo', 'bar'])
+ matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']]
+ matches_mismatches = [['f', 'g'], ['foo', 'baz'], []]
+
+ str_examples = [(
+ "MatchesAll(Contains('foo'), Contains('bar'))",
+ ContainsAll(['foo', 'bar'])),
+ ]
+
+ describe_examples = [("""Differences: [
+'baz' not in 'foo'
+]""",
+ 'foo', ContainsAll(['foo', 'baz']))]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py
new file mode 100644
index 00000000000..00368dd6ceb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py
@@ -0,0 +1,227 @@
+from testtools import TestCase
+from testtools.matchers import (
+ Equals,
+ NotEquals,
+ Not,
+ )
+from testtools.matchers._dict import (
+ ContainedByDict,
+ ContainsDict,
+ KeysEqual,
+ MatchesAllDict,
+ MatchesDict,
+ _SubDictOf,
+ )
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})",
+ matches_matcher)]
+
+ describe_examples = [
+ ("""a: 1 == 1""", 1, matches_matcher),
+ ]
+
+
+class TestKeysEqualWithList(TestCase, TestMatchersInterface):
+
+ matches_matcher = KeysEqual('foo', 'bar')
+ matches_matches = [
+ {'foo': 0, 'bar': 1},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 0},
+ {'bar': 1},
+ {'foo': 0, 'bar': 1, 'baz': 2},
+ {'a': None, 'b': None, 'c': None},
+ ]
+
+ str_examples = [
+ ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
+ ]
+
+ describe_examples = []
+
+ def test_description(self):
+ matchee = {'foo': 0, 'bar': 1, 'baz': 2}
+ mismatch = KeysEqual('foo', 'bar').match(matchee)
+ description = mismatch.describe()
+ self.assertThat(
+ description, Equals(
+ "['bar', 'foo'] does not match %r: Keys not equal"
+ % (matchee,)))
+
+
+class TestKeysEqualWithDict(TestKeysEqualWithList):
+
+ matches_matcher = KeysEqual({'foo': 3, 'bar': 4})
+
+
+class TestSubDictOf(TestCase, TestMatchersInterface):
+
+ matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bar'},
+ ]
+
+ matches_mismatches = [
+ {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = []
+ describe_examples = []
+
+
+class TestMatchesDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': None},
+ {'foo': 'bar', 'baz': 'quux'},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = [
+ ("MatchesDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ " 'foo': Equals('bar'),\n"
+ "}",
+ {}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}",
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}\n"
+ "Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+class TestContainsDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainsDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': None},
+ {'foo': 'bar', 'baz': 'quux'},
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'cat': 'dog'},
+ {'foo': 'bar'},
+ ]
+
+ str_examples = [
+ ("ContainsDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ " 'foo': Equals('bar'),\n"
+ "}",
+ {}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+class TestContainedByDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainedByDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {},
+ {'foo': 'bar'},
+ {'foo': 'bar', 'baz': 'quux'},
+ {'baz': 'quux'},
+ ]
+ matches_mismatches = [
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = [
+ ("ContainedByDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py
new file mode 100644
index 00000000000..81b9579dbf0
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+
+from testtools import TestCase
+from testtools.compat import (
+ str_is_unicode,
+ _b,
+ _u,
+ )
+from testtools.matchers._doctest import DocTestMatches
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+
+class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
+ matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
+ matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
+
+ str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
+ DocTestMatches("Ran 1 test in ...s")),
+ ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
+ ]
+
+ describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
+ ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
+ DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
+ matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
+ matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
+
+ str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
+ DocTestMatches(_u("\xa7"))),
+ ]
+
+ describe_examples = [(
+ _u("Expected:\n \xa7\nGot:\n a\n"),
+ "a",
+ DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesSpecific(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test___init__simple(self):
+ matcher = DocTestMatches("foo")
+ self.assertEqual("foo\n", matcher.want)
+
+ def test___init__flags(self):
+ matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
+ self.assertEqual("bar\n", matcher.want)
+ self.assertEqual(doctest.ELLIPSIS, matcher.flags)
+
+ def test_describe_non_ascii_bytes(self):
+ """Even with bytestrings, the mismatch should be coercible to unicode
+
+ DocTestMatches is intended for text, but the Python 2 str type also
+ permits arbitrary binary inputs. This is a slightly bogus thing to do,
+ and under Python 3 using bytes objects will reasonably raise an error.
+ """
+ header = _b("\x89PNG\r\n\x1a\n...")
+ if str_is_unicode:
+ self.assertRaises(TypeError,
+ DocTestMatches, header, doctest.ELLIPSIS)
+ return
+ matcher = DocTestMatches(header, doctest.ELLIPSIS)
+ mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
+ # Must be treatable as unicode text, the exact output matters less
+ self.assertTrue(unicode(mismatch.describe()))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py
new file mode 100644
index 00000000000..ef7185f19a4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import sys
+
+from testtools import TestCase
+from testtools.matchers import (
+ AfterPreprocessing,
+ Equals,
+ )
+from testtools.matchers._exception import (
+ MatchesException,
+ Raises,
+ raises,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def make_error(type, *args, **kwargs):
+ try:
+ raise type(*args, **kwargs)
+ except type:
+ return sys.exc_info()
+
+
+class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError("foo"))
+ error_foo = make_error(ValueError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo]
+ matches_mismatches = [error_bar, error_base_foo]
+
+ str_examples = [
+ ("MatchesException(Exception('foo',))",
+ MatchesException(Exception('foo')))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError("foo"))),
+ ("ValueError('bar',) has different arguments to ValueError('foo',).",
+ error_bar,
+ MatchesException(ValueError("foo"))),
+ ]
+
+
+class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError)
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_base_foo]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError)),
+ ]
+
+
+class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError, 'fo.')
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_bar]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception, 'fo.'))
+ ]
+ describe_examples = [
+ ("'bar' does not match /fo./",
+ error_bar, MatchesException(ValueError, "fo.")),
+ ]
+
+
+class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(
+ ValueError, AfterPreprocessing(str, Equals('foo')))
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_bar]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception, Equals('foo')))
+ ]
+ describe_examples = [
+ ("5 != %r" % (error_bar[1],),
+ error_bar, MatchesException(ValueError, Equals(5))),
+ ]
+
+
+class TestRaisesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises()
+ def boom():
+ raise Exception('foo')
+ matches_matches = [boom]
+ matches_mismatches = [lambda:None]
+
+ # Tricky to get function objects to render constantly, and the interfaces
+ # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises(
+ exception_matcher=MatchesException(Exception('foo')))
+ def boom_bar():
+ raise Exception('bar')
+ def boom_foo():
+ raise Exception('foo')
+ matches_matches = [boom_foo]
+ matches_mismatches = [lambda:None, boom_bar]
+
+ # Tricky to get function objects to render constantly, and the interfaces
+ # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesBaseTypes(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def raiser(self):
+ raise KeyboardInterrupt('foo')
+
+ def test_KeyboardInterrupt_matched(self):
+ # When KeyboardInterrupt is matched, it is swallowed.
+ matcher = Raises(MatchesException(KeyboardInterrupt))
+ self.assertThat(self.raiser, matcher)
+
+ def test_KeyboardInterrupt_propogates(self):
+        # The default 'it raised' matcher propagates KeyboardInterrupt.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ matcher = Raises()
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+ def test_KeyboardInterrupt_match_Exception_propogates(self):
+        # If the raised exception isn't matched, and it is not a subclass of
+        # Exception, it is propagated.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ if sys.version_info > (2, 5):
+ matcher = Raises(MatchesException(Exception))
+ else:
+ # On Python 2.4 KeyboardInterrupt is a StandardError subclass
+            # but should propagate from less generic exception matchers
+ matcher = Raises(MatchesException(EnvironmentError))
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+
+class TestRaisesConvenience(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_exc_type(self):
+ self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+ def test_exc_value(self):
+ e = RuntimeError("You lose!")
+ def raiser():
+ raise e
+ self.assertThat(raiser, raises(e))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py
new file mode 100644
index 00000000000..917ff2ed058
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import os
+import shutil
+import tarfile
+import tempfile
+
+from testtools import TestCase
+from testtools.matchers import (
+ Contains,
+ DocTestMatches,
+ Equals,
+ )
+from testtools.matchers._filesystem import (
+ DirContains,
+ DirExists,
+ FileContains,
+ FileExists,
+ HasPermissions,
+ PathExists,
+ SamePath,
+ TarballContains,
+ )
+
+
+class PathHelpers(object):
+
+ def mkdtemp(self):
+ directory = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, directory)
+ return directory
+
+ def create_file(self, filename, contents=''):
+ fp = open(filename, 'w')
+ try:
+ fp.write(contents)
+ finally:
+ fp.close()
+
+ def touch(self, filename):
+ return self.create_file(filename)
+
+
+class TestPathExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, PathExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = PathExists().match(doesntexist)
+ self.assertThat(
+ "%s does not exist." % doesntexist, Equals(mismatch.describe()))
+
+
+class TestDirExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, DirExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = DirExists().match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_not_a_directory(self):
+ filename = os.path.join(self.mkdtemp(), 'foo')
+ self.touch(filename)
+ mismatch = DirExists().match(filename)
+ self.assertThat(
+ "%s is not a directory." % filename, Equals(mismatch.describe()))
+
+
+class TestFileExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'filename')
+ self.touch(filename)
+ self.assertThat(filename, FileExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = FileExists().match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_not_a_file(self):
+ tempdir = self.mkdtemp()
+ mismatch = FileExists().match(tempdir)
+ self.assertThat(
+ "%s is not a file." % tempdir, Equals(mismatch.describe()))
+
+
+class TestDirContains(TestCase, PathHelpers):
+
+ def test_empty(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, DirContains([]))
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = DirContains([]).match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_contains_files(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ self.touch(os.path.join(tempdir, 'bar'))
+ self.assertThat(tempdir, DirContains(['bar', 'foo']))
+
+ def test_matcher(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ self.touch(os.path.join(tempdir, 'bar'))
+ self.assertThat(tempdir, DirContains(matcher=Contains('bar')))
+
+ def test_neither_specified(self):
+ self.assertRaises(AssertionError, DirContains)
+
+ def test_both_specified(self):
+ self.assertRaises(
+ AssertionError, DirContains, filenames=[], matcher=Contains('a'))
+
+ def test_does_not_contain_files(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ mismatch = DirContains(['bar', 'foo']).match(tempdir)
+ self.assertThat(
+ Equals(['bar', 'foo']).match(['foo']).describe(),
+ Equals(mismatch.describe()))
+
+
+class TestFileContains(TestCase, PathHelpers):
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = FileContains('').match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_contains(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Hello World!')
+ self.assertThat(filename, FileContains('Hello World!'))
+
+ def test_matcher(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Hello World!')
+ self.assertThat(
+ filename, FileContains(matcher=DocTestMatches('Hello World!')))
+
+ def test_neither_specified(self):
+ self.assertRaises(AssertionError, FileContains)
+
+ def test_both_specified(self):
+ self.assertRaises(
+ AssertionError, FileContains, contents=[], matcher=Contains('a'))
+
+ def test_does_not_contain(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Goodbye Cruel World!')
+ mismatch = FileContains('Hello World!').match(filename)
+ self.assertThat(
+ Equals('Hello World!').match('Goodbye Cruel World!').describe(),
+ Equals(mismatch.describe()))
+
+
+class TestTarballContains(TestCase, PathHelpers):
+
+ def test_match(self):
+ tempdir = self.mkdtemp()
+ in_temp_dir = lambda x: os.path.join(tempdir, x)
+ self.touch(in_temp_dir('a'))
+ self.touch(in_temp_dir('b'))
+ tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+ tarball.add(in_temp_dir('a'), 'a')
+ tarball.add(in_temp_dir('b'), 'b')
+ tarball.close()
+ self.assertThat(
+ in_temp_dir('foo.tar.gz'), TarballContains(['b', 'a']))
+
+ def test_mismatch(self):
+ tempdir = self.mkdtemp()
+ in_temp_dir = lambda x: os.path.join(tempdir, x)
+ self.touch(in_temp_dir('a'))
+ self.touch(in_temp_dir('b'))
+ tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+ tarball.add(in_temp_dir('a'), 'a')
+ tarball.add(in_temp_dir('b'), 'b')
+ tarball.close()
+ mismatch = TarballContains(['d', 'c']).match(in_temp_dir('foo.tar.gz'))
+ self.assertEqual(
+ mismatch.describe(),
+ Equals(['c', 'd']).match(['a', 'b']).describe())
+
+
+class TestSamePath(TestCase, PathHelpers):
+
+ def test_same_string(self):
+ self.assertThat('foo', SamePath('foo'))
+
+ def test_relative_and_absolute(self):
+ path = 'foo'
+ abspath = os.path.abspath(path)
+ self.assertThat(path, SamePath(abspath))
+ self.assertThat(abspath, SamePath(path))
+
+ def test_real_path(self):
+ tempdir = self.mkdtemp()
+ source = os.path.join(tempdir, 'source')
+ self.touch(source)
+ target = os.path.join(tempdir, 'target')
+ try:
+ os.symlink(source, target)
+ except (AttributeError, NotImplementedError):
+ self.skip("No symlink support")
+ self.assertThat(source, SamePath(target))
+ self.assertThat(target, SamePath(source))
+
+
+class TestHasPermissions(TestCase, PathHelpers):
+
+ def test_match(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'filename')
+ self.touch(filename)
+ permissions = oct(os.stat(filename).st_mode)[-4:]
+ self.assertThat(filename, HasPermissions(permissions))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py
new file mode 100644
index 00000000000..fb86b7fe2f9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py
@@ -0,0 +1,254 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import (
+ DocTestMatches,
+ Equals,
+ LessThan,
+ MatchesStructure,
+ Mismatch,
+ NotEquals,
+ )
+from testtools.matchers._higherorder import (
+ AfterPreprocessing,
+ AllMatch,
+ Annotate,
+ AnnotatedMismatch,
+ AnyMatch,
+ MatchesAny,
+ MatchesAll,
+ MatchesPredicate,
+ MatchesPredicateWithParams,
+ Not,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestAllMatch(TestCase, TestMatchersInterface):
+
+ matches_matcher = AllMatch(LessThan(10))
+ matches_matches = [
+ [9, 9, 9],
+ (9, 9),
+ iter([9, 9, 9, 9, 9]),
+ ]
+ matches_mismatches = [
+ [11, 9, 9],
+ iter([9, 12, 9, 11]),
+ ]
+
+ str_examples = [
+ ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
+ ]
+
+ describe_examples = [
+ ('Differences: [\n'
+ '10 is not > 11\n'
+ '10 is not > 10\n'
+ ']',
+ [11, 9, 10],
+ AllMatch(LessThan(10))),
+ ]
+
+
+class TestAnyMatch(TestCase, TestMatchersInterface):
+
+ matches_matcher = AnyMatch(Equals('elephant'))
+ matches_matches = [
+ ['grass', 'cow', 'steak', 'milk', 'elephant'],
+ (13, 'elephant'),
+ ['elephant', 'elephant', 'elephant'],
+ set(['hippo', 'rhino', 'elephant']),
+ ]
+ matches_mismatches = [
+ [],
+ ['grass', 'cow', 'steak', 'milk'],
+ (13, 12, 10),
+ ['element', 'hephalump', 'pachyderm'],
+ set(['hippo', 'rhino', 'diplodocus']),
+ ]
+
+ str_examples = [
+ ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))),
+ ]
+
+ describe_examples = [
+ ('Differences: [\n'
+ '7 != 11\n'
+ '7 != 9\n'
+ '7 != 10\n'
+ ']',
+ [11, 9, 10],
+ AnyMatch(Equals(7))),
+ ]
+
+
+class TestAfterPreprocessing(TestCase, TestMatchersInterface):
+
+ def parity(x):
+ return x % 2
+
+ matches_matcher = AfterPreprocessing(parity, Equals(1))
+ matches_matches = [3, 5]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("AfterPreprocessing(<function parity>, Equals(1))",
+ AfterPreprocessing(parity, Equals(1))),
+ ]
+
+ describe_examples = [
+ ("1 != 0: after <function parity> on 2", 2,
+ AfterPreprocessing(parity, Equals(1))),
+ ("1 != 0", 2,
+ AfterPreprocessing(parity, Equals(1), annotate=False)),
+ ]
+
+class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
+ matches_matches = ["1", "2"]
+ matches_mismatches = ["3"]
+
+ str_examples = [(
+ "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
+ MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
+ ]
+
+ describe_examples = [("""Differences: [
+Expected:
+ 1
+Got:
+ 3
+
+Expected:
+ 2
+Got:
+ 3
+
+]""",
+ "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
+
+
+class TestMatchesAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAll(NotEquals(1), NotEquals(2))",
+ MatchesAll(NotEquals(1), NotEquals(2)))]
+
+ describe_examples = [
+ ("""Differences: [
+1 == 1
+]""",
+ 1, MatchesAll(NotEquals(1), NotEquals(2))),
+ ("1 == 1", 1,
+ MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)),
+ ]
+
+
+class TestAnnotate(TestCase, TestMatchersInterface):
+
+ matches_matcher = Annotate("foo", Equals(1))
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
+
+ describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
+
+ def test_if_message_no_message(self):
+ # Annotate.if_message returns the given matcher if there is no
+ # message.
+ matcher = Equals(1)
+ not_annotated = Annotate.if_message('', matcher)
+ self.assertIs(matcher, not_annotated)
+
+ def test_if_message_given_message(self):
+ # Annotate.if_message returns an annotated version of the matcher if a
+ # message is provided.
+ matcher = Equals(1)
+ expected = Annotate('foo', matcher)
+ annotated = Annotate.if_message('foo', matcher)
+ self.assertThat(
+ annotated,
+ MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
+
+
+class TestAnnotatedMismatch(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_forwards_details(self):
+ x = Mismatch('description', {'foo': 'bar'})
+ annotated = AnnotatedMismatch("annotation", x)
+ self.assertEqual(x.get_details(), annotated.get_details())
+
+
+class TestNotInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Not(Equals(1))
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("Not(Equals(1))", Not(Equals(1))),
+ ("Not(Equals('1'))", Not(Equals('1')))]
+
+ describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
+
+
+def is_even(x):
+ return x % 2 == 0
+
+
+class TestMatchesPredicate(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesPredicate(is_even, "%s is not even")
+ matches_matches = [2, 4, 6, 8]
+ matches_mismatches = [3, 5, 7, 9]
+
+ str_examples = [
+ ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"),
+ MatchesPredicate(is_even, "%s is not even")),
+ ]
+
+ describe_examples = [
+ ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")),
+ ]
+
+
+def between(x, low, high):
+ return low < x < high
+
+
+class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(1, 9)
+ matches_matches = [2, 4, 6, 8]
+ matches_mismatches = [0, 1, 9, 10]
+
+ str_examples = [
+ ("MatchesPredicateWithParams(%r, %r)(%s)" % (
+ between, "{0} is not between {1} and {2}", "1, 2"),
+ MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(1, 2)),
+ ("Between(1, 2)", MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}", "Between")(1, 2)),
+ ]
+
+ describe_examples = [
+ ('1 is not between 2 and 3', 1, MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(2, 3)),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py
new file mode 100644
index 00000000000..10967ead25b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for matchers."""
+
+from testtools import (
+ Matcher, # check that Matcher is exposed at the top level for docs.
+ TestCase,
+ )
+from testtools.compat import (
+ str_is_unicode,
+ text_repr,
+ _u,
+ )
+from testtools.matchers import (
+ Equals,
+ MatchesException,
+ Raises,
+ )
+from testtools.matchers._impl import (
+ Mismatch,
+ MismatchDecorator,
+ MismatchError,
+ )
+from testtools.tests.helpers import FullStackRunTest
+
+# Silence pyflakes.
+Matcher
+
+
+class TestMismatch(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_constructor_arguments(self):
+ mismatch = Mismatch("some description", {'detail': "things"})
+ self.assertEqual("some description", mismatch.describe())
+ self.assertEqual({'detail': "things"}, mismatch.get_details())
+
+ def test_constructor_no_arguments(self):
+ mismatch = Mismatch()
+ self.assertThat(mismatch.describe,
+ Raises(MatchesException(NotImplementedError)))
+ self.assertEqual({}, mismatch.get_details())
+
+
+class TestMismatchError(TestCase):
+
+ def test_is_assertion_error(self):
+ # MismatchError is an AssertionError, so that most of the time, it
+ # looks like a test failure, rather than an error.
+ def raise_mismatch_error():
+ raise MismatchError(2, Equals(3), Equals(3).match(2))
+ self.assertRaises(AssertionError, raise_mismatch_error)
+
+ def test_default_description_is_mismatch(self):
+ mismatch = Equals(3).match(2)
+ e = MismatchError(2, Equals(3), mismatch)
+ self.assertEqual(mismatch.describe(), str(e))
+
+ def test_default_description_unicode(self):
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ mismatch = matcher.match(matchee)
+ e = MismatchError(matchee, matcher, mismatch)
+ self.assertEqual(mismatch.describe(), str(e))
+
+ def test_verbose_description(self):
+ matchee = 2
+ matcher = Equals(3)
+ mismatch = matcher.match(2)
+ e = MismatchError(matchee, matcher, mismatch, True)
+ expected = (
+ 'Match failed. Matchee: %r\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ matchee,
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ self.assertEqual(expected, str(e))
+
+ def test_verbose_unicode(self):
+ # When assertThat is given matchees or matchers that contain non-ASCII
+ # unicode strings, we can still provide a meaningful error.
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ mismatch = matcher.match(matchee)
+ expected = (
+ 'Match failed. Matchee: %s\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ text_repr(matchee),
+ matcher,
+ mismatch.describe(),
+ ))
+ e = MismatchError(matchee, matcher, mismatch, True)
+ if str_is_unicode:
+ actual = str(e)
+ else:
+ actual = unicode(e)
+            # Using str() should still work, and return ascii only
+            self.assertEqual(
+                expected.replace(matchee, matchee.encode("unicode-escape")),
+                str(e).decode("ascii"))
+ self.assertEqual(expected, actual)
+
+
+class TestMismatchDecorator(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_forwards_description(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(x.describe(), decorated.describe())
+
+ def test_forwards_details(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(x.get_details(), decorated.get_details())
+
+ def test_repr(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(
+ '<testtools.matchers.MismatchDecorator(%r)>' % (x,),
+ repr(decorated))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
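
For reference, a short sketch (not part of the diff) of the matcher protocol that
TestMismatch and TestMismatchError cover: match() returns None on success or a
Mismatch object, and assertThat wraps a non-None result in a MismatchError.

    from testtools.matchers import Equals

    mismatch = Equals(3).match(2)   # no exception: a Mismatch is returned
    print(mismatch.describe())      # "3 != 2"
    print(mismatch.get_details())   # {} unless the matcher attached details
    print(Equals(2).match(2))       # None, i.e. the match succeeded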
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py
new file mode 100644
index 00000000000..84e57be472c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py
@@ -0,0 +1,603 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for miscellaneous compatibility functions"""
+
+import io
+import linecache
+import os
+import sys
+import tempfile
+import traceback
+
+import testtools
+
+from testtools.compat import (
+ _b,
+ _detect_encoding,
+ _format_exc_info,
+ _format_exception_only,
+ _format_stack_list,
+ _get_source_encoding,
+ _u,
+ reraise,
+ str_is_unicode,
+ text_repr,
+ unicode_output_stream,
+ )
+from testtools.matchers import (
+ Equals,
+ Is,
+ IsInstance,
+ MatchesException,
+ Not,
+ Raises,
+ )
+
+
+class TestDetectEncoding(testtools.TestCase):
+ """Test detection of Python source encodings"""
+
+ def _check_encoding(self, expected, lines, possibly_invalid=False):
+ """Check lines are valid Python and encoding is as expected"""
+ if not possibly_invalid:
+ compile(_b("".join(lines)), "<str>", "exec")
+ encoding = _detect_encoding(lines)
+ self.assertEqual(expected, encoding,
+ "Encoding %r expected but got %r from lines %r" %
+ (expected, encoding, lines))
+
+ def test_examples_from_pep(self):
+ """Check the examples given in PEP 263 all work as specified
+
+ See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
+ """
+ # With interpreter binary and using Emacs style file encoding comment:
+ self._check_encoding("latin-1", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: latin-1 -*-\n",
+ "import os, sys\n"))
+ self._check_encoding("iso-8859-15", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: iso-8859-15 -*-\n",
+ "import os, sys\n"))
+ self._check_encoding("ascii", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: ascii -*-\n",
+ "import os, sys\n"))
+ # Without interpreter line, using plain text:
+ self._check_encoding("utf-8", (
+ "# This Python file uses the following encoding: utf-8\n",
+ "import os, sys\n"))
+ # Text editors might have different ways of defining the file's
+ # encoding, e.g.
+ self._check_encoding("latin-1", (
+ "#!/usr/local/bin/python\n",
+ "# coding: latin-1\n",
+ "import os, sys\n"))
+ # Without encoding comment, Python's parser will assume ASCII text:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "import os, sys\n"))
+ # Encoding comments which don't work:
+ # Missing "coding:" prefix:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "# latin-1\n",
+ "import os, sys\n"))
+ # Encoding comment not on line 1 or 2:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "#\n",
+ "# -*- coding: latin-1 -*-\n",
+ "import os, sys\n"))
+ # Unsupported encoding:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "# -*- coding: utf-42 -*-\n",
+ "import os, sys\n"),
+ possibly_invalid=True)
+
+ def test_bom(self):
+ """Test the UTF-8 BOM counts as an encoding declaration"""
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbfimport sys\n",
+ ))
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbf# File encoding: utf-8\n",
+ ))
+ self._check_encoding("utf-8", (
+ '\xef\xbb\xbf"""Module docstring\n',
+ '\xef\xbb\xbfThat should just be a ZWNB"""\n'))
+ self._check_encoding("latin-1", (
+ '"""Is this coding: latin-1 or coding: utf-8 instead?\n',
+ '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
+ '"""Module docstring say \xe2\x98\x86"""\n'),
+ possibly_invalid=True)
+
+ def test_multiple_coding_comments(self):
+ """Test only the first of multiple coding declarations counts"""
+ self._check_encoding("iso-8859-1", (
+ "# Is the coding: iso-8859-1\n",
+ "# Or is it coding: iso-8859-2\n"),
+ possibly_invalid=True)
+ self._check_encoding("iso-8859-1", (
+ "#!/usr/bin/python\n",
+ "# Is the coding: iso-8859-1\n",
+ "# Or is it coding: iso-8859-2\n"))
+ self._check_encoding("iso-8859-1", (
+ "# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
+ "# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
+ possibly_invalid=True)
+ self._check_encoding("iso-8859-2", (
+ "# Is the coding iso-8859-1 or coding: iso-8859-2\n",
+ "# Spot the missing colon above\n"))
+
+
+class TestGetSourceEncoding(testtools.TestCase):
+ """Test reading and caching the encodings of source files"""
+
+ def setUp(self):
+ testtools.TestCase.setUp(self)
+ dir = tempfile.mkdtemp()
+ self.addCleanup(os.rmdir, dir)
+ self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
+ self._written = False
+
+ def put_source(self, text):
+ f = open(self.filename, "w")
+ try:
+ f.write(text)
+ finally:
+ f.close()
+ if not self._written:
+ self._written = True
+ self.addCleanup(os.remove, self.filename)
+ self.addCleanup(linecache.cache.pop, self.filename, None)
+
+ def test_nonexistant_file_as_ascii(self):
+ """When file can't be found, the encoding should default to ascii"""
+ self.assertEquals("ascii", _get_source_encoding(self.filename))
+
+ def test_encoding_is_cached(self):
+ """The encoding should stay the same if the cache isn't invalidated"""
+ self.put_source(
+ "# coding: iso-8859-13\n"
+ "import os\n")
+ self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+ self.put_source(
+ "# coding: rot-13\n"
+ "vzcbeg bf\n")
+ self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+
+ def test_traceback_rechecks_encoding(self):
+ """A traceback function checks the cache and resets the encoding"""
+ self.put_source(
+ "# coding: iso-8859-8\n"
+ "import os\n")
+ self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
+ self.put_source(
+ "# coding: utf-8\n"
+ "import os\n")
+ try:
+ exec (compile("raise RuntimeError\n", self.filename, "exec"))
+ except RuntimeError:
+ traceback.extract_tb(sys.exc_info()[2])
+ else:
+ self.fail("RuntimeError not raised")
+ self.assertEquals("utf-8", _get_source_encoding(self.filename))
+
+
+class _FakeOutputStream(object):
+ """A simple file-like object for testing"""
+
+ def __init__(self):
+ self.writelog = []
+
+ def write(self, obj):
+ self.writelog.append(obj)
+
+
+class TestUnicodeOutputStream(testtools.TestCase):
+ """Test wrapping output streams so they work with arbitrary unicode"""
+
+ uni = _u("pa\u026a\u03b8\u0259n")
+
+ def setUp(self):
+ super(TestUnicodeOutputStream, self).setUp()
+ if sys.platform == "cli":
+ self.skip("IronPython shouldn't wrap streams to do encoding")
+
+ def test_no_encoding_becomes_ascii(self):
+ """A stream with no encoding attribute gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_encoding_as_none_becomes_ascii(self):
+ """A stream with encoding value of None gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ sout.encoding = None
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_bogus_encoding_becomes_ascii(self):
+ """A stream with a bogus encoding gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ sout.encoding = "bogus"
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_partial_encoding_replace(self):
+ """A string which can be partly encoded correctly should be"""
+ sout = _FakeOutputStream()
+ sout.encoding = "iso-8859-7"
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
+
+ @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode")
+ def test_unicode_encodings_wrapped_when_str_is_not_unicode(self):
+ """A unicode encoding is wrapped but needs no error handler"""
+ sout = _FakeOutputStream()
+ sout.encoding = "utf-8"
+ uout = unicode_output_stream(sout)
+ self.assertEqual(uout.errors, "strict")
+ uout.write(self.uni)
+ self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog)
+
+ @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode")
+ def test_unicode_encodings_not_wrapped_when_str_is_unicode(self):
+ # No wrapping needed if native str type is unicode
+ sout = _FakeOutputStream()
+ sout.encoding = "utf-8"
+ uout = unicode_output_stream(sout)
+ self.assertIs(uout, sout)
+
+ def test_stringio(self):
+ """A StringIO object should maybe get an ascii native str type"""
+ try:
+ from cStringIO import StringIO
+ newio = False
+ except ImportError:
+ from io import StringIO
+ newio = True
+ sout = StringIO()
+ soutwrapper = unicode_output_stream(sout)
+ soutwrapper.write(self.uni)
+ if newio:
+ self.assertEqual(self.uni, sout.getvalue())
+ else:
+ self.assertEqual("pa???n", sout.getvalue())
+
+ def test_io_stringio(self):
+ # io.StringIO only accepts unicode so should be returned as itself.
+ s = io.StringIO()
+ self.assertEqual(s, unicode_output_stream(s))
+
+ def test_io_bytesio(self):
+ # io.BytesIO only accepts bytes so should be wrapped.
+ bytes_io = io.BytesIO()
+ self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io))))
+ # Will error if s was not wrapped properly.
+ unicode_output_stream(bytes_io).write(_u('foo'))
+
+ def test_io_textwrapper(self):
+        # TextIOWrapper accepts unicode, so it should be returned as itself.
+ text_io = io.TextIOWrapper(io.BytesIO())
+ self.assertThat(unicode_output_stream(text_io), Is(text_io))
+ # To be sure...
+ unicode_output_stream(text_io).write(_u('foo'))
+
+
+class TestTextRepr(testtools.TestCase):
+ """Ensure in extending repr, basic behaviours are not being broken"""
+
+ ascii_examples = (
+ # Single character examples
+ # C0 control codes should be escaped except multiline \n
+ ("\x00", "'\\x00'", "'''\\\n\\x00'''"),
+ ("\b", "'\\x08'", "'''\\\n\\x08'''"),
+ ("\t", "'\\t'", "'''\\\n\\t'''"),
+ ("\n", "'\\n'", "'''\\\n\n'''"),
+ ("\r", "'\\r'", "'''\\\n\\r'''"),
+ # Quotes and backslash should match normal repr behaviour
+ ('"', "'\"'", "'''\\\n\"'''"),
+ ("'", "\"'\"", "'''\\\n\\''''"),
+ ("\\", "'\\\\'", "'''\\\n\\\\'''"),
+ # DEL is also unprintable and should be escaped
+ ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"),
+
+ # Character combinations that need double checking
+ ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"),
+ ("\"'", "'\"\\''", "'''\\\n\"\\''''"),
+ ("'\"", "'\\'\"'", "'''\\\n'\"'''"),
+ ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"),
+ ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"),
+ ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"),
+ ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"),
+ ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"),
+ ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"),
+ )
+
+ # Bytes with the high bit set should always be escaped
+ bytes_examples = (
+ (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
+ (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
+ (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"),
+ (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"),
+ (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"),
+ )
+
+ # Unicode doesn't escape printable characters as per the Python 3 model
+ unicode_examples = (
+ # C1 codes are unprintable
+ (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
+ (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"),
+ # No-break space is unprintable
+ (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
+        # Letters from Latin alphabets are printable
+ (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")),
+ (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")),
+ (_u("\u0100"), _u("'\u0100'"), _u("'''\\\n\u0100'''")),
+        # Line and paragraph separators are unprintable
+ (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"),
+ (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"),
+ # Unpaired surrogates are unprintable
+ (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"),
+ (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"),
+ # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs
+ )
+
+ b_prefix = repr(_b(""))[:-2]
+ u_prefix = repr(_u(""))[:-2]
+
+ def test_ascii_examples_oneline_bytes(self):
+ for s, expected, _ in self.ascii_examples:
+ b = _b(s)
+ actual = text_repr(b, multiline=False)
+ # Add self.assertIsInstance check?
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_ascii_examples_oneline_unicode(self):
+ for s, expected, _ in self.ascii_examples:
+ u = _u(s)
+ actual = text_repr(u, multiline=False)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+ def test_ascii_examples_multiline_bytes(self):
+ for s, _, expected in self.ascii_examples:
+ b = _b(s)
+ actual = text_repr(b, multiline=True)
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_ascii_examples_multiline_unicode(self):
+ for s, _, expected in self.ascii_examples:
+ u = _u(s)
+ actual = text_repr(u, multiline=True)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+ def test_ascii_examples_defaultline_bytes(self):
+ for s, one, multi in self.ascii_examples:
+ expected = "\n" in s and multi or one
+ self.assertEqual(text_repr(_b(s)), self.b_prefix + expected)
+
+ def test_ascii_examples_defaultline_unicode(self):
+ for s, one, multi in self.ascii_examples:
+ expected = "\n" in s and multi or one
+ self.assertEqual(text_repr(_u(s)), self.u_prefix + expected)
+
+ def test_bytes_examples_oneline(self):
+ for b, expected, _ in self.bytes_examples:
+ actual = text_repr(b, multiline=False)
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_bytes_examples_multiline(self):
+ for b, _, expected in self.bytes_examples:
+ actual = text_repr(b, multiline=True)
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_unicode_examples_oneline(self):
+ for u, expected, _ in self.unicode_examples:
+ actual = text_repr(u, multiline=False)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+ def test_unicode_examples_multiline(self):
+ for u, _, expected in self.unicode_examples:
+ actual = text_repr(u, multiline=True)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+
+
+class TestReraise(testtools.TestCase):
+ """Tests for trivial reraise wrapper needed for Python 2/3 changes"""
+
+ def test_exc_info(self):
+ """After reraise exc_info matches plus some extra traceback"""
+ try:
+ raise ValueError("Bad value")
+ except ValueError:
+ _exc_info = sys.exc_info()
+ try:
+ reraise(*_exc_info)
+ except ValueError:
+ _new_exc_info = sys.exc_info()
+ self.assertIs(_exc_info[0], _new_exc_info[0])
+ self.assertIs(_exc_info[1], _new_exc_info[1])
+ expected_tb = traceback.extract_tb(_exc_info[2])
+ self.assertEqual(expected_tb,
+ traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):])
+
+ def test_custom_exception_no_args(self):
+ """Reraising does not require args attribute to contain params"""
+
+ class CustomException(Exception):
+ """Exception that expects and sets attrs but not args"""
+
+ def __init__(self, value):
+ Exception.__init__(self)
+ self.value = value
+
+ try:
+ raise CustomException("Some value")
+ except CustomException:
+ _exc_info = sys.exc_info()
+ self.assertRaises(CustomException, reraise, *_exc_info)
+
+
+class Python2CompatibilityTests(testtools.TestCase):
+
+ def setUp(self):
+ super(Python2CompatibilityTests, self).setUp()
+ if sys.version[0] >= '3':
+ self.skip("These tests are only applicable to python 2.")
+
+
+class TestExceptionFormatting(Python2CompatibilityTests):
+ """Test the _format_exception_only function."""
+
+ def _assert_exception_format(self, eclass, evalue, expected):
+ actual = _format_exception_only(eclass, evalue)
+ self.assertThat(actual, Equals(expected))
+ self.assertThat(''.join(actual), IsInstance(unicode))
+
+ def test_supports_string_exception(self):
+ self._assert_exception_format(
+ "String_Exception",
+ None,
+ [_u("String_Exception\n")]
+ )
+
+ def test_supports_regular_exception(self):
+ self._assert_exception_format(
+ RuntimeError,
+ RuntimeError("Something went wrong"),
+ [_u("RuntimeError: Something went wrong\n")]
+ )
+
+ def test_supports_unprintable_exceptions(self):
+ """Verify support for exception classes that raise an exception when
+ __unicode__ or __str__ is called.
+ """
+ class UnprintableException(Exception):
+
+ def __str__(self):
+ raise Exception()
+
+ def __unicode__(self):
+ raise Exception()
+
+ self._assert_exception_format(
+ UnprintableException,
+ UnprintableException("Foo"),
+ [_u("UnprintableException: <unprintable UnprintableException object>\n")]
+ )
+
+ def test_supports_exceptions_with_no_string_value(self):
+ class NoStringException(Exception):
+
+ def __str__(self):
+ return ""
+
+ def __unicode__(self):
+ return _u("")
+
+ self._assert_exception_format(
+ NoStringException,
+ NoStringException("Foo"),
+ [_u("NoStringException\n")]
+ )
+
+ def test_supports_strange_syntax_error(self):
+ """Test support for syntax errors with unusual number of arguments"""
+ self._assert_exception_format(
+ SyntaxError,
+ SyntaxError("Message"),
+ [_u("SyntaxError: Message\n")]
+ )
+
+ def test_supports_syntax_error(self):
+ self._assert_exception_format(
+ SyntaxError,
+ SyntaxError(
+ "Some Syntax Message",
+ (
+ "/path/to/file",
+ 12,
+ 2,
+ "This is the line of code",
+ )
+ ),
+ [
+ _u(' File "/path/to/file", line 12\n'),
+ _u(' This is the line of code\n'),
+ _u(' ^\n'),
+ _u('SyntaxError: Some Syntax Message\n'),
+ ]
+ )
+
+
+class StackListFormattingTests(Python2CompatibilityTests):
+ """Test the _format_stack_list function."""
+
+ def _assert_stack_format(self, stack_lines, expected_output):
+ actual = _format_stack_list(stack_lines)
+ self.assertThat(actual, Equals([expected_output]))
+
+ def test_single_complete_stack_line(self):
+ stack_lines = [(
+ '/path/to/filename',
+ 12,
+ 'func_name',
+ 'some_code()',
+ )]
+ expected = \
+ _u(' File "/path/to/filename", line 12, in func_name\n' \
+ ' some_code()\n')
+
+ self._assert_stack_format(stack_lines, expected)
+
+ def test_single_stack_line_no_code(self):
+ stack_lines = [(
+ '/path/to/filename',
+ 12,
+ 'func_name',
+ None
+ )]
+ expected = _u(' File "/path/to/filename", line 12, in func_name\n')
+ self._assert_stack_format(stack_lines, expected)
+
+
+class FormatExceptionInfoTests(Python2CompatibilityTests):
+
+ def test_individual_functions_called(self):
+ self.patch(
+ testtools.compat,
+ '_format_stack_list',
+ lambda stack_list: [_u("format stack list called\n")]
+ )
+ self.patch(
+ testtools.compat,
+ '_format_exception_only',
+ lambda etype, evalue: [_u("format exception only called\n")]
+ )
+ result = _format_exc_info(None, None, None)
+ expected = [
+ _u("Traceback (most recent call last):\n"),
+ _u("format stack list called\n"),
+ _u("format exception only called\n"),
+ ]
+ self.assertThat(expected, Equals(result))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
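
A brief sketch (not part of the diff) of text_repr, the helper the TestTextRepr
cases above exercise; it behaves like repr() but can produce a triple-quoted
multi-line form, chosen automatically when the text contains a newline.

    from testtools.compat import text_repr

    print(text_repr("a\nb", multiline=False))  # single line, newline escaped
    print(text_repr("a\nb", multiline=True))   # triple-quoted, real newline kept
    print(text_repr("a\nb"))                   # defaults to the multi-line form here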
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py
new file mode 100644
index 00000000000..9ed1b2ffba5
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py
@@ -0,0 +1,349 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import json
+import os
+import tempfile
+import unittest
+
+from testtools import TestCase
+from testtools.compat import (
+ _b,
+ _u,
+ BytesIO,
+ StringIO,
+ )
+from testtools.content import (
+ attach_file,
+ Content,
+ content_from_file,
+ content_from_stream,
+ JSON,
+ json_content,
+ StackLinesContent,
+ StacktraceContent,
+ TracebackContent,
+ text_content,
+ )
+from testtools.content_type import (
+ ContentType,
+ UTF8_TEXT,
+ )
+from testtools.matchers import (
+ Equals,
+ MatchesException,
+ Raises,
+ raises,
+ )
+from testtools.tests.helpers import an_exc_info
+
+
+raises_value_error = Raises(MatchesException(ValueError))
+
+
+class TestContent(TestCase):
+
+ def test___init___None_errors(self):
+ self.assertThat(lambda: Content(None, None), raises_value_error)
+ self.assertThat(
+ lambda: Content(None, lambda: ["traceback"]), raises_value_error)
+ self.assertThat(
+ lambda: Content(ContentType("text", "traceback"), None),
+ raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertEqual(content_type, content.content_type)
+ self.assertEqual(["bytes"], list(content.iter_bytes()))
+
+ def test___eq__(self):
+ content_type = ContentType("foo", "bar")
+ one_chunk = lambda: [_b("bytes")]
+ two_chunk = lambda: [_b("by"), _b("tes")]
+ content1 = Content(content_type, one_chunk)
+ content2 = Content(content_type, one_chunk)
+ content3 = Content(content_type, two_chunk)
+ content4 = Content(content_type, lambda: [_b("by"), _b("te")])
+ content5 = Content(ContentType("f", "b"), two_chunk)
+ self.assertEqual(content1, content2)
+ self.assertEqual(content1, content3)
+ self.assertNotEqual(content1, content4)
+ self.assertNotEqual(content1, content5)
+
+ def test___repr__(self):
+ content = Content(ContentType("application", "octet-stream"),
+ lambda: [_b("\x00bin"), _b("ary\xff")])
+ self.assertIn("\\x00binary\\xff", repr(content))
+
+ def test_iter_text_not_text_errors(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertThat(content.iter_text, raises_value_error)
+
+ def test_iter_text_decodes(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
+
+ def test_iter_text_default_charset_iso_8859_1(self):
+ content_type = ContentType("text", "strange")
+ text = _u("bytes\xea")
+ iso_version = text.encode("ISO-8859-1")
+ content = Content(content_type, lambda: [iso_version])
+ self.assertEqual([text], list(content.iter_text()))
+
+ def test_as_text(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual(_u("bytes\xea"), content.as_text())
+
+ def test_from_file(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ os.write(fd, _b('some data'))
+ os.close(fd)
+ content = content_from_file(path, UTF8_TEXT, chunk_size=2)
+ self.assertThat(
+ list(content.iter_bytes()),
+ Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')]))
+
+ def test_from_nonexistent_file(self):
+ directory = tempfile.mkdtemp()
+ nonexistent = os.path.join(directory, 'nonexistent-file')
+ content = content_from_file(nonexistent)
+ self.assertThat(content.iter_bytes, raises(IOError))
+
+ def test_from_file_default_type(self):
+ content = content_from_file('/nonexistent/path')
+ self.assertThat(content.content_type, Equals(UTF8_TEXT))
+
+ def test_from_file_eager_loading(self):
+ fd, path = tempfile.mkstemp()
+ os.write(fd, _b('some data'))
+ os.close(fd)
+ content = content_from_file(path, UTF8_TEXT, buffer_now=True)
+ os.remove(path)
+ self.assertThat(
+ ''.join(content.iter_text()), Equals('some data'))
+
+ def test_from_file_with_simple_seek(self):
+ f = tempfile.NamedTemporaryFile()
+ f.write(_b('some data'))
+ f.flush()
+ self.addCleanup(f.close)
+ content = content_from_file(
+ f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_file_with_whence_seek(self):
+ f = tempfile.NamedTemporaryFile()
+ f.write(_b('some data'))
+ f.flush()
+ self.addCleanup(f.close)
+ content = content_from_file(
+ f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_stream(self):
+ data = StringIO('some data')
+ content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a']))
+
+ def test_from_stream_default_type(self):
+ data = StringIO('some data')
+ content = content_from_stream(data)
+ self.assertThat(content.content_type, Equals(UTF8_TEXT))
+
+ def test_from_stream_eager_loading(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ self.addCleanup(os.close, fd)
+ os.write(fd, _b('some data'))
+ stream = open(path, 'rb')
+ self.addCleanup(stream.close)
+ content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
+ os.write(fd, _b('more data'))
+ self.assertThat(
+ ''.join(content.iter_text()), Equals('some data'))
+
+ def test_from_stream_with_simple_seek(self):
+ data = BytesIO(_b('some data'))
+ content = content_from_stream(
+ data, UTF8_TEXT, chunk_size=50, seek_offset=5)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_stream_with_whence_seek(self):
+ data = BytesIO(_b('some data'))
+ content = content_from_stream(
+ data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_text(self):
+ data = _u("some data")
+ expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
+ self.assertEqual(expected, text_content(data))
+
+ def test_json_content(self):
+ data = {'foo': 'bar'}
+ expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
+ self.assertEqual(expected, json_content(data))
+
+
+class TestStackLinesContent(TestCase):
+
+ def _get_stack_line_and_expected_output(self):
+ stack_lines = [
+ ('/path/to/file', 42, 'some_function', 'print("Hello World")'),
+ ]
+ expected = ' File "/path/to/file", line 42, in some_function\n' \
+ ' print("Hello World")\n'
+ return stack_lines, expected
+
+ def test_single_stack_line(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ actual = StackLinesContent(stack_lines).as_text()
+
+ self.assertEqual(expected, actual)
+
+ def test_prefix_content(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ prefix = self.getUniqueString() + '\n'
+ content = StackLinesContent(stack_lines, prefix_content=prefix)
+ actual = content.as_text()
+ expected = prefix + expected
+
+ self.assertEqual(expected, actual)
+
+ def test_postfix_content(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ postfix = '\n' + self.getUniqueString()
+ content = StackLinesContent(stack_lines, postfix_content=postfix)
+ actual = content.as_text()
+ expected = expected + postfix
+
+ self.assertEqual(expected, actual)
+
+ def test___init___sets_content_type(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ content = StackLinesContent(stack_lines)
+ expected_content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+
+ self.assertEqual(expected_content_type, content.content_type)
+
+
+class TestTracebackContent(TestCase):
+
+ def test___init___None_errors(self):
+ self.assertThat(
+ lambda: TracebackContent(None, None), raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content = TracebackContent(an_exc_info, self)
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+ self.assertEqual(content_type, content.content_type)
+ result = unittest.TestResult()
+ expected = result._exc_info_to_string(an_exc_info, self)
+ self.assertEqual(expected, ''.join(list(content.iter_text())))
+
+
+class TestStacktraceContent(TestCase):
+
+ def test___init___sets_ivars(self):
+ content = StacktraceContent()
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+
+ self.assertEqual(content_type, content.content_type)
+
+ def test_prefix_is_used(self):
+ prefix = self.getUniqueString()
+ actual = StacktraceContent(prefix_content=prefix).as_text()
+
+ self.assertTrue(actual.startswith(prefix))
+
+ def test_postfix_is_used(self):
+ postfix = self.getUniqueString()
+ actual = StacktraceContent(postfix_content=postfix).as_text()
+
+ self.assertTrue(actual.endswith(postfix))
+
+ def test_top_frame_is_skipped_when_no_stack_is_specified(self):
+ actual = StacktraceContent().as_text()
+
+ self.assertTrue('testtools/content.py' not in actual)
+
+
+class TestAttachFile(TestCase):
+
+ def make_file(self, data):
+ # GZ 2011-04-21: This helper could be useful for methods above trying
+ # to use mkstemp, but should handle write failures and
+ # always close the fd. There must be a better way.
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ os.write(fd, _b(data))
+ os.close(fd)
+ return path
+
+ def test_simple(self):
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ data = 'some data'
+ path = self.make_file(data)
+ my_content = text_content(data)
+ attach_file(test, path, name='foo')
+ self.assertEqual({'foo': my_content}, test.getDetails())
+
+ def test_optional_name(self):
+ # If no name is provided, attach_file just uses the base name of the
+ # file.
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ path = self.make_file('some data')
+ base_path = os.path.basename(path)
+ attach_file(test, path)
+ self.assertEqual([base_path], list(test.getDetails()))
+
+ def test_lazy_read(self):
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ path = self.make_file('some data')
+ attach_file(test, path, name='foo', buffer_now=False)
+ content = test.getDetails()['foo']
+ content_file = open(path, 'w')
+ content_file.write('new data')
+ content_file.close()
+ self.assertEqual(''.join(content.iter_text()), 'new data')
+
+ def test_eager_read_by_default(self):
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ path = self.make_file('some data')
+ attach_file(test, path, name='foo')
+ content = test.getDetails()['foo']
+ content_file = open(path, 'w')
+ content_file.write('new data')
+ content_file.close()
+ self.assertEqual(''.join(content.iter_text()), 'some data')
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
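
For orientation, a hedged sketch (not part of the diff) of the content API that
TestAttachFile drives: details attached to a test are carried on its result. The
ExampleTest class and the 'note'/'log' detail names are illustrative only.

    import os
    import tempfile
    from testtools import TestCase
    from testtools.content import attach_file, text_content

    class ExampleTest(TestCase):
        def test_with_details(self):
            self.addDetail('note', text_content('some context'))
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.remove, path)
            os.write(fd, b'log line\n')
            os.close(fd)
            # buffer_now defaults to True, so the file is read eagerly here.
            attach_file(self, path, name='log')
            self.assertEqual(['log', 'note'], sorted(self.getDetails()))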
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py
new file mode 100644
index 00000000000..2d34f95e479
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import Equals, MatchesException, Raises
+from testtools.content_type import (
+ ContentType,
+ JSON,
+ UTF8_TEXT,
+ )
+
+
+class TestContentType(TestCase):
+
+ def test___init___None_errors(self):
+ raises_value_error = Raises(MatchesException(ValueError))
+        self.assertThat(lambda: ContentType(None, None), raises_value_error)
+        self.assertThat(lambda: ContentType(None, "traceback"),
+            raises_value_error)
+        self.assertThat(lambda: ContentType("text", None), raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ self.assertEqual("foo", content_type.type)
+ self.assertEqual("bar", content_type.subtype)
+ self.assertEqual({}, content_type.parameters)
+
+ def test___init___with_parameters(self):
+ content_type = ContentType("foo", "bar", {"quux": "thing"})
+ self.assertEqual({"quux": "thing"}, content_type.parameters)
+
+ def test___eq__(self):
+ content_type1 = ContentType("foo", "bar", {"quux": "thing"})
+ content_type2 = ContentType("foo", "bar", {"quux": "thing"})
+ content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
+ self.assertTrue(content_type1.__eq__(content_type2))
+ self.assertFalse(content_type1.__eq__(content_type3))
+
+ def test_basic_repr(self):
+ content_type = ContentType('text', 'plain')
+ self.assertThat(repr(content_type), Equals('text/plain'))
+
+ def test_extended_repr(self):
+ content_type = ContentType(
+ 'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
+ self.assertThat(
+ repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
+
+
+class TestBuiltinContentTypes(TestCase):
+
+ def test_plain_text(self):
+ # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
+ self.assertThat(UTF8_TEXT.type, Equals('text'))
+ self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
+ self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
+
+ def test_json_content(self):
+        # The JSON content type represents implicitly UTF-8 application/json.
+ self.assertThat(JSON.type, Equals('application'))
+ self.assertThat(JSON.subtype, Equals('json'))
+ self.assertThat(JSON.parameters, Equals({}))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
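
A tiny sketch (not part of the diff) of ContentType itself, matching the behaviour
the tests above pin down: repr() renders a MIME-style string and equality compares
type, subtype and parameters.

    from testtools.content_type import ContentType, UTF8_TEXT

    ct = ContentType('text', 'plain', {'charset': 'utf8'})
    print(repr(ct))         # text/plain; charset="utf8"
    print(ct == UTF8_TEXT)  # True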
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py
new file mode 100644
index 00000000000..f0510dc9a9f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py
@@ -0,0 +1,767 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+"""Tests for the DeferredRunTest single test execution logic."""
+
+import os
+import signal
+
+from extras import try_import
+
+from testtools import (
+ skipIf,
+ TestCase,
+ TestResult,
+ )
+from testtools.content import (
+ text_content,
+ )
+from testtools.matchers import (
+ Equals,
+ KeysEqual,
+ MatchesException,
+ Raises,
+ )
+from testtools.runtest import RunTest
+from testtools.testresult.doubles import ExtendedTestResult
+from testtools.tests.test_spinner import NeedsTwistedTestCase
+
+assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
+AsynchronousDeferredRunTest = try_import(
+ 'testtools.deferredruntest.AsynchronousDeferredRunTest')
+flush_logged_errors = try_import(
+ 'testtools.deferredruntest.flush_logged_errors')
+SynchronousDeferredRunTest = try_import(
+ 'testtools.deferredruntest.SynchronousDeferredRunTest')
+
+defer = try_import('twisted.internet.defer')
+failure = try_import('twisted.python.failure')
+log = try_import('twisted.python.log')
+DelayedCall = try_import('twisted.internet.base.DelayedCall')
+
+
+class X(object):
+ """Tests that we run as part of our tests, nested to avoid discovery."""
+
+ class Base(TestCase):
+ def setUp(self):
+ super(X.Base, self).setUp()
+ self.calls = ['setUp']
+ self.addCleanup(self.calls.append, 'clean-up')
+ def test_something(self):
+ self.calls.append('test')
+ def tearDown(self):
+ self.calls.append('tearDown')
+ super(X.Base, self).tearDown()
+
+ class ErrorInSetup(Base):
+ expected_calls = ['setUp', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def setUp(self):
+ super(X.ErrorInSetup, self).setUp()
+ raise RuntimeError("Error in setUp")
+
+ class ErrorInTest(Base):
+ expected_calls = ['setUp', 'tearDown', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def test_something(self):
+ raise RuntimeError("Error in test")
+
+ class FailureInTest(Base):
+ expected_calls = ['setUp', 'tearDown', 'clean-up']
+ expected_results = [('addFailure', AssertionError)]
+ def test_something(self):
+ self.fail("test failed")
+
+ class ErrorInTearDown(Base):
+ expected_calls = ['setUp', 'test', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def tearDown(self):
+ raise RuntimeError("Error in tearDown")
+
+ class ErrorInCleanup(Base):
+ expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
+ expected_results = [('addError', ZeroDivisionError)]
+ def test_something(self):
+ self.calls.append('test')
+ self.addCleanup(lambda: 1/0)
+
+ class TestIntegration(NeedsTwistedTestCase):
+
+ def assertResultsMatch(self, test, result):
+ events = list(result._events)
+ self.assertEqual(('startTest', test), events.pop(0))
+ for expected_result in test.expected_results:
+ result = events.pop(0)
+ if len(expected_result) == 1:
+ self.assertEqual((expected_result[0], test), result)
+ else:
+ self.assertEqual((expected_result[0], test), result[:2])
+ error_type = expected_result[1]
+ self.assertIn(error_type.__name__, str(result[2]))
+ self.assertEqual([('stopTest', test)], events)
+
+ def test_runner(self):
+ result = ExtendedTestResult()
+ test = self.test_factory('test_something', runTest=self.runner)
+ test.run(result)
+ self.assertEqual(test.calls, self.test_factory.expected_calls)
+ self.assertResultsMatch(test, result)
+
+
+def make_integration_tests():
+ from unittest import TestSuite
+ from testtools import clone_test_with_new_id
+ runners = [
+ ('RunTest', RunTest),
+ ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
+ ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
+ ]
+
+ tests = [
+ X.ErrorInSetup,
+ X.ErrorInTest,
+ X.ErrorInTearDown,
+ X.FailureInTest,
+ X.ErrorInCleanup,
+ ]
+ base_test = X.TestIntegration('test_runner')
+ integration_tests = []
+ for runner_name, runner in runners:
+ for test in tests:
+ new_test = clone_test_with_new_id(
+ base_test, '%s(%s, %s)' % (
+ base_test.id(),
+ runner_name,
+ test.__name__))
+ new_test.test_factory = test
+ new_test.runner = runner
+ integration_tests.append(new_test)
+ return TestSuite(integration_tests)
+
+
+class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+ def make_result(self):
+ return ExtendedTestResult()
+
+ def make_runner(self, test):
+ return SynchronousDeferredRunTest(test, test.exception_handlers)
+
+ def test_success(self):
+ class SomeCase(TestCase):
+ def test_success(self):
+ return defer.succeed(None)
+ test = SomeCase('test_success')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ result._events, Equals([
+ ('startTest', test),
+ ('addSuccess', test),
+ ('stopTest', test)]))
+
+ def test_failure(self):
+ class SomeCase(TestCase):
+ def test_failure(self):
+ return defer.maybeDeferred(self.fail, "Egads!")
+ test = SomeCase('test_failure')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events], Equals([
+ ('startTest', test),
+ ('addFailure', test),
+ ('stopTest', test)]))
+
+ def test_setUp_followed_by_test(self):
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ return defer.succeed(None)
+ def test_failure(self):
+ return defer.maybeDeferred(self.fail, "Egads!")
+ test = SomeCase('test_failure')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events], Equals([
+ ('startTest', test),
+ ('addFailure', test),
+ ('stopTest', test)]))
+
+
+class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+ def make_reactor(self):
+ from twisted.internet import reactor
+ return reactor
+
+ def make_result(self):
+ return ExtendedTestResult()
+
+ def make_runner(self, test, timeout=None):
+ if timeout is None:
+ timeout = self.make_timeout()
+ return AsynchronousDeferredRunTest(
+ test, test.exception_handlers, timeout=timeout)
+
+ def make_timeout(self):
+ return 0.005
+
+ def test_setUp_returns_deferred_that_fires_later(self):
+ # setUp can return a Deferred that might fire at any time.
+ # AsynchronousDeferredRunTest will not go on to running the test until
+ # the Deferred returned by setUp actually fires.
+ call_log = []
+ marker = object()
+ d = defer.Deferred().addCallback(call_log.append)
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ call_log.append('setUp')
+ return d
+ def test_something(self):
+ call_log.append('test')
+ def fire_deferred():
+ self.assertThat(call_log, Equals(['setUp']))
+ d.callback(marker)
+ test = SomeCase('test_something')
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout=timeout)
+ result = self.make_result()
+ reactor = self.make_reactor()
+ reactor.callLater(timeout, fire_deferred)
+ runner.run(result)
+ self.assertThat(call_log, Equals(['setUp', marker, 'test']))
+
+ def test_calls_setUp_test_tearDown_in_sequence(self):
+ # setUp, the test method and tearDown can all return
+ # Deferreds. AsynchronousDeferredRunTest will make sure that each of
+ # these are run in turn, only going on to the next stage once the
+ # Deferred from the previous stage has fired.
+ call_log = []
+ a = defer.Deferred()
+ a.addCallback(lambda x: call_log.append('a'))
+ b = defer.Deferred()
+ b.addCallback(lambda x: call_log.append('b'))
+ c = defer.Deferred()
+ c.addCallback(lambda x: call_log.append('c'))
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ call_log.append('setUp')
+ return a
+ def test_success(self):
+ call_log.append('test')
+ return b
+ def tearDown(self):
+ super(SomeCase, self).tearDown()
+ call_log.append('tearDown')
+ return c
+ test = SomeCase('test_success')
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ reactor = self.make_reactor()
+ def fire_a():
+ self.assertThat(call_log, Equals(['setUp']))
+ a.callback(None)
+ def fire_b():
+ self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
+ b.callback(None)
+ def fire_c():
+ self.assertThat(
+ call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
+ c.callback(None)
+ reactor.callLater(timeout * 0.25, fire_a)
+ reactor.callLater(timeout * 0.5, fire_b)
+ reactor.callLater(timeout * 0.75, fire_c)
+ runner.run(result)
+ self.assertThat(
+ call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))
+
+ def test_async_cleanups(self):
+ # Cleanups added with addCleanup can return
+ # Deferreds. AsynchronousDeferredRunTest will run each of them in
+ # turn.
+ class SomeCase(TestCase):
+ def test_whatever(self):
+ pass
+ test = SomeCase('test_whatever')
+ call_log = []
+ a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
+ b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
+ c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
+ test.addCleanup(lambda: a)
+ test.addCleanup(lambda: b)
+ test.addCleanup(lambda: c)
+ def fire_a():
+ self.assertThat(call_log, Equals([]))
+ a.callback(None)
+ def fire_b():
+ self.assertThat(call_log, Equals(['a']))
+ b.callback(None)
+ def fire_c():
+ self.assertThat(call_log, Equals(['a', 'b']))
+ c.callback(None)
+ timeout = self.make_timeout()
+ reactor = self.make_reactor()
+ reactor.callLater(timeout * 0.25, fire_a)
+ reactor.callLater(timeout * 0.5, fire_b)
+ reactor.callLater(timeout * 0.75, fire_c)
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(call_log, Equals(['a', 'b', 'c']))
+
+ def test_clean_reactor(self):
+ # If there's cruft left over in the reactor, the test fails.
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ reactor.callLater(timeout * 10.0, lambda: None)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals(
+ [('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_exports_reactor(self):
+ # The reactor is set as an attribute on the test case.
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ self.assertIs(reactor, self.reactor)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test, timeout)
+ result = TestResult()
+ runner.run(result)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.failures)
+
+ def test_unhandled_error_from_deferred(self):
+ # If there's a Deferred with an unhandled error, the test fails. Each
+ # unhandled error is reported with a separate traceback.
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ # Note we aren't returning the Deferred so that the error will
+ # be unhandled.
+ defer.maybeDeferred(lambda: 1/0)
+ defer.maybeDeferred(lambda: 2/0)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ result._events[1] = ('addError', test, None)
+ self.assertThat(result._events, Equals(
+ [('startTest', test),
+ ('addError', test, None),
+ ('stopTest', test)]))
+ self.assertThat(
+ error, KeysEqual(
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ 'unhandled-error-in-deferred-1',
+ ))
+
+ def test_unhandled_error_from_deferred_combined_with_error(self):
+ # If there's a Deferred with an unhandled error, the test fails. Each
+ # unhandled error is reported with a separate traceback, and the error
+ # is still reported.
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ # Note we aren't returning the Deferred so that the error will
+ # be unhandled.
+ defer.maybeDeferred(lambda: 1/0)
+ 2 / 0
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ result._events[1] = ('addError', test, None)
+ self.assertThat(result._events, Equals(
+ [('startTest', test),
+ ('addError', test, None),
+ ('stopTest', test)]))
+ self.assertThat(
+ error, KeysEqual(
+ 'traceback',
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ ))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_keyboard_interrupt_stops_test_run(self):
+ # If we get a SIGINT during a test run, the test stops and no more
+ # tests run.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ raise self.skipTest("SIGINT unavailable")
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout * 5)
+ result = self.make_result()
+ reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+        self.assertThat(lambda: runner.run(result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_keyboard_interrupt_stops_test_run(self):
+ # If we get a SIGINT during a test run, the test stops and no more
+ # tests run.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ raise self.skipTest("SIGINT unavailable")
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout * 5)
+ result = self.make_result()
+ reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+        self.assertThat(lambda: runner.run(result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ def test_timeout_causes_test_error(self):
+ # If a test times out, it reports itself as having failed with a
+ # TimeoutError.
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ self.assertThat(
+ [event[:2] for event in result._events], Equals(
+ [('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ self.assertIn('TimeoutError', str(error['traceback']))
+
+ def test_convenient_construction(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ reactor = object()
+ timeout = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
+ runner = factory(self, [handler])
+ self.assertIs(reactor, runner._reactor)
+ self.assertIs(timeout, runner._timeout)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_use_convenient_factory(self):
+ # Make sure that the factory can actually be used.
+ factory = AsynchronousDeferredRunTest.make_factory()
+ class SomeCase(TestCase):
+ run_tests_with = factory
+ def test_something(self):
+ pass
+ case = SomeCase('test_something')
+ case.run()
+
+ def test_convenient_construction_default_reactor(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ reactor = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
+ runner = factory(self, [handler])
+ self.assertIs(reactor, runner._reactor)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_convenient_construction_default_timeout(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ timeout = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
+ runner = factory(self, [handler])
+ self.assertIs(timeout, runner._timeout)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_convenient_construction_default_debugging(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(debug=True)
+ runner = factory(self, [handler])
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+ self.assertEqual(True, runner._debug)
+
+ def test_deferred_error(self):
+ class SomeTest(TestCase):
+ def test_something(self):
+ return defer.maybeDeferred(lambda: 1/0)
+ test = SomeTest('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_only_addError_once(self):
+ # Even if the reactor is unclean and the test raises an error and the
+        # cleanups raise errors, we only call addError once per test.
+ reactor = self.make_reactor()
+ class WhenItRains(TestCase):
+ def it_pours(self):
+ # Add a dirty cleanup.
+ self.addCleanup(lambda: 3 / 0)
+ # Dirty the reactor.
+ from twisted.internet.protocol import ServerFactory
+ reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
+ # Unhandled error.
+ defer.maybeDeferred(lambda: 2 / 0)
+ # Actual error.
+ raise RuntimeError("Excess precipitation")
+ test = WhenItRains('it_pours')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(
+ error, KeysEqual(
+ 'traceback',
+ 'traceback-1',
+ 'traceback-2',
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ ))
+
+ def test_log_err_is_error(self):
+ # An error logged during the test run is recorded as an error in the
+ # tests.
+ class LogAnError(TestCase):
+ def test_something(self):
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ log.err(f)
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))
+
+ def test_log_err_flushed_is_success(self):
+        # An error that is logged but then flushed with flush_logged_errors
+        # does not fail the test.
+ class LogAnError(TestCase):
+ def test_something(self):
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ log.err(f)
+ flush_logged_errors(ZeroDivisionError)
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ result._events,
+ Equals([
+ ('startTest', test),
+ ('addSuccess', test, {'twisted-log': text_content('')}),
+ ('stopTest', test)]))
+
+ def test_log_in_details(self):
+ class LogAnError(TestCase):
+ def test_something(self):
+ log.msg("foo")
+ 1/0
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_debugging_unchanged_during_test_by_default(self):
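+ # By default, running a test leaves Deferred.debug and
+ # DelayedCall.debug exactly as they were.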
+ debugging = [(defer.Deferred.debug, DelayedCall.debug)]
+ class SomeCase(TestCase):
+ def test_debugging_enabled(self):
+ debugging.append((defer.Deferred.debug, DelayedCall.debug))
+ test = SomeCase('test_debugging_enabled')
+ runner = AsynchronousDeferredRunTest(
+ test, handlers=test.exception_handlers,
+ reactor=self.make_reactor(), timeout=self.make_timeout())
+ runner.run(self.make_result())
+ self.assertEqual(debugging[0], debugging[1])
+
+ def test_debugging_enabled_during_test_with_debug_flag(self):
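+ # With debug=True, Deferred and DelayedCall debugging is enabled for
+ # the duration of the test and restored afterwards.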
+ self.patch(defer.Deferred, 'debug', False)
+ self.patch(DelayedCall, 'debug', False)
+ debugging = []
+ class SomeCase(TestCase):
+ def test_debugging_enabled(self):
+ debugging.append((defer.Deferred.debug, DelayedCall.debug))
+ test = SomeCase('test_debugging_enabled')
+ runner = AsynchronousDeferredRunTest(
+ test, handlers=test.exception_handlers,
+ reactor=self.make_reactor(), timeout=self.make_timeout(),
+ debug=True)
+ runner.run(self.make_result())
+ self.assertEqual([(True, True)], debugging)
+ self.assertEqual(False, defer.Deferred.debug)
+ self.assertEqual(False, DelayedCall.debug)
+
+
+class TestAssertFailsWith(NeedsTwistedTestCase):
+ """Tests for `assert_fails_with`."""
+
+ if SynchronousDeferredRunTest is not None:
+ run_tests_with = SynchronousDeferredRunTest
+
+ def test_assert_fails_with_success(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # succeeds.
+ marker = object()
+ d = assert_fails_with(defer.succeed(marker), RuntimeError)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError not raised (%r returned)" % (marker,)))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_success_multiple_types(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # succeeds, even when several expected exception types are given.
+ marker = object()
+ d = assert_fails_with(
+ defer.succeed(marker), RuntimeError, ZeroDivisionError)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError, ZeroDivisionError not raised "
+ "(%r returned)" % (marker,)))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_wrong_exception(self):
+ # assert_fails_with fails the test if the Deferred fails with an
+ # exception that is not one of the expected types.
+ d = assert_fails_with(
+ defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ lines = str(failure.value).splitlines()
+ self.assertThat(
+ lines[:2],
+ Equals([
+ ("ZeroDivisionError raised instead of RuntimeError, "
+ "KeyboardInterrupt:"),
+ " Traceback (most recent call last):",
+ ]))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_expected_exception(self):
+ # assert_fails_with calls back with the value of the failure if it's
+ # one of the expected types of failures.
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ d = assert_fails_with(defer.fail(f), ZeroDivisionError)
+ return d.addCallback(self.assertThat, Equals(f.value))
+
+ def test_custom_failure_exception(self):
+ # If assert_fails_with is passed a 'failureException' keyword
+ # argument, then it will raise that instead of `AssertionError`.
+ class CustomException(Exception):
+ pass
+ marker = object()
+ d = assert_fails_with(
+ defer.succeed(marker), RuntimeError,
+ failureException=CustomException)
+ def check_result(failure):
+ failure.trap(CustomException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError not raised (%r returned)" % (marker,)))
+ return d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+
+
+class TestRunWithLogObservers(NeedsTwistedTestCase):
+
+ def test_restores_observers(self):
+ from testtools.deferredruntest import run_with_log_observers
+ from twisted.python import log
+ # Make sure there's at least one observer. This reproduces bug
+ # #926189.
+ log.addObserver(lambda *args: None)
+ observers = list(log.theLogPublisher.observers)
+ run_with_log_observers([], lambda: None)
+ self.assertEqual(observers, log.theLogPublisher.observers)
+
+
+def test_suite():
+ from unittest import TestLoader, TestSuite
+ return TestSuite(
+ [TestLoader().loadTestsFromName(__name__),
+ make_integration_tests()])
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py
new file mode 100644
index 00000000000..7bfc1fa267b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details.
+
+"""Tests for the distutils test command logic."""
+
+from distutils.dist import Distribution
+
+from extras import try_import
+
+from testtools.compat import (
+ _b,
+ _u,
+ BytesIO,
+ )
+fixtures = try_import('fixtures')
+
+import testtools
+from testtools import TestCase
+from testtools.distutilscmd import TestCommand
+from testtools.matchers import MatchesRegex
+
+
+if fixtures:
+ class SampleTestFixture(fixtures.Fixture):
+ """Creates testtools.runexample temporarily."""
+
+ def __init__(self):
+ self.package = fixtures.PythonPackage(
+ 'runexample', [('__init__.py', _b("""
+from testtools import TestCase
+
+class TestFoo(TestCase):
+ def test_bar(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+"""))])
+
+ def setUp(self):
+ super(SampleTestFixture, self).setUp()
+ self.useFixture(self.package)
+ testtools.__path__.append(self.package.base)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+
+
+class TestCommandTest(TestCase):
+
+ def setUp(self):
+ super(TestCommandTest, self).setUp()
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+
+ def test_test_module(self):
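+ # The 'test_module' option tells the distutils 'test' command which
+ # module's tests to run.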
+ self.useFixture(SampleTestFixture())
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ dist = Distribution()
+ dist.script_name = 'setup.py'
+ dist.script_args = ['test']
+ dist.cmdclass = {'test': TestCommand}
+ dist.command_options = {
+ 'test': {'test_module': ('command line', 'testtools.runexample')}}
+ cmd = dist.reinitialize_command('test')
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ dist.run_command('test')
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(),
+ MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+ def test_test_suite(self):
+ self.useFixture(SampleTestFixture())
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ dist = Distribution()
+ dist.script_name = 'setup.py'
+ dist.script_args = ['test']
+ dist.cmdclass = {'test': TestCommand}
+ dist.command_options = {
+ 'test': {
+ 'test_suite': (
+ 'command line', 'testtools.runexample.test_suite')}}
+ cmd = dist.reinitialize_command('test')
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ dist.run_command('test')
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(),
+ MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py
new file mode 100644
index 00000000000..2ccd1e853a0
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+import unittest
+
+from extras import try_import
+
+from testtools import (
+ TestCase,
+ content,
+ content_type,
+ )
+from testtools.compat import _b, _u
+from testtools.testresult.doubles import (
+ ExtendedTestResult,
+ )
+
+fixtures = try_import('fixtures')
+LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
+
+
+class TestFixtureSupport(TestCase):
+
+ def setUp(self):
+ super(TestFixtureSupport, self).setUp()
+ if fixtures is None or LoggingFixture is None:
+ self.skipTest("Need fixtures")
+
+ def test_useFixture(self):
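+ # useFixture sets up the fixture and arranges for its cleanUp to be
+ # called when the test finishes.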
+ fixture = LoggingFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = unittest.TestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
+
+ def test_useFixture_cleanups_raise_caught(self):
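+ # An exception raised by a fixture's cleanup is caught and reported
+ # through the result rather than escaping the test run.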
+ calls = []
+ def raiser(ignored):
+ calls.append('called')
+ raise Exception('foo')
+ fixture = fixtures.FunctionFixture(lambda:None, raiser)
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = unittest.TestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertFalse(result.wasSuccessful())
+ self.assertEqual(['called'], calls)
+
+ def test_useFixture_details_captured(self):
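+ # Details added by a fixture are gathered onto the test; a name that
+ # collides with a detail added by the test itself is renamed so both
+ # show up ('content' and 'content-1').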
+ class DetailsFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addCleanup(delattr, self, 'content')
+ self.content = [_b('content available until cleanUp')]
+ self.addDetail('content',
+ content.Content(content_type.UTF8_TEXT, self.get_content))
+ def get_content(self):
+ return self.content
+ fixture = DetailsFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ # Add a colliding detail (both should show up)
+ self.addDetail('content',
+ content.Content(content_type.UTF8_TEXT, lambda:[_b('foo')]))
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addSuccess', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['content', 'content-1'], sorted(details.keys()))
+ self.assertEqual('foo', details['content'].as_text())
+ self.assertEqual('content available until cleanUp',
+ details['content-1'].as_text())
+
+ def test_useFixture_multiple_details_captured(self):
+ class DetailsFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addDetail('aaa', content.text_content("foo"))
+ self.addDetail('bbb', content.text_content("bar"))
+ fixture = DetailsFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addSuccess', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['aaa', 'bbb'], sorted(details))
+ self.assertEqual(_u('foo'), details['aaa'].as_text())
+ self.assertEqual(_u('bar'), details['bbb'].as_text())
+
+ def test_useFixture_details_captured_from_setUp(self):
+ # Details added during fixture set-up are gathered even if setUp()
+ # fails with an exception.
+ class BrokenFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addDetail('content', content.text_content("foobar"))
+ raise Exception()
+ fixture = BrokenFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addError', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['content', 'traceback'], sorted(details))
+ self.assertEqual('foobar', ''.join(details['content'].iter_text()))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py
new file mode 100644
index 00000000000..848c2f0b489
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.tests.helpers import (
+ FullStackRunTest,
+ hide_testtools_stack,
+ is_stack_hidden,
+ )
+
+
+class TestStackHiding(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def setUp(self):
+ super(TestStackHiding, self).setUp()
+ self.addCleanup(hide_testtools_stack, is_stack_hidden())
+
+ def test_is_stack_hidden_consistent_true(self):
+ hide_testtools_stack(True)
+ self.assertEqual(True, is_stack_hidden())
+
+ def test_is_stack_hidden_consistent_false(self):
+ hide_testtools_stack(False)
+ self.assertEqual(False, is_stack_hidden())
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
new file mode 100644
index 00000000000..540a2ee909f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2010 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""Tests for testtools.monkey."""
+
+from testtools import TestCase
+from testtools.matchers import MatchesException, Raises
+from testtools.monkey import MonkeyPatcher, patch
+
+
+class TestObj:
+
+ def __init__(self):
+ self.foo = 'foo value'
+ self.bar = 'bar value'
+ self.baz = 'baz value'
+
+
+class MonkeyPatcherTest(TestCase):
+ """
+ Tests for 'MonkeyPatcher' monkey-patching class.
+ """
+
+ def setUp(self):
+ super(MonkeyPatcherTest, self).setUp()
+ self.test_object = TestObj()
+ self.original_object = TestObj()
+ self.monkey_patcher = MonkeyPatcher()
+
+ def test_empty(self):
+ # A monkey patcher without patches doesn't change a thing.
+ self.monkey_patcher.patch()
+
+ # We can't assert that all state is unchanged, but at least we can
+ # check our test object.
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+ self.assertEquals(self.original_object.bar, self.test_object.bar)
+ self.assertEquals(self.original_object.baz, self.test_object.baz)
+
+ def test_construct_with_patches(self):
+ # Constructing a 'MonkeyPatcher' with patches adds all of the given
+ # patches to the patch list.
+ patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'),
+ (self.test_object, 'bar', 'hehe'))
+ patcher.patch()
+ self.assertEquals('haha', self.test_object.foo)
+ self.assertEquals('hehe', self.test_object.bar)
+ self.assertEquals(self.original_object.baz, self.test_object.baz)
+
+ def test_patch_existing(self):
+ # Patching an attribute that exists sets it to the value defined in the
+ # patch.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.foo, 'haha')
+
+ def test_patch_non_existing(self):
+ # Patching a non-existing attribute sets it to the value defined in
+ # the patch.
+ self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.doesntexist, 'value')
+
+ def test_restore_non_existing(self):
+ # Restoring a value that didn't exist before the patch deletes the
+ # value.
+ self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+ self.monkey_patcher.patch()
+ self.monkey_patcher.restore()
+ marker = object()
+ self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker))
+
+ def test_patch_already_patched(self):
+ # Adding a patch for an object and attribute that already have a patch
+ # overrides the existing patch.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.foo, 'BLAH')
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+
+ def test_restore_twice_is_a_no_op(self):
+ # Restoring an already-restored monkey patch is a no-op.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+ self.monkey_patcher.patch()
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+
+ def test_run_with_patches_decoration(self):
+ # run_with_patches runs the given callable, passing in all arguments
+ # and keyword arguments, and returns the return value of the callable.
+ log = []
+
+ def f(a, b, c=None):
+ log.append((a, b, c))
+ return 'foo'
+
+ result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10)
+ self.assertEquals('foo', result)
+ self.assertEquals([(1, 2, 10)], log)
+
+ def test_repeated_run_with_patches(self):
+ # We can call the same function with run_with_patches more than
+ # once. All patches apply for each call.
+ def f():
+ return (self.test_object.foo, self.test_object.bar,
+ self.test_object.baz)
+
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ result = self.monkey_patcher.run_with_patches(f)
+ self.assertEquals(
+ ('haha', self.original_object.bar, self.original_object.baz),
+ result)
+ result = self.monkey_patcher.run_with_patches(f)
+ self.assertEquals(
+ ('haha', self.original_object.bar, self.original_object.baz),
+ result)
+
+ def test_run_with_patches_restores(self):
+ # run_with_patches restores the original values after the function has
+ # executed.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+ self.monkey_patcher.run_with_patches(lambda: None)
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+
+ def test_run_with_patches_restores_on_exception(self):
+ # run_with_patches restores the original values even when the function
+ # raises an exception.
+ def _():
+ self.assertEquals(self.test_object.foo, 'haha')
+ self.assertEquals(self.test_object.bar, 'blahblah')
+ raise RuntimeError("Something went wrong!")
+
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah')
+
+ self.assertThat(lambda:self.monkey_patcher.run_with_patches(_),
+ Raises(MatchesException(RuntimeError("Something went wrong!"))))
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+ self.assertEquals(self.test_object.bar, self.original_object.bar)
+
+
+class TestPatchHelper(TestCase):
+
+ def test_patch_patches(self):
+ # patch(obj, name, value) sets obj.name to value.
+ test_object = TestObj()
+ patch(test_object, 'foo', 42)
+ self.assertEqual(42, test_object.foo)
+
+ def test_patch_returns_cleanup(self):
+ # patch(obj, name, value) returns a nullary callable that restores obj
+ # to its original state when run.
+ test_object = TestObj()
+ original = test_object.foo
+ cleanup = patch(test_object, 'foo', 42)
+ cleanup()
+ self.assertEqual(original, test_object.foo)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py
new file mode 100644
index 00000000000..e89ecdc26a4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the test runner logic."""
+
+from unittest import TestSuite
+import sys
+
+from extras import try_import
+fixtures = try_import('fixtures')
+testresources = try_import('testresources')
+
+import testtools
+from testtools import TestCase, run
+from testtools.compat import (
+ _b,
+ StringIO,
+ )
+from testtools.matchers import Contains
+
+
+if fixtures:
+ class SampleTestFixture(fixtures.Fixture):
+ """Creates testtools.runexample temporarily."""
+
+ def __init__(self, broken=False):
+ """Create a SampleTestFixture.
+
+ :param broken: If True, the sample file will not be importable.
+ """
+ if not broken:
+ init_contents = _b("""\
+from testtools import TestCase
+
+class TestFoo(TestCase):
+ def test_bar(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+""")
+ else:
+ init_contents = b"class not in\n"
+ self.package = fixtures.PythonPackage(
+ 'runexample', [('__init__.py', init_contents)])
+
+ def setUp(self):
+ super(SampleTestFixture, self).setUp()
+ self.useFixture(self.package)
+ testtools.__path__.append(self.package.base)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+ self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
+
+
+if fixtures and testresources:
+ class SampleResourcedFixture(fixtures.Fixture):
+ """Creates a test suite that uses testresources."""
+
+ def __init__(self):
+ super(SampleResourcedFixture, self).__init__()
+ self.package = fixtures.PythonPackage(
+ 'resourceexample', [('__init__.py', _b("""
+from fixtures import Fixture
+from testresources import (
+ FixtureResource,
+ OptimisingTestSuite,
+ ResourcedTestCase,
+ )
+from testtools import TestCase
+
+class Printer(Fixture):
+
+ def setUp(self):
+ super(Printer, self).setUp()
+ print('Setting up Printer')
+
+ def reset(self):
+ pass
+
+class TestFoo(TestCase, ResourcedTestCase):
+ # When run, this prints 'Setting up Printer' just once. If the
+ # OptimisingTestSuite is not honoured, it prints once per test case.
+ resources=[('res', FixtureResource(Printer()))]
+ def test_bar(self):
+ pass
+ def test_foo(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
+"""))])
+
+ def setUp(self):
+ super(SampleResourcedFixture, self).setUp()
+ self.useFixture(self.package)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+ testtools.__path__.append(self.package.base)
+
+
+class TestRun(TestCase):
+
+ def setUp(self):
+ super(TestRun, self).setUp()
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+
+ def test_run_custom_list(self):
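+ # A custom testRunner that implements list() is used by the -l option
+ # to list the tests instead of running them.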
+ self.useFixture(SampleTestFixture())
+ tests = []
+ class CaptureList(run.TestToolsTestRunner):
+ def list(self, test):
+ tests.append(set([case.id() for case
+ in testtools.testsuite.iterate_tests(test)]))
+ out = StringIO()
+ try:
+ program = run.TestProgram(
+ argv=['prog', '-l', 'testtools.runexample.test_suite'],
+ stdout=out, testRunner=CaptureList)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual([set(['testtools.runexample.TestFoo.test_bar',
+ 'testtools.runexample.TestFoo.test_quux'])], tests)
+
+ def test_run_list(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ try:
+ run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+testtools.runexample.TestFoo.test_quux
+""", out.getvalue())
+
+ def test_run_list_failed_import(self):
+ if not run.have_discover:
+ self.skipTest("Need discover")
+ broken = self.useFixture(SampleTestFixture(broken=True))
+ out = StringIO()
+ exc = self.assertRaises(
+ SystemExit,
+ run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
+ self.assertEqual(2, exc.args[0])
+ self.assertEqual("""Failed to import
+runexample.__init__
+""", out.getvalue())
+
+ def test_run_orders_tests(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # We list two tests - one that exists and one that doesn't. Only the
+ # listed test that exists should be reported; neither the missing test
+ # nor the unlisted test that does exist should appear.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+ finally:
+ f.close()
+ try:
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+ def test_run_load_list(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # We list two tests - one that exists and one that doesn't. Only the
+ # listed test that exists should be reported; neither the missing test
+ # nor the unlisted test that does exist should appear.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+ finally:
+ f.close()
+ try:
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+ def test_load_list_preserves_custom_suites(self):
+ if testresources is None:
+ self.skipTest("Need testresources")
+ self.useFixture(SampleResourcedFixture())
+ # We load two tests, not loading one. Both share a resource, so we
+ # should see just one resource setup occur.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.resourceexample.TestFoo.test_bar
+testtools.resourceexample.TestFoo.test_foo
+"""))
+ finally:
+ f.close()
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ try:
+ run.main(['prog', '--load-list', tempname,
+ 'testtools.resourceexample.test_suite'], stdout.stream)
+ except SystemExit:
+ # Evil resides in TestProgram.
+ pass
+ out = stdout.getDetails()['stdout'].as_text()
+ self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
+
+ def test_run_failfast(self):
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+
+ class Failing(TestCase):
+ def test_a(self):
+ self.fail('a')
+ def test_b(self):
+ self.fail('b')
+ runner = run.TestToolsTestRunner(failfast=True)
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
+
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
new file mode 100644
index 00000000000..afbb8baf395
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
@@ -0,0 +1,303 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Tests for the RunTest single test execution logic."""
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ run_test_with,
+ RunTest,
+ TestCase,
+ TestResult,
+ )
+from testtools.matchers import MatchesException, Is, Raises
+from testtools.testresult.doubles import ExtendedTestResult
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestRunTest(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def make_case(self):
+ class Case(TestCase):
+ def test(self):
+ pass
+ return Case('test')
+
+ def test___init___short(self):
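+ # RunTest stores the case it is constructed with and defaults to an
+ # empty list of handlers.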
+ run = RunTest("bar")
+ self.assertEqual("bar", run.case)
+ self.assertEqual([], run.handlers)
+
+ def test__init____handlers(self):
+ handlers = [("quux", "baz")]
+ run = RunTest("bar", handlers)
+ self.assertEqual(handlers, run.handlers)
+
+ def test_run_with_result(self):
+ # test.run passes result down to _run_test_method.
+ log = []
+ class Case(TestCase):
+ def _run_test_method(self, result):
+ log.append(result)
+ case = Case('_run_test_method')
+ run = RunTest(case, lambda x: log.append(x))
+ result = TestResult()
+ run.run(result)
+ self.assertEqual(1, len(log))
+ self.assertEqual(result, log[0].decorated)
+
+ def test_run_no_result_manages_new_result(self):
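+ # When run() is called without a result, RunTest creates and uses a
+ # new TestResult of its own.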
+ log = []
+ run = RunTest(self.make_case(), lambda x: log.append(x) or x)
+ result = run.run()
+ self.assertIsInstance(result.decorated, TestResult)
+
+ def test__run_core_called(self):
+ case = self.make_case()
+ log = []
+ run = RunTest(case, lambda x: x)
+ run._run_core = lambda: log.append('foo')
+ run.run()
+ self.assertEqual(['foo'], log)
+
+ def test__run_user_does_not_catch_keyboard(self):
+ case = self.make_case()
+ def raises():
+ raise KeyboardInterrupt("yo")
+ run = RunTest(case, None)
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(KeyboardInterrupt)))
+ self.assertEqual([], run.result._events)
+
+ def test__run_user_calls_onException(self):
+ case = self.make_case()
+ log = []
+ def handler(exc_info):
+ log.append("got it")
+ self.assertEqual(3, len(exc_info))
+ self.assertIsInstance(exc_info[1], KeyError)
+ self.assertIs(KeyError, exc_info[0])
+ case.addOnException(handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ run = RunTest(case, [(KeyError, None)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual(["got it"], log)
+
+ def test__run_user_can_catch_Exception(self):
+ case = self.make_case()
+ e = Exception('Yo')
+ def raises():
+ raise e
+ log = []
+ run = RunTest(case, [(Exception, None)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_raised(self):
+ case = self.make_case()
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(KeyError)))
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
+ case = self.make_case()
+ def broken_handler(exc_info):
+ # Raise ValueError because that's the type the runner knows how to
+ # catch - and it must not catch it when raised by a handler.
+ raise ValueError('boo')
+ case.addOnException(broken_handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(ValueError)))
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_returns_result(self):
+ case = self.make_case()
+ def returns():
+ return 1
+ run = RunTest(case)
+ run.result = ExtendedTestResult()
+ self.assertEqual(1, run._run_user(returns))
+ self.assertEqual([], run.result._events)
+
+ def test__run_one_decorates_result(self):
+ log = []
+ class Run(RunTest):
+ def _run_prepared_result(self, result):
+ log.append(result)
+ return result
+ run = Run(self.make_case(), lambda x: x)
+ result = run._run_one('foo')
+ self.assertEqual([result], log)
+ self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
+ self.assertEqual('foo', result.decorated)
+
+ def test__run_prepared_result_calls_start_and_stop_test(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ run = RunTest(case, lambda x: x)
+ run.run(result)
+ self.assertEqual([
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test__run_prepared_result_calls_stop_test_always(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ def inner():
+ raise Exception("foo")
+ run = RunTest(case, lambda x: x)
+ run._run_core = inner
+ self.assertThat(lambda: run.run(result),
+ Raises(MatchesException(Exception("foo"))))
+ self.assertEqual([
+ ('startTest', case),
+ ('stopTest', case),
+ ], result._events)
+
+
+class CustomRunTest(RunTest):
+
+ marker = object()
+
+ def run(self, result=None):
+ return self.marker
+
+
+class TestTestCaseSupportForRunTest(TestCase):
+
+ def test_pass_custom_run_test(self):
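+ # A runTest argument passed to the TestCase constructor is used to
+ # run the test.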
+ class SomeCase(TestCase):
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=CustomRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_default_is_runTest_class_variable(self):
+ class SomeCase(TestCase):
+ run_tests_with = CustomRunTest
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_constructor_argument_overrides_class_variable(self):
+ # If a 'runTest' argument is passed to the test's constructor, that
+ # overrides the class variable.
+ marker = object()
+ class DifferentRunTest(RunTest):
+ def run(self, result=None):
+ return marker
+ class SomeCase(TestCase):
+ run_tests_with = CustomRunTest
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=DifferentRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+ def test_decorator_for_run_test(self):
+ # Individual test methods can be marked as needing a special runner.
+ class SomeCase(TestCase):
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_extended_decorator_for_run_test(self):
+ # Individual test methods can be marked as needing a special runner.
+ # Extra arguments can be passed to the decorator which will then be
+ # passed on to the RunTest object.
+ marker = object()
+ class FooRunTest(RunTest):
+ def __init__(self, case, handlers=None, bar=None):
+ super(FooRunTest, self).__init__(case, handlers)
+ self.bar = bar
+ def run(self, result=None):
+ return self.bar
+ class SomeCase(TestCase):
+ @run_test_with(FooRunTest, bar=marker)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+ def test_works_as_inner_decorator(self):
+ # Even if run_test_with is the innermost decorator, it will be
+ # respected.
+ def wrapped(function):
+ """Silly, trivial decorator."""
+ def decorated(*args, **kwargs):
+ return function(*args, **kwargs)
+ decorated.__name__ = function.__name__
+ decorated.__dict__.update(function.__dict__)
+ return decorated
+ class SomeCase(TestCase):
+ @wrapped
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_constructor_overrides_decorator(self):
+ # If a 'runTest' argument is passed to the test's constructor, that
+ # overrides the decorator.
+ marker = object()
+ class DifferentRunTest(RunTest):
+ def run(self, result=None):
+ return marker
+ class SomeCase(TestCase):
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=DifferentRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py
new file mode 100644
index 00000000000..6112252acd9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py
@@ -0,0 +1,333 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the evil Twisted reactor-spinning we do."""
+
+import os
+import signal
+
+from extras import try_import
+
+from testtools import (
+ skipIf,
+ TestCase,
+ )
+from testtools.matchers import (
+ Equals,
+ Is,
+ MatchesException,
+ Raises,
+ )
+
+_spinner = try_import('testtools._spinner')
+
+defer = try_import('twisted.internet.defer')
+Failure = try_import('twisted.python.failure.Failure')
+
+
+class NeedsTwistedTestCase(TestCase):
+
+ def setUp(self):
+ super(NeedsTwistedTestCase, self).setUp()
+ if defer is None or Failure is None:
+ self.skipTest("Need Twisted to run")
+
+
+class TestNotReentrant(NeedsTwistedTestCase):
+
+ def test_not_reentrant(self):
+ # A function decorated as not being re-entrant will raise a
+ # _spinner.ReentryError if it is called while it is running.
+ calls = []
+ @_spinner.not_reentrant
+ def log_something():
+ calls.append(None)
+ if len(calls) < 5:
+ log_something()
+ self.assertThat(
+ log_something, Raises(MatchesException(_spinner.ReentryError)))
+ self.assertEqual(1, len(calls))
+
+ def test_deeper_stack(self):
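+ # ReentryError is raised even when the re-entry happens indirectly,
+ # via another decorated function further down the stack.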
+ calls = []
+ @_spinner.not_reentrant
+ def g():
+ calls.append(None)
+ if len(calls) < 5:
+ f()
+ @_spinner.not_reentrant
+ def f():
+ calls.append(None)
+ if len(calls) < 5:
+ g()
+ self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
+ self.assertEqual(2, len(calls))
+
+
+class TestExtractResult(NeedsTwistedTestCase):
+
+ def test_not_fired(self):
+ # _spinner.extract_result raises _spinner.DeferredNotFired if it's
+ # given a Deferred that has not fired.
+ self.assertThat(lambda:_spinner.extract_result(defer.Deferred()),
+ Raises(MatchesException(_spinner.DeferredNotFired)))
+
+ def test_success(self):
+ # _spinner.extract_result returns the value of the Deferred if it has
+ # fired successfully.
+ marker = object()
+ d = defer.succeed(marker)
+ self.assertThat(_spinner.extract_result(d), Equals(marker))
+
+ def test_failure(self):
+ # _spinner.extract_result raises the failure's exception if it's given
+ # a Deferred that is failing.
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = Failure()
+ d = defer.fail(f)
+ self.assertThat(lambda:_spinner.extract_result(d),
+ Raises(MatchesException(ZeroDivisionError)))
+
+
+class TestTrapUnhandledErrors(NeedsTwistedTestCase):
+
+ def test_no_deferreds(self):
+ marker = object()
+ result, errors = _spinner.trap_unhandled_errors(lambda: marker)
+ self.assertEqual([], errors)
+ self.assertIs(marker, result)
+
+ def test_unhandled_error(self):
+ failures = []
+ def make_deferred_but_dont_handle():
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = Failure()
+ failures.append(f)
+ defer.fail(f)
+ result, errors = _spinner.trap_unhandled_errors(
+ make_deferred_but_dont_handle)
+ self.assertIs(None, result)
+ self.assertEqual(failures, [error.failResult for error in errors])
+
+
+class TestRunInReactor(NeedsTwistedTestCase):
+
+ def make_reactor(self):
+ from twisted.internet import reactor
+ return reactor
+
+ def make_spinner(self, reactor=None):
+ if reactor is None:
+ reactor = self.make_reactor()
+ return _spinner.Spinner(reactor)
+
+ def make_timeout(self):
+ return 0.01
+
+ def test_function_called(self):
+ # run_in_reactor actually calls the function given to it.
+ calls = []
+ marker = object()
+ self.make_spinner().run(self.make_timeout(), calls.append, marker)
+ self.assertThat(calls, Equals([marker]))
+
+ def test_return_value_returned(self):
+ # run_in_reactor returns the value returned by the function given to
+ # it.
+ marker = object()
+ result = self.make_spinner().run(self.make_timeout(), lambda: marker)
+ self.assertThat(result, Is(marker))
+
+ def test_exception_reraised(self):
+ # If the given function raises an error, run_in_reactor re-raises that
+ # error.
+ self.assertThat(
+ lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0),
+ Raises(MatchesException(ZeroDivisionError)))
+
+ def test_keyword_arguments(self):
+ # run_in_reactor passes keyword arguments on.
+ calls = []
+ function = lambda *a, **kw: calls.extend([a, kw])
+ self.make_spinner().run(self.make_timeout(), function, foo=42)
+ self.assertThat(calls, Equals([(), {'foo': 42}]))
+
+ def test_not_reentrant(self):
+ # run_in_reactor raises an error if it is called inside another call
+ # to run_in_reactor.
+ spinner = self.make_spinner()
+ self.assertThat(lambda: spinner.run(
+ self.make_timeout(), spinner.run, self.make_timeout(),
+ lambda: None), Raises(MatchesException(_spinner.ReentryError)))
+
+ def test_deferred_value_returned(self):
+ # If the given function returns a Deferred, run_in_reactor returns the
+ # value in the Deferred at the end of the callback chain.
+ marker = object()
+ result = self.make_spinner().run(
+ self.make_timeout(), lambda: defer.succeed(marker))
+ self.assertThat(result, Is(marker))
+
+ def test_preserve_signal_handler(self):
+ signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
+ signals = filter(
+ None, (getattr(signal, name, None) for name in signals))
+ for sig in signals:
+ self.addCleanup(signal.signal, sig, signal.getsignal(sig))
+ new_hdlrs = list(lambda *a: None for _ in signals)
+ for sig, hdlr in zip(signals, new_hdlrs):
+ signal.signal(sig, hdlr)
+ spinner = self.make_spinner()
+ spinner.run(self.make_timeout(), lambda: None)
+ self.assertEqual(new_hdlrs, map(signal.getsignal, signals))
+
+ def test_timeout(self):
+ # If the function takes too long to run, we raise a
+ # _spinner.TimeoutError.
+ timeout = self.make_timeout()
+ self.assertThat(
+ lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()),
+ Raises(MatchesException(_spinner.TimeoutError)))
+
+ def test_no_junk_by_default(self):
+ # If the reactor hasn't spun yet, then there cannot be any junk.
+ spinner = self.make_spinner()
+ self.assertThat(spinner.get_junk(), Equals([]))
+
+ def test_clean_do_nothing(self):
+ # If there's nothing going on in the reactor, then clean does nothing
+ # and returns an empty list.
+ spinner = self.make_spinner()
+ result = spinner._clean()
+ self.assertThat(result, Equals([]))
+
+ def test_clean_delayed_call(self):
+ # If there's a delayed call in the reactor, then clean cancels it and
+ # returns an empty list.
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ call = reactor.callLater(10, lambda: None)
+ results = spinner._clean()
+ self.assertThat(results, Equals([call]))
+ self.assertThat(call.active(), Equals(False))
+
+ def test_clean_delayed_call_cancelled(self):
+ # If there's a delayed call that has just been cancelled, then
+ # _clean() no longer reports it.
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ call = reactor.callLater(10, lambda: None)
+ call.cancel()
+ results = spinner._clean()
+ self.assertThat(results, Equals([]))
+
+ def test_clean_selectables(self):
+ # If there's still a selectable (e.g. a listening socket), then
+ # clean() removes it from the reactor's registry.
+ #
+ # Note that the socket is left open. This emulates a bug in trial.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
+ spinner.run(self.make_timeout(), lambda: None)
+ results = spinner.get_junk()
+ self.assertThat(results, Equals([port]))
+
+ def test_clean_running_threads(self):
+ import threading
+ import time
+ current_threads = list(threading.enumerate())
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ spinner = self.make_spinner(reactor)
+ spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
+ # Python before 2.5 has a race condition with thread handling where
+ # join() does not remove threads from enumerate before returning - the
+ # thread being joined does the removal. This was fixed in Python 2.5
+ # but we still support 2.4, so we have to work around the issue.
+ # http://bugs.python.org/issue1703448.
+ self.assertThat(
+ [thread for thread in threading.enumerate() if thread.isAlive()],
+ Equals(current_threads))
+
+ def test_leftover_junk_available(self):
+ # If 'run' is given a function that leaves the reactor dirty in some
+ # way, 'run' will clean up the reactor and then store information
+ # about the junk. This information can be retrieved with get_junk.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ port = spinner.run(
+ self.make_timeout(), reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
+ self.assertThat(spinner.get_junk(), Equals([port]))
+
+ def test_will_not_run_with_previous_junk(self):
+ # If 'run' is called and there's still junk in the spinner's junk
+ # list, then the spinner will refuse to run.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
+ self.assertThat(lambda: spinner.run(timeout, lambda: None),
+ Raises(MatchesException(_spinner.StaleJunkError)))
+
+ def test_clear_junk_clears_previous_junk(self):
+ # clear_junk returns whatever junk has accumulated and empties the
+ # spinner's junk list, so that 'run' can be called again.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
+ junk = spinner.clear_junk()
+ self.assertThat(junk, Equals([port]))
+ self.assertThat(spinner.get_junk(), Equals([]))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_sigint_raises_no_result_error(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ self.skipTest("SIGINT not available")
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
+ Raises(MatchesException(_spinner.NoResultError)))
+ self.assertEqual([], spinner._clean())
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_sigint_raises_no_result_error_second_time(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ # This test is exactly the same as test_sigint_raises_no_result_error,
+ # and exists to make sure we haven't futzed with state.
+ self.test_sigint_raises_no_result_error()
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_sigint_raises_no_result_error(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ self.skipTest("SIGINT not available")
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
+ Raises(MatchesException(_spinner.NoResultError)))
+ self.assertEqual([], spinner._clean())
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_sigint_raises_no_result_error_second_time(self):
+ self.test_fast_sigint_raises_no_result_error()
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py
new file mode 100644
index 00000000000..5010f9ac12c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Test tag support."""
+
+
+from testtools import TestCase
+from testtools.tags import TagContext
+
+
+class TestTags(TestCase):
+
+ def test_no_tags(self):
+ # A tag context has no tags initially.
+ tag_context = TagContext()
+ self.assertEqual(set(), tag_context.get_current_tags())
+
+ def test_add_tag(self):
+ # A tag added with change_tags appears in get_current_tags.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), tag_context.get_current_tags())
+
+ def test_add_tag_twice(self):
+ # Calling change_tags twice to add tags adds both tags to the current
+ # tags.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ tag_context.change_tags(set(['bar']), set())
+ self.assertEqual(
+ set(['foo', 'bar']), tag_context.get_current_tags())
+
+ def test_change_tags_returns_tags(self):
+ # change_tags returns the current tags. This is a convenience.
+ tag_context = TagContext()
+ tags = tag_context.change_tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), tags)
+
+ def test_remove_tag(self):
+ # change_tags can remove tags from the context.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ tag_context.change_tags(set(), set(['foo']))
+ self.assertEqual(set(), tag_context.get_current_tags())
+
+ def test_child_context(self):
+ # A TagContext can have a parent. If so, its tags are the tags of the
+ # parent at the moment of construction.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ self.assertEqual(
+ parent.get_current_tags(), child.get_current_tags())
+
+ def test_add_to_child(self):
+ # Adding a tag to the child context doesn't affect the parent.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(['bar']), set())
+ self.assertEqual(set(['foo', 'bar']), child.get_current_tags())
+ self.assertEqual(set(['foo']), parent.get_current_tags())
+
+ def test_remove_in_child(self):
+ # A tag that was in the parent context can be removed from the child
+ # context without affecting the parent.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(), set(['foo']))
+ self.assertEqual(set(), child.get_current_tags())
+ self.assertEqual(set(['foo']), parent.get_current_tags())
+
+ def test_parent(self):
+ # The parent can be retrieved from a child context.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(), set(['foo']))
+ self.assertEqual(parent, child.parent)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py
new file mode 100644
index 00000000000..680368db4a1
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py
@@ -0,0 +1,1550 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for extensions to the base test library."""
+
+from doctest import ELLIPSIS
+from pprint import pformat
+import sys
+import unittest
+
+from testtools import (
+ DecorateTestCaseResult,
+ ErrorHolder,
+ MultipleExceptions,
+ PlaceHolder,
+ TestCase,
+ clone_test_with_new_id,
+ content,
+ skip,
+ skipIf,
+ skipUnless,
+ testcase,
+ )
+from testtools.compat import (
+ _b,
+ _u,
+ )
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
+from testtools.matchers import (
+ Annotate,
+ DocTestMatches,
+ Equals,
+ HasLength,
+ MatchesException,
+ Raises,
+ )
+from testtools.testcase import (
+ attr,
+ Nullary,
+ WithAttributes,
+ )
+from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ FullStackRunTest,
+ LoggingResult,
+ )
+try:
+ exec('from __future__ import with_statement')
+except SyntaxError:
+ pass
+else:
+ from testtools.tests.test_with_with import *
+
+
+class TestPlaceHolder(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def makePlaceHolder(self, test_id="foo", short_description=None):
+ return PlaceHolder(test_id, short_description)
+
+ def test_id_comes_from_constructor(self):
+ # The id() of a PlaceHolder is whatever you pass into the constructor.
+ test = PlaceHolder("test id")
+ self.assertEqual("test id", test.id())
+
+ def test_shortDescription_is_id(self):
+ # The shortDescription() of a PlaceHolder is the id, by default.
+ test = PlaceHolder("test id")
+ self.assertEqual(test.id(), test.shortDescription())
+
+ def test_shortDescription_specified(self):
+ # If a shortDescription is provided to the constructor, then
+ # shortDescription() returns that instead.
+ test = PlaceHolder("test id", "description")
+ self.assertEqual("description", test.shortDescription())
+
+ def test_repr_just_id(self):
+ # repr(placeholder) shows you how the object was constructed.
+ test = PlaceHolder("test id")
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSuccess', %s, {})>" % repr(
+ test.id()), repr(test))
+
+ def test_repr_with_description(self):
+ # repr(placeholder) shows you how the object was constructed.
+ test = PlaceHolder("test id", "description")
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSuccess', %r, {}, %r)>" % (
+ test.id(), test.shortDescription()), repr(test))
+
+ def test_repr_custom_outcome(self):
+ test = PlaceHolder("test id", outcome='addSkip')
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSkip', %r, {})>" % (
+ test.id()), repr(test))
+
+ def test_counts_as_one_test(self):
+ # A placeholder test counts as one test.
+ test = self.makePlaceHolder()
+ self.assertEqual(1, test.countTestCases())
+
+ def test_str_is_id(self):
+ # str(placeholder) is always the id(). We are not barbarians.
+ test = self.makePlaceHolder()
+ self.assertEqual(test.id(), str(test))
+
+ def test_runs_as_success(self):
+ # When run, a PlaceHolder test records a success.
+ test = self.makePlaceHolder()
+ log = []
+ test.run(LoggingResult(log))
+ self.assertEqual(
+ [('tags', set(), set()), ('startTest', test), ('addSuccess', test),
+ ('stopTest', test), ('tags', set(), set()),],
+ log)
+
+ def test_supplies_details(self):
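+ # Details passed to the PlaceHolder constructor are forwarded to the
+ # result along with the outcome.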
+ details = {'quux':None}
+ test = PlaceHolder('foo', details=details)
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(
+ [('tags', set(), set()),
+ ('startTest', test),
+ ('addSuccess', test, details),
+ ('stopTest', test),
+ ('tags', set(), set()),
+ ],
+ result._events)
+
+ def test_supplies_timestamps(self):
+ test = PlaceHolder('foo', details={}, timestamps=["A", "B"])
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(
+ [('time', "A"),
+ ('tags', set(), set()),
+ ('startTest', test),
+ ('time', "B"),
+ ('addSuccess', test),
+ ('stopTest', test),
+ ('tags', set(), set()),
+ ],
+ result._events)
+
+ def test_call_is_run(self):
+ # A PlaceHolder can be called, in which case it behaves like run.
+ test = self.makePlaceHolder()
+ run_log = []
+ test.run(LoggingResult(run_log))
+ call_log = []
+ test(LoggingResult(call_log))
+ self.assertEqual(run_log, call_log)
+
+ def test_runs_without_result(self):
+ # A PlaceHolder can be run without a result, in which case there's no
+ # way to actually get at the result.
+ self.makePlaceHolder().run()
+
+ def test_debug(self):
+ # A PlaceHolder can be debugged.
+ self.makePlaceHolder().debug()
+
+ def test_supports_tags(self):
+ result = ExtendedTestResult()
+ tags = set(['foo', 'bar'])
+ case = PlaceHolder("foo", tags=tags)
+ case.run(result)
+ self.assertEqual([
+ ('tags', tags, set()),
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ('tags', set(), tags),
+ ], result._events)
+
+
+class TestErrorHolder(TestCase):
+ # Note that these tests exist because ErrorHolder exists - it could be
+ # deprecated and dropped at this point.
+
+ run_test_with = FullStackRunTest
+
+ def makeException(self):
+ try:
+ raise RuntimeError("danger danger")
+ except:
+ return sys.exc_info()
+
+ def makePlaceHolder(self, test_id="foo", error=None,
+ short_description=None):
+ if error is None:
+ error = self.makeException()
+ return ErrorHolder(test_id, error, short_description)
+
+ def test_id_comes_from_constructor(self):
+ # The id() of a PlaceHolder is whatever you pass into the constructor.
+ test = ErrorHolder("test id", self.makeException())
+ self.assertEqual("test id", test.id())
+
+ def test_shortDescription_is_id(self):
+ # The shortDescription() of a PlaceHolder is the id, by default.
+ test = ErrorHolder("test id", self.makeException())
+ self.assertEqual(test.id(), test.shortDescription())
+
+ def test_shortDescription_specified(self):
+ # If a shortDescription is provided to the constructor, then
+ # shortDescription() returns that instead.
+ test = ErrorHolder("test id", self.makeException(), "description")
+ self.assertEqual("description", test.shortDescription())
+
+ def test_counts_as_one_test(self):
+ # A placeholder test counts as one test.
+ test = self.makePlaceHolder()
+ self.assertEqual(1, test.countTestCases())
+
+ def test_str_is_id(self):
+ # str(placeholder) is always the id(). We are not barbarians.
+ test = self.makePlaceHolder()
+ self.assertEqual(test.id(), str(test))
+
+ def test_runs_as_error(self):
+ # When run, an ErrorHolder test records an error.
+ error = self.makeException()
+ test = self.makePlaceHolder(error=error)
+ result = ExtendedTestResult()
+ log = result._events
+ test.run(result)
+ self.assertEqual(
+ [('tags', set(), set()),
+ ('startTest', test),
+ ('addError', test, test._details),
+ ('stopTest', test),
+ ('tags', set(), set())], log)
+
+ def test_call_is_run(self):
+ # A PlaceHolder can be called, in which case it behaves like run.
+ test = self.makePlaceHolder()
+ run_log = []
+ test.run(LoggingResult(run_log))
+ call_log = []
+ test(LoggingResult(call_log))
+ self.assertEqual(run_log, call_log)
+
+ def test_runs_without_result(self):
+ # A PlaceHolder can be run without a result, in which case there's no
+ # way to actually get at the result.
+ self.makePlaceHolder().run()
+
+ def test_debug(self):
+ # A PlaceHolder can be debugged.
+ self.makePlaceHolder().debug()
+
+
+class TestEquality(TestCase):
+ """Test ``TestCase``'s equality implementation."""
+
+ run_test_with = FullStackRunTest
+
+ def test_identicalIsEqual(self):
+        # TestCases are equal if they are identical.
+ self.assertEqual(self, self)
+
+ def test_nonIdenticalInUnequal(self):
+        # TestCases are not equal if they are not identical.
+ self.assertNotEqual(TestCase(methodName='run'),
+ TestCase(methodName='skip'))
+
+
+class TestAssertions(TestCase):
+ """Test assertions in TestCase."""
+
+ run_test_with = FullStackRunTest
+
+ def raiseError(self, exceptionFactory, *args, **kwargs):
+ raise exceptionFactory(*args, **kwargs)
+
+ def test_formatTypes_single(self):
+ # Given a single class, _formatTypes returns the name.
+ class Foo(object):
+ pass
+ self.assertEqual('Foo', self._formatTypes(Foo))
+
+ def test_formatTypes_multiple(self):
+ # Given multiple types, _formatTypes returns the names joined by
+ # commas.
+ class Foo(object):
+ pass
+ class Bar(object):
+ pass
+ self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
+
+ def test_assertRaises(self):
+ # assertRaises asserts that a callable raises a particular exception.
+ self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
+
+ def test_assertRaises_exception_w_metaclass(self):
+ # assertRaises works when called for exceptions with custom metaclasses
+ class MyExMeta(type):
+ def __init__(cls, name, bases, dct):
+ """ Do some dummy metaclass stuff """
+ dct.update({'answer': 42})
+ type.__init__(cls, name, bases, dct)
+
+ class MyEx(Exception):
+ __metaclass__ = MyExMeta
+
+ self.assertRaises(MyEx, self.raiseError, MyEx)
+
+ def test_assertRaises_fails_when_no_error_raised(self):
+ # assertRaises raises self.failureException when it's passed a
+ # callable that raises no error.
+ ret = ('orange', 42)
+ self.assertFails(
+ "<function ...<lambda> at ...> returned ('orange', 42)",
+ self.assertRaises, RuntimeError, lambda: ret)
+
+ def test_assertRaises_fails_when_different_error_raised(self):
+ # assertRaises re-raises an exception that it didn't expect.
+ self.assertThat(lambda: self.assertRaises(RuntimeError,
+ self.raiseError, ZeroDivisionError),
+ Raises(MatchesException(ZeroDivisionError)))
+
+ def test_assertRaises_returns_the_raised_exception(self):
+ # assertRaises returns the exception object that was raised. This is
+ # useful for testing that exceptions have the right message.
+
+ # This contraption stores the raised exception, so we can compare it
+ # to the return value of assertRaises.
+ raisedExceptions = []
+ def raiseError():
+ try:
+ raise RuntimeError('Deliberate error')
+ except RuntimeError:
+ raisedExceptions.append(sys.exc_info()[1])
+ raise
+
+ exception = self.assertRaises(RuntimeError, raiseError)
+ self.assertEqual(1, len(raisedExceptions))
+ self.assertTrue(
+ exception is raisedExceptions[0],
+ "%r is not %r" % (exception, raisedExceptions[0]))
+
+ def test_assertRaises_with_multiple_exceptions(self):
+ # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
+ # function raises one of ExceptionTwo or ExceptionOne.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[0])
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[1])
+
+ def test_assertRaises_with_multiple_exceptions_failure_mode(self):
+ # If assertRaises is called expecting one of a group of exceptions and
+ # a callable that doesn't raise an exception, then fail with an
+ # appropriate error message.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ self.assertRaises(
+ self.failureException,
+ self.assertRaises, expectedExceptions, lambda: None)
+ self.assertFails('<function ...<lambda> at ...> returned None',
+ self.assertRaises, expectedExceptions, lambda: None)
+
+ def test_assertRaises_function_repr_in_exception(self):
+ # When assertRaises fails, it includes the repr of the invoked
+ # function in the error message, so it's easy to locate the problem.
+ def foo():
+ """An arbitrary function."""
+ pass
+ self.assertThat(
+ lambda: self.assertRaises(Exception, foo),
+ Raises(
+ MatchesException(self.failureException, '.*%r.*' % (foo,))))
+
+ def assertFails(self, message, function, *args, **kwargs):
+ """Assert that function raises a failure with the given message."""
+ failure = self.assertRaises(
+ self.failureException, function, *args, **kwargs)
+ self.assertThat(failure, DocTestMatches(message, ELLIPSIS))
+
+ def test_assertIn_success(self):
+ # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
+ self.assertIn(3, range(10))
+ self.assertIn('foo', 'foo bar baz')
+ self.assertIn('foo', 'foo bar baz'.split())
+
+ def test_assertIn_failure(self):
+ # assertIn(needle, haystack) fails the test when 'needle' is not in
+ # 'haystack'.
+ self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
+ self.assertFails(
+ '%r not in %r' % ('qux', 'foo bar baz'),
+ self.assertIn, 'qux', 'foo bar baz')
+
+ def test_assertNotIn_success(self):
+ # assertNotIn(needle, haystack) asserts that 'needle' is not in
+ # 'haystack'.
+ self.assertNotIn(3, [0, 1, 2])
+ self.assertNotIn('qux', 'foo bar baz')
+
+ def test_assertNotIn_failure(self):
+ # assertNotIn(needle, haystack) fails the test when 'needle' is in
+ # 'haystack'.
+ self.assertFails('[1, 2, 3] matches Contains(3)', self.assertNotIn,
+ 3, [1, 2, 3])
+ self.assertFails(
+ "'foo bar baz' matches Contains('foo')",
+ self.assertNotIn, 'foo', 'foo bar baz')
+
+ def test_assertIsInstance(self):
+ # assertIsInstance asserts that an object is an instance of a class.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, Foo)
+
+ def test_assertIsInstance_multiple_classes(self):
+ # assertIsInstance asserts that an object is an instance of one of a
+ # group of classes.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ class Bar(object):
+ """Another simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, (Foo, Bar))
+ self.assertIsInstance(Bar(), (Foo, Bar))
+
+ def test_assertIsInstance_failure(self):
+ # assertIsInstance(obj, klass) fails the test when obj is not an
+ # instance of klass.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ "'42' is not an instance of %s" % self._formatTypes(Foo),
+ self.assertIsInstance, 42, Foo)
+
+ def test_assertIsInstance_failure_multiple_classes(self):
+ # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
+ # not an instance of klass1 or klass2.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ class Bar(object):
+ """Another simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ "'42' is not an instance of any of (%s)" % self._formatTypes([Foo, Bar]),
+ self.assertIsInstance, 42, (Foo, Bar))
+
+ def test_assertIsInstance_overridden_message(self):
+ # assertIsInstance(obj, klass, msg) permits a custom message.
+ self.assertFails("'42' is not an instance of str: foo",
+ self.assertIsInstance, 42, str, "foo")
+
+ def test_assertIs(self):
+ # assertIs asserts that an object is identical to another object.
+ self.assertIs(None, None)
+ some_list = [42]
+ self.assertIs(some_list, some_list)
+ some_object = object()
+ self.assertIs(some_object, some_object)
+
+ def test_assertIs_fails(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another.
+ self.assertFails('None is not 42', self.assertIs, None, 42)
+ self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
+
+ def test_assertIs_fails_with_message(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another, and includes a user-supplied message, if it's provided.
+ self.assertFails(
+ 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
+
+ def test_assertIsNot(self):
+ # assertIsNot asserts that an object is not identical to another
+ # object.
+ self.assertIsNot(None, 42)
+ self.assertIsNot([42], [42])
+ self.assertIsNot(object(), object())
+
+ def test_assertIsNot_fails(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another.
+ self.assertFails('None matches Is(None)', self.assertIsNot, None, None)
+ some_list = [42]
+ self.assertFails(
+ '[42] matches Is([42])', self.assertIsNot, some_list, some_list)
+
+ def test_assertIsNot_fails_with_message(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another, and includes a user-supplied message if it's provided.
+ self.assertFails(
+ 'None matches Is(None): foo bar', self.assertIsNot, None, None,
+ "foo bar")
+
+ def test_assertThat_matches_clean(self):
+ class Matcher(object):
+ def match(self, foo):
+ return None
+ self.assertThat("foo", Matcher())
+
+ def test_assertThat_mismatch_raises_description(self):
+ calls = []
+ class Mismatch(object):
+ def __init__(self, thing):
+ self.thing = thing
+ def describe(self):
+ calls.append(('describe_diff', self.thing))
+ return "object is not a thing"
+ def get_details(self):
+ return {}
+ class Matcher(object):
+ def match(self, thing):
+ calls.append(('match', thing))
+ return Mismatch(thing)
+ def __str__(self):
+ calls.append(('__str__',))
+ return "a description"
+ class Test(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ result = Test("test").run()
+ self.assertEqual([
+ ('match', "foo"),
+ ('describe_diff', "foo"),
+ ], calls)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_assertThat_output(self):
+ matchee = 'foo'
+ matcher = Equals('bar')
+ expected = matcher.match(matchee).describe()
+ self.assertFails(expected, self.assertThat, matchee, matcher)
+
+ def test_assertThat_message_is_annotated(self):
+ matchee = 'foo'
+ matcher = Equals('bar')
+ expected = Annotate('woo', matcher).match(matchee).describe()
+ self.assertFails(expected, self.assertThat, matchee, matcher, 'woo')
+
+ def test_assertThat_verbose_output(self):
+ matchee = 'foo'
+ matcher = Equals('bar')
+ expected = (
+ 'Match failed. Matchee: %r\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ matchee,
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ self.assertFails(
+ expected, self.assertThat, matchee, matcher, verbose=True)
+
+ def test__force_failure_fails_test(self):
+ class Test(TestCase):
+ def test_foo(self):
+ self.force_failure = True
+ self.remaining_code_run = True
+ test = Test('test_foo')
+ result = test.run()
+ self.assertFalse(result.wasSuccessful())
+ self.assertTrue(test.remaining_code_run)
+
+ def get_error_string(self, e):
+ """Get the string showing how 'e' would be formatted in test output.
+
+ This is a little bit hacky, since it's designed to give consistent
+ output regardless of Python version.
+
+ In testtools, TestResult._exc_info_to_unicode is the point of dispatch
+ between various different implementations of methods that format
+ exceptions, so that's what we have to call. However, that method cares
+ about stack traces and formats the exception class. We don't care
+ about either of these, so we take its output and parse it a little.
+ """
+ error = TracebackContent((e.__class__, e, None), self).as_text()
+ # We aren't at all interested in the traceback.
+ if error.startswith('Traceback (most recent call last):\n'):
+ lines = error.splitlines(True)[1:]
+ for i, line in enumerate(lines):
+ if not line.startswith(' '):
+ break
+ error = ''.join(lines[i:])
+ # We aren't interested in how the exception type is formatted.
+ exc_class, error = error.split(': ', 1)
+ return error
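+
+    # Sketch of the intent (illustrative values only, not asserted anywhere in
+    # this file): given e = AssertionError("x != y"), the formatted text is
+    # roughly "AssertionError: x != y\n"; the code above drops any traceback
+    # lines and the "AssertionError" prefix, returning just "x != y\n" so the
+    # tests can compare against the bare message text.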
+
+ def test_assertThat_verbose_unicode(self):
+ # When assertThat is given matchees or matchers that contain non-ASCII
+ # unicode strings, we can still provide a meaningful error.
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ expected = (
+ 'Match failed. Matchee: %s\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n\n' % (
+ repr(matchee).replace("\\xa7", matchee),
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ e = self.assertRaises(
+ self.failureException, self.assertThat, matchee, matcher,
+ verbose=True)
+ self.assertEqual(expected, self.get_error_string(e))
+
+ def test_assertEqual_nice_formatting(self):
+ message = "These things ought not be equal."
+ a = ['apple', 'banana', 'cherry']
+ b = {'Thatcher': 'One who mends roofs of straw',
+ 'Major': 'A military officer, ranked below colonel',
+ 'Blair': 'To shout loudly',
+ 'Brown': 'The colour of healthy human faeces'}
+ expected_error = '\n'.join([
+ '!=:',
+ 'reference = %s' % pformat(a),
+ 'actual = %s' % pformat(b),
+ ': ' + message,
+ ])
+ self.assertFails(expected_error, self.assertEqual, a, b, message)
+ self.assertFails(expected_error, self.assertEquals, a, b, message)
+ self.assertFails(expected_error, self.failUnlessEqual, a, b, message)
+
+ def test_assertEqual_formatting_no_message(self):
+ a = "cat"
+ b = "dog"
+ expected_error = "'cat' != 'dog'"
+ self.assertFails(expected_error, self.assertEqual, a, b)
+ self.assertFails(expected_error, self.assertEquals, a, b)
+ self.assertFails(expected_error, self.failUnlessEqual, a, b)
+
+ def test_assertEqual_non_ascii_str_with_newlines(self):
+ message = _u("Be careful mixing unicode and bytes")
+ a = "a\n\xa7\n"
+ b = "Just a longish string so the more verbose output form is used."
+ expected_error = '\n'.join([
+ '!=:',
+ "reference = '''\\",
+ 'a',
+ repr('\xa7')[1:-1],
+ "'''",
+ 'actual = %r' % (b,),
+ ': ' + message,
+ ])
+ self.assertFails(expected_error, self.assertEqual, a, b, message)
+
+ def test_assertIsNone(self):
+ self.assertIsNone(None)
+
+ expected_error = 'None is not 0'
+ self.assertFails(expected_error, self.assertIsNone, 0)
+
+ def test_assertIsNotNone(self):
+ self.assertIsNotNone(0)
+ self.assertIsNotNone("0")
+
+ expected_error = 'None matches Is(None)'
+ self.assertFails(expected_error, self.assertIsNotNone, None)
+
+ def test_fail_preserves_traceback_detail(self):
+ class Test(TestCase):
+ def test(self):
+ self.addDetail('traceback', text_content('foo'))
+ self.fail('bar')
+ test = Test('test')
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(result._events[1][2].keys()))
+
+
+class TestAddCleanup(TestCase):
+ """Tests for TestCase.addCleanup."""
+
+ run_test_with = FullStackRunTest
+
+ class LoggingTest(TestCase):
+ """A test that logs calls to setUp, runTest and tearDown."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._calls = ['setUp']
+
+ def brokenSetUp(self):
+            # A setUp that deliberately fails.
+ self._calls = ['brokenSetUp']
+ raise RuntimeError('Deliberate Failure')
+
+ def runTest(self):
+ self._calls.append('runTest')
+
+ def brokenTest(self):
+ raise RuntimeError('Deliberate broken test')
+
+ def tearDown(self):
+ self._calls.append('tearDown')
+ TestCase.tearDown(self)
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._result_calls = []
+ self.test = TestAddCleanup.LoggingTest('runTest')
+ self.logging_result = LoggingResult(self._result_calls)
+
+ def assertErrorLogEqual(self, messages):
+ self.assertEqual(messages, [call[0] for call in self._result_calls])
+
+ def assertTestLogEqual(self, messages):
+ """Assert that the call log equals 'messages'."""
+ case = self._result_calls[0][1]
+ self.assertEqual(messages, case._calls)
+
+ def logAppender(self, message):
+ """A cleanup that appends 'message' to the tests log.
+
+ Cleanups are callables that are added to a test by addCleanup. To
+ verify that our cleanups run in the right order, we add strings to a
+ list that acts as a log. This method returns a cleanup that will add
+ the given message to that log when run.
+ """
+ self.test._calls.append(message)
+
+ def test_fixture(self):
+ # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
+        # This test doesn't test addCleanup itself; it just sanity-checks the
+ # fixture.
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanup_run_before_tearDown(self):
+ # Cleanup functions added with 'addCleanup' are called before tearDown
+ # runs.
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
+
+ def test_add_cleanup_called_if_setUp_fails(self):
+ # Cleanup functions added with 'addCleanup' are called even if setUp
+ # fails. Note that tearDown has a different behavior: it is only
+ # called when setUp succeeds.
+ self.test.setUp = self.test.brokenSetUp
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
+
+ def test_addCleanup_called_in_reverse_order(self):
+ # Cleanup functions added with 'addCleanup' are called in reverse
+ # order.
+ #
+ # One of the main uses of addCleanup is to dynamically create
+ # resources that need some sort of explicit tearDown. Often one
+ # resource will be created in terms of another, e.g.,
+ # self.first = self.makeFirst()
+ # self.second = self.makeSecond(self.first)
+ #
+ # When this happens, we generally want to clean up the second resource
+ # before the first one, since the second depends on the first.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_tearDown_runs_after_cleanup_failure(self):
+ # tearDown runs even if a cleanup function fails.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanups_continue_running_after_error(self):
+ # All cleanups are always run, even if one or two of them fail.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_error_in_cleanups_are_captured(self):
+        # If a cleanup raises an error, we want to record it and fail the
+ # test, even though we go on to run other cleanups.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
+
+ def test_keyboard_interrupt_not_caught(self):
+ # If a cleanup raises KeyboardInterrupt, it gets reraised.
+ def raiseKeyboardInterrupt():
+ raise KeyboardInterrupt()
+ self.test.addCleanup(raiseKeyboardInterrupt)
+ self.assertThat(lambda:self.test.run(self.logging_result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ def test_all_errors_from_MultipleExceptions_reported(self):
+ # When a MultipleExceptions exception is caught, all the errors are
+ # reported.
+ def raiseMany():
+ try:
+ 1/0
+ except Exception:
+ exc_info1 = sys.exc_info()
+ try:
+ 1/0
+ except Exception:
+ exc_info2 = sys.exc_info()
+ raise MultipleExceptions(exc_info1, exc_info2)
+ self.test.addCleanup(raiseMany)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(self.logging_result._events[1][2].keys()))
+
+ def test_multipleCleanupErrorsReported(self):
+ # Errors from all failing cleanups are reported as separate backtraces.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(self.logging_result._events[1][2].keys()))
+
+ def test_multipleErrorsCoreAndCleanupReported(self):
+        # Errors from the failing test body and from all failing cleanups are
+        # reported together, as separate traceback details on a single
+        # addError call.
+ self.test = TestAddCleanup.LoggingTest('brokenTest')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
+ set(self.logging_result._events[1][2].keys()))
+
+
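+# Illustrative sketch only (not part of upstream testtools): a helper showing
+# the addCleanup ordering exercised by TestAddCleanup above. The helper and
+# its names are hypothetical and nothing in this module calls it.
+def _sketch_cleanup_order(case):
+    """Register two cleanups on ``case`` and return the log they write to.
+
+    Because cleanups run in LIFO order, after ``case`` finishes the log will
+    read ['added second', 'added first'].
+    """
+    log = []
+    case.addCleanup(log.append, 'added first')   # registered first, runs last
+    case.addCleanup(log.append, 'added second')  # registered last, runs first
+    return log
+
+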
+class TestWithDetails(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def assertDetailsProvided(self, case, expected_outcome, expected_keys):
+ """Assert that when case is run, details are provided to the result.
+
+ :param case: A TestCase to run.
+ :param expected_outcome: The call that should be made.
+ :param expected_keys: The keys to look for.
+ """
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ expected = [
+ ('startTest', case),
+ (expected_outcome, case),
+ ('stopTest', case),
+ ]
+ self.assertEqual(3, len(result._events))
+ self.assertEqual(expected[0], result._events[0])
+ self.assertEqual(expected[1], result._events[1][0:2])
+ # Checking the TB is right is rather tricky. doctest line matching
+ # would help, but 'meh'.
+ self.assertEqual(sorted(expected_keys),
+ sorted(result._events[1][2].keys()))
+ self.assertEqual(expected[-1], result._events[-1])
+
+ def get_content(self):
+ return content.Content(
+ content.ContentType("text", "foo"), lambda: [_b('foo')])
+
+
+class TestExpectedFailure(TestWithDetails):
+ """Tests for expected failures and unexpected successess."""
+
+ run_test_with = FullStackRunTest
+
+ def make_unexpected_case(self):
+ class Case(TestCase):
+ def test(self):
+ raise testcase._UnexpectedSuccess
+ case = Case('test')
+ return case
+
+ def test_raising__UnexpectedSuccess_py27(self):
+ case = self.make_unexpected_case()
+ result = Python27TestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test_raising__UnexpectedSuccess_extended(self):
+ case = self.make_unexpected_case()
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case, {}),
+ ('stopTest', case),
+ ], result._events)
+
+ def make_xfail_case_xfails(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 0)
+ case = Case('test')
+ return case
+
+ def make_xfail_case_succeeds(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 1)
+ case = Case('test')
+ return case
+
+ def test_expectFailure_KnownFailure_extended(self):
+ case = self.make_xfail_case_xfails()
+ self.assertDetailsProvided(case, "addExpectedFailure",
+ ["foo", "traceback", "reason"])
+
+ def test_expectFailure_KnownFailure_unexpected_success(self):
+ case = self.make_xfail_case_succeeds()
+ self.assertDetailsProvided(case, "addUnexpectedSuccess",
+ ["foo", "reason"])
+
+
+class TestUniqueFactories(TestCase):
+ """Tests for getUniqueString and getUniqueInteger."""
+
+ run_test_with = FullStackRunTest
+
+ def test_getUniqueInteger(self):
+ # getUniqueInteger returns an integer that increments each time you
+ # call it.
+ one = self.getUniqueInteger()
+ self.assertEqual(1, one)
+ two = self.getUniqueInteger()
+ self.assertEqual(2, two)
+
+ def test_getUniqueString(self):
+ # getUniqueString returns the current test id followed by a unique
+ # integer.
+ name_one = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 1), name_one)
+ name_two = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 2), name_two)
+
+ def test_getUniqueString_prefix(self):
+ # If getUniqueString is given an argument, it uses that argument as
+ # the prefix of the unique string, rather than the test id.
+ name_one = self.getUniqueString('foo')
+ self.assertThat(name_one, Equals('foo-1'))
+ name_two = self.getUniqueString('bar')
+ self.assertThat(name_two, Equals('bar-2'))
+
+
+class TestCloneTestWithNewId(TestCase):
+ """Tests for clone_test_with_new_id."""
+
+ run_test_with = FullStackRunTest
+
+ def test_clone_test_with_new_id(self):
+ class FooTestCase(TestCase):
+ def test_foo(self):
+ pass
+ test = FooTestCase('test_foo')
+ oldName = test.id()
+ newName = self.getUniqueString()
+ newTest = clone_test_with_new_id(test, newName)
+ self.assertEqual(newName, newTest.id())
+ self.assertEqual(oldName, test.id(),
+ "the original test instance should be unchanged.")
+
+ def test_cloned_testcase_does_not_share_details(self):
+ """A cloned TestCase does not share the details dict."""
+ class Test(TestCase):
+ def test_foo(self):
+ self.addDetail(
+ 'foo', content.Content('text/plain', lambda: 'foo'))
+ orig_test = Test('test_foo')
+ cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString())
+ orig_test.run(unittest.TestResult())
+ self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
+ self.assertEqual(None, cloned_test.getDetails().get('foo'))
+
+
+class TestDetailsProvided(TestWithDetails):
+
+ run_test_with = FullStackRunTest
+
+ def test_addDetail(self):
+ mycontent = self.get_content()
+ self.addDetail("foo", mycontent)
+ details = self.getDetails()
+ self.assertEqual({"foo": mycontent}, details)
+
+ def test_addError(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ 1/0
+ self.assertDetailsProvided(Case("test"), "addError",
+ ["foo", "traceback"])
+
+ def test_addFailure(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.fail('yo')
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_addSkip(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.skip('yo')
+ self.assertDetailsProvided(Case("test"), "addSkip",
+ ["foo", "reason"])
+
+    def test_addSuccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo"])
+
+ def test_addUnexpectedSuccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ raise testcase._UnexpectedSuccess()
+ self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
+ ["foo"])
+
+ def test_addDetails_from_Mismatch(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_multiple_addDetails_from_Mismatch(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content, "bar": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["bar", "foo", "traceback"])
+
+ def test_addDetails_with_same_name_as_key_from_get_details(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "foo-1", "traceback"])
+
+ def test_addDetailUniqueName_works(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetailUniqueName("foo", content)
+ self.addDetailUniqueName("foo", content)
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo", "foo-1"])
+
+
+class TestSetupTearDown(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def test_setUpNotCalled(self):
+ class DoesnotcallsetUp(TestCase):
+ def setUp(self):
+ pass
+ def test_method(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcallsetUp('test_method').run(result)
+ self.assertThat(result.errors, HasLength(1))
+ self.assertThat(result.errors[0][1],
+ DocTestMatches(
+ "...ValueError...File...testtools/tests/test_testcase.py...",
+ ELLIPSIS))
+
+ def test_tearDownNotCalled(self):
+ class DoesnotcalltearDown(TestCase):
+ def test_method(self):
+ pass
+ def tearDown(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcalltearDown('test_method').run(result)
+ self.assertThat(result.errors, HasLength(1))
+ self.assertThat(result.errors[0][1],
+ DocTestMatches(
+ "...ValueError...File...testtools/tests/test_testcase.py...",
+ ELLIPSIS))
+
+
+class TestSkipping(TestCase):
+ """Tests for skipping of tests functionality."""
+
+ run_test_with = FullStackRunTest
+
+ def test_skip_causes_skipException(self):
+ self.assertThat(lambda:self.skip("Skip this test"),
+ Raises(MatchesException(self.skipException)))
+
+ def test_can_use_skipTest(self):
+ self.assertThat(lambda:self.skipTest("Skip this test"),
+ Raises(MatchesException(self.skipException)))
+
+ def test_skip_without_reason_works(self):
+ class Test(TestCase):
+ def test(self):
+ raise self.skipException()
+ case = Test("test")
+ result = ExtendedTestResult()
+ case.run(result)
+ self.assertEqual('addSkip', result._events[1][0])
+ self.assertEqual('no reason given.',
+ result._events[1][2]['reason'].as_text())
+
+ def test_skipException_in_setup_calls_result_addSkip(self):
+ class TestThatRaisesInSetUp(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ self.skip("skipping this test")
+ def test_that_passes(self):
+ pass
+ calls = []
+ result = LoggingResult(calls)
+ test = TestThatRaisesInSetUp("test_that_passes")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "skipping this test"), ('stopTest', case)],
+ calls)
+
+ def test_skipException_in_test_method_calls_result_addSkip(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ self.skip("skipping this test")
+ result = Python27TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "skipping this test"), ('stopTest', case)],
+ result._events)
+
+ def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
+ class SkippingTest(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ raise self.skipException("skipping this test")
+ def test_that_raises_skipException(self):
+ pass
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+    def test_skip_with_old_result_object_calls_addSuccess(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ raise self.skipException("skipping this test")
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_decorator(self):
+ class SkippingTest(TestCase):
+ @skip("skipping this test")
+ def test_that_is_decorated_with_skip(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skip")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipIf_decorator(self):
+ class SkippingTest(TestCase):
+ @skipIf(True, "skipping this test")
+ def test_that_is_decorated_with_skipIf(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipIf")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipUnless_decorator(self):
+ class SkippingTest(TestCase):
+ @skipUnless(False, "skipping this test")
+ def test_that_is_decorated_with_skipUnless(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipUnless")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+
+class TestOnException(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def test_default_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.onException(an_exc_info)
+ events.append(True)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([True]))
+
+ def test_added_handler_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.append)
+ self.onException(an_exc_info)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([an_exc_info]))
+
+ def test_handler_that_raises_is_not_caught(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.index)
+ self.assertThat(lambda: self.onException(an_exc_info),
+ Raises(MatchesException(ValueError)))
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([]))
+
+
+class TestPatchSupport(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ class Case(TestCase):
+ def test(self):
+ pass
+
+ def test_patch(self):
+ # TestCase.patch masks obj.attribute with the new value.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ self.assertEqual('patched', self.foo)
+
+ def test_patch_restored_after_run(self):
+ # TestCase.patch masks obj.attribute with the new value, but restores
+ # the original value after the test is finished.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.run()
+ self.assertEqual('original', self.foo)
+
+ def test_successive_patches_apply(self):
+ # TestCase.patch can be called multiple times per test. Each time you
+ # call it, it overrides the original value.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.patch(self, 'foo', 'second')
+ self.assertEqual('second', self.foo)
+
+ def test_successive_patches_restored_after_run(self):
+ # TestCase.patch restores the original value, no matter how many times
+ # it was called.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.patch(self, 'foo', 'second')
+ test.run()
+ self.assertEqual('original', self.foo)
+
+ def test_patch_nonexistent_attribute(self):
+ # TestCase.patch can be used to patch a non-existent attribute.
+ test = self.Case('test')
+ test.patch(self, 'doesntexist', 'patched')
+ self.assertEqual('patched', self.doesntexist)
+
+ def test_restore_nonexistent_attribute(self):
+        # TestCase.patch can be used to patch a non-existent attribute; after
+        # the test run, the attribute is then removed from the object.
+ test = self.Case('test')
+ test.patch(self, 'doesntexist', 'patched')
+ test.run()
+ marker = object()
+ value = getattr(self, 'doesntexist', marker)
+ self.assertIs(marker, value)
+
+
+class TestTestCaseSuper(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def test_setup_uses_super(self):
+ class OtherBaseCase(unittest.TestCase):
+ setup_called = False
+ def setUp(self):
+ self.setup_called = True
+ super(OtherBaseCase, self).setUp()
+ class OurCase(TestCase, OtherBaseCase):
+ def runTest(self):
+ pass
+ test = OurCase()
+ test.setUp()
+ test.tearDown()
+ self.assertTrue(test.setup_called)
+
+ def test_teardown_uses_super(self):
+ class OtherBaseCase(unittest.TestCase):
+ teardown_called = False
+ def tearDown(self):
+ self.teardown_called = True
+ super(OtherBaseCase, self).tearDown()
+ class OurCase(TestCase, OtherBaseCase):
+ def runTest(self):
+ pass
+ test = OurCase()
+ test.setUp()
+ test.tearDown()
+ self.assertTrue(test.teardown_called)
+
+
+class TestNullary(TestCase):
+
+ def test_repr(self):
+ # The repr() of nullary is the same as the repr() of the wrapped
+ # function.
+ def foo():
+ pass
+ wrapped = Nullary(foo)
+ self.assertEqual(repr(wrapped), repr(foo))
+
+ def test_called_with_arguments(self):
+ # The function is called with the arguments given to Nullary's
+ # constructor.
+ l = []
+ def foo(*args, **kwargs):
+ l.append((args, kwargs))
+ wrapped = Nullary(foo, 1, 2, a="b")
+ wrapped()
+ self.assertEqual(l, [((1, 2), {'a': 'b'})])
+
+ def test_returns_wrapped(self):
+ # Calling Nullary returns whatever the function returns.
+ ret = object()
+ wrapped = Nullary(lambda: ret)
+ self.assertIs(ret, wrapped())
+
+ def test_raises(self):
+ # If the function raises, so does Nullary when called.
+ wrapped = Nullary(lambda: 1/0)
+ self.assertRaises(ZeroDivisionError, wrapped)
+
+
+class TestAttributes(TestCase):
+
+ def test_simple_attr(self):
+ # Adding an attr to a test changes its id().
+ class MyTest(WithAttributes, TestCase):
+ @attr('foo')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]',
+ case.id())
+
+ def test_multiple_attributes(self):
+ class MyTest(WithAttributes, TestCase):
+ # Not sorted here, forward or backwards.
+ @attr('foo', 'quux', 'bar')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual(
+ 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+ case.id())
+
+ def test_multiple_attr_decorators(self):
+ class MyTest(WithAttributes, TestCase):
+ # Not sorted here, forward or backwards.
+ @attr('bar')
+ @attr('quux')
+ @attr('foo')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual(
+ 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+ case.id())
+
+
+class TestDecorateTestCaseResult(TestCase):
+
+ def setUp(self):
+ super(TestDecorateTestCaseResult, self).setUp()
+ self.log = []
+
+ def make_result(self, result):
+ self.log.append(('result', result))
+ return LoggingResult(self.log)
+
+ def test___call__(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+ case(None)
+ case('something')
+ self.assertEqual([('result', None),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ ('result', 'something'),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set())
+ ], self.log)
+
+ def test_run(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+ case.run(None)
+ case.run('something')
+ self.assertEqual([('result', None),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ ('result', 'something'),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set())
+ ], self.log)
+
+ def test_before_after_hooks(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result,
+ before_run=lambda result: self.log.append('before'),
+ after_run=lambda result: self.log.append('after'))
+ case.run(None)
+ case(None)
+ self.assertEqual([
+ ('result', None),
+ 'before',
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ 'after',
+ ('result', None),
+ 'before',
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ 'after',
+ ], self.log)
+
+ def test_other_attribute(self):
+ orig = PlaceHolder('foo')
+ orig.thing = 'fred'
+ case = DecorateTestCaseResult(orig, self.make_result)
+ self.assertEqual('fred', case.thing)
+ self.assertRaises(AttributeError, getattr, case, 'other')
+ case.other = 'barbara'
+ self.assertEqual('barbara', orig.other)
+ del case.thing
+ self.assertRaises(AttributeError, getattr, orig, 'thing')
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py
new file mode 100644
index 00000000000..04aa0873ccd
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py
@@ -0,0 +1,2919 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test TestResults and related things."""
+
+__metaclass__ = type
+
+import codecs
+import datetime
+import doctest
+from itertools import chain, combinations
+import os
+import shutil
+import sys
+import tempfile
+import threading
+from unittest import TestSuite
+import warnings
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ PlaceHolder,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestCase,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TestByTestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ testresult,
+ )
+from testtools.compat import (
+ _b,
+ _get_exception_encoding,
+ _r,
+ _u,
+ advance_iterator,
+ str_is_unicode,
+ StringIO,
+ )
+from testtools.content import (
+ Content,
+ content_from_stream,
+ text_content,
+ TracebackContent,
+ )
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.matchers import (
+ AllMatch,
+ Contains,
+ DocTestMatches,
+ Equals,
+ HasLength,
+ MatchesAny,
+ MatchesException,
+ Raises,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ FullStackRunTest,
+ LoggingResult,
+ run_with_stack_hidden,
+ )
+from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ StreamResult as LoggingStreamResult,
+ )
+from testtools.testresult.real import (
+ _details_to_str,
+ _merge_tags,
+ utc,
+ )
+
+
+def make_erroring_test():
+ class Test(TestCase):
+ def error(self):
+ 1/0
+ return Test("error")
+
+
+def make_failing_test():
+ class Test(TestCase):
+ def failed(self):
+ self.fail("yo!")
+ return Test("failed")
+
+
+def make_mismatching_test():
+ class Test(TestCase):
+ def mismatch(self):
+ self.assertEqual(1, 2)
+ return Test("mismatch")
+
+
+def make_unexpectedly_successful_test():
+ class Test(TestCase):
+ def succeeded(self):
+ self.expectFailure("yo!", lambda: None)
+ return Test("succeeded")
+
+
+def make_test():
+ class Test(TestCase):
+ def test(self):
+ pass
+ return Test("test")
+
+
+def make_exception_info(exceptionFactory, *args, **kwargs):
+ try:
+ raise exceptionFactory(*args, **kwargs)
+ except:
+ return sys.exc_info()
+
+
+class Python26Contract(object):
+
+ def test_fresh_result_is_successful(self):
+ # A result is considered successful before any tests are run.
+ result = self.makeResult()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addError_is_failure(self):
+ # addError fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addFailure_is_failure(self):
+ # addFailure fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addSuccess_is_success(self):
+ # addSuccess does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_stop_sets_shouldStop(self):
+ result = self.makeResult()
+ result.stop()
+ self.assertTrue(result.shouldStop)
+
+
+class Python27Contract(Python26Contract):
+
+ def test_addExpectedFailure(self):
+ # Calling addExpectedFailure(test, exc_info) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+
+ def test_addExpectedFailure_is_success(self):
+ # addExpectedFailure does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addSkipped(self):
+ # Calling addSkip(test, reason) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+
+ def test_addSkip_is_success(self):
+ # addSkip does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addUnexpectedSuccess(self):
+ # Calling addUnexpectedSuccess(test) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_was_successful(self):
+ # addUnexpectedSuccess does not fail the test run in Python 2.7.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startStopTestRun(self):
+ # Calling startTestRun completes ok.
+ result = self.makeResult()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_failfast(self):
+ result = self.makeResult()
+ result.failfast = True
+ class Failing(TestCase):
+ def test_a(self):
+ self.fail('a')
+ def test_b(self):
+ self.fail('b')
+ TestSuite([Failing('test_a'), Failing('test_b')]).run(result)
+ self.assertEqual(1, result.testsRun)
+
+
+class TagsContract(Python27Contract):
+ """Tests to ensure correct tagging behaviour.
+
+ See the subunit docs for guidelines on how this is supposed to work.
+ """
+
+ def test_no_tags_by_default(self):
+ # Results initially have no tags.
+ result = self.makeResult()
+ result.startTestRun()
+ self.assertEqual(frozenset(), result.current_tags)
+
+ def test_adding_tags(self):
+ # Tags are added using 'tags' and thus become visible in
+ # 'current_tags'.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), result.current_tags)
+
+ def test_removing_tags(self):
+ # Tags are removed using 'tags'.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.tags(set(), set(['foo']))
+ self.assertEqual(set(), result.current_tags)
+
+ def test_startTestRun_resets_tags(self):
+ # startTestRun makes a new test run, and thus clears all the tags.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTestRun()
+ self.assertEqual(set(), result.current_tags)
+
+ def test_add_tags_within_test(self):
+ # Tags can be added after a test has run.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(['bar']), set())
+ self.assertEqual(set(['foo', 'bar']), result.current_tags)
+
+ def test_tags_added_in_test_are_reverted(self):
+ # Tags added during a test run are then reverted once that test has
+ # finished.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(['bar']), set())
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertEqual(set(['foo']), result.current_tags)
+
+ def test_tags_removed_in_test(self):
+ # Tags can be removed during tests.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(), set(['foo']))
+ self.assertEqual(set(), result.current_tags)
+
+ def test_tags_removed_in_test_are_restored(self):
+ # Tags removed during tests are restored once that test has finished.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(), set(['foo']))
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertEqual(set(['foo']), result.current_tags)
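+
+    # In all of the tests above, the tags() signature is the same:
+    # result.tags(new_tags, gone_tags) adds new_tags to and removes gone_tags
+    # from result.current_tags, and changes made between startTest and
+    # stopTest are reverted when that test finishes.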
+
+
+class DetailsContract(TagsContract):
+ """Tests for the details API of TestResults."""
+
+ def test_addExpectedFailure_details(self):
+ # Calling addExpectedFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, details={})
+
+ def test_addError_details(self):
+ # Calling addError(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, details={})
+
+ def test_addFailure_details(self):
+ # Calling addFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, details={})
+
+ def test_addSkipped_details(self):
+        # Calling addSkip(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, details={})
+
+ def test_addUnexpectedSuccess_details(self):
+        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self, details={})
+
+ def test_addSuccess_details(self):
+        # Calling addSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self, details={})
+
+
+class FallbackContract(DetailsContract):
+ """When we fallback we take our policy choice to map calls.
+
+ For instance, we map unexpectedSuccess to an error code, not to success.
+ """
+
+ def test_addUnexpectedSuccess_was_successful(self):
+        # addUnexpectedSuccess fails the test run in testtools.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+
+class StartTestRunContract(FallbackContract):
+ """Defines the contract for testtools policy choices.
+
+    That is, things which are not simply extensions to unittest but choices we
+ have made differently.
+ """
+
+ def test_startTestRun_resets_unexpected_success(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_failure(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_errors(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+
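+# The concrete classes below bind each contract to a specific result
+# implementation: they mix a contract class into TestCase and supply a
+# makeResult() factory, so that contract's tests run against that
+# implementation. A hypothetical new result class would get the same coverage
+# with, e.g.:
+#
+#     class TestMyResultContract(TestCase, StartTestRunContract):
+#         def makeResult(self):
+#             return MyResult()   # hypothetical result class
+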
+class TestTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TestResult()
+
+
+class TestMultiTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return MultiTestResult(TestResult(), TestResult())
+
+
+class TestTextTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TextTestResult(StringIO())
+
+
+class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ result_semaphore = threading.Semaphore(1)
+ target = TestResult()
+ return ThreadsafeForwardingResult(target, result_semaphore)
+
+
+class TestExtendedTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return ExtendedTestResult()
+
+
+class TestPython26TestResultContract(TestCase, Python26Contract):
+
+ def makeResult(self):
+ return Python26TestResult()
+
+
+class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python26TestResult())
+
+
+class TestPython27TestResultContract(TestCase, Python27Contract):
+
+ def makeResult(self):
+ return Python27TestResult()
+
+
+class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python27TestResult())
+
+
+class TestAdaptedStreamResult(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TestResultDecorator(TestResult())
+
+
+# DetailsContract is used because ExtendedToStreamDecorator follows Python's
+# handling of unexpected success (uxsuccess).
+class TestStreamToExtendedContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToStreamDecorator(
+ StreamToExtendedDecorator(ExtendedTestResult()))
+
+
+class TestStreamResultContract(object):
+
+ def _make_result(self):
+ raise NotImplementedError(self._make_result)
+
+ def test_startTestRun(self):
+ result = self._make_result()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_files(self):
+ # Test parameter combinations when files are being emitted.
+ result = self._make_result()
+ result.startTestRun()
+ self.addCleanup(result.stopTestRun)
+ now = datetime.datetime.now(utc)
+ inputs = list(dict(
+ eof=True,
+ mime_type="text/plain",
+ route_code=_u("1234"),
+ test_id=_u("foo"),
+ timestamp=now,
+ ).items())
+ param_dicts = self._power_set(inputs)
+ for kwargs in param_dicts:
+ result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
+ result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)
+
+ def test_test_status(self):
+ # Tests non-file attachment parameter combinations.
+ result = self._make_result()
+ result.startTestRun()
+ self.addCleanup(result.stopTestRun)
+ now = datetime.datetime.now(utc)
+ args = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
+ 'uxsuccess', 'success', 'fail', 'skip']]
+ inputs = list(dict(
+ runnable=False,
+ test_tags=set(['quux']),
+ route_code=_u("1234"),
+ timestamp=now,
+ ).items())
+ param_dicts = self._power_set(inputs)
+ for kwargs in param_dicts:
+ for arg in args:
+ result.status(test_id=arg[0], test_status=arg[1], **kwargs)
+
+ def _power_set(self, iterable):
+ "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+ s = list(iterable)
+ param_dicts = []
+ for ss in chain.from_iterable(combinations(s, r) for r in range(len(s)+1)):
+ param_dicts.append(dict(ss))
+ return param_dicts
+
+
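+# Note on TestStreamResultContract._power_set above (worked example): it
+# expands a list of (name, value) pairs into every possible kwargs dict, e.g.
+#   _power_set([('eof', True), ('route_code', '1234')])
+#   -> [{}, {'eof': True}, {'route_code': '1234'},
+#       {'eof': True, 'route_code': '1234'}]
+# so test_files and test_test_status call status() once per combination of
+# the optional arguments.
+
+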
+class TestBaseStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamResult()
+
+
+class TestCopyStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return CopyStreamResult([StreamResult(), StreamResult()])
+
+
+class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return LoggingStreamResult()
+
+
+class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamSummary()
+
+
+class TestStreamTaggerContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamTagger([StreamResult()], add=set(), discard=set())
+
+
+class TestStreamToDictContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamToDict(lambda x:None)
+
+
+class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamToExtendedDecorator(ExtendedTestResult())
+
+
+class TestStreamToQueueContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ queue = Queue()
+ return StreamToQueue(queue, "foo")
+
+
+class TestStreamFailFastContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamFailFast(lambda:None)
+
+
+class TestStreamResultRouterContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamResultRouter(StreamResult())
+
+
+class TestDoubleStreamResultEvents(TestCase):
+
+ def test_startTestRun(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ self.assertEqual([('startTestRun',)], result._events)
+
+ def test_stopTestRun(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ result.stopTestRun()
+ self.assertEqual([('startTestRun',), ('stopTestRun',)], result._events)
+
+ def test_file(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.status(file_name="foo", file_bytes="bar", eof=True, mime_type="text/json",
+ test_id="id", route_code='abc', timestamp=now)
+ self.assertEqual(
+ [('startTestRun',),
+ ('status', 'id', None, None, True, 'foo', 'bar', True, 'text/json', 'abc', now)],
+ result._events)
+
+ def test_status(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.status("foo", "success", test_tags=set(['tag']),
+ runnable=False, route_code='abc', timestamp=now)
+ self.assertEqual(
+ [('startTestRun',),
+ ('status', 'foo', 'success', set(['tag']), False, None, None, False, None, 'abc', now)],
+ result._events)
+
+
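+# For reference: LoggingStreamResult records each status() call as an 11-tuple
+#   ('status', test_id, test_status, test_tags, runnable, file_name,
+#    file_bytes, eof, mime_type, route_code, timestamp)
+# with runnable defaulting to True and eof to False, as the assertions above
+# show; the status tuples asserted through the rest of this module follow the
+# same layout.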
+class TestCopyStreamResultCopies(TestCase):
+
+ def setUp(self):
+ super(TestCopyStreamResultCopies, self).setUp()
+ self.target1 = LoggingStreamResult()
+ self.target2 = LoggingStreamResult()
+ self.targets = [self.target1._events, self.target2._events]
+ self.result = CopyStreamResult([self.target1, self.target2])
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))
+
+ def test_stopTestRun(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.targets,
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+
+ def test_status(self):
+ self.result.startTestRun()
+ now = datetime.datetime.now(utc)
+ self.result.status("foo", "success", test_tags=set(['tag']),
+ runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
+ mime_type="text/json", route_code='abc', timestamp=now)
+ self.assertThat(self.targets,
+ AllMatch(Equals([('startTestRun',),
+ ('status', 'foo', 'success', set(['tag']), False, "foo",
+ b'bar', True, "text/json", 'abc', now)
+ ])))
+
+
+class TestStreamTagger(TestCase):
+
+ def test_adding(self):
+ log = LoggingStreamResult()
+ result = StreamTagger([log], add=['foo'])
+ result.startTestRun()
+ result.status()
+ result.status(test_tags=set(['bar']))
+ result.status(test_tags=None)
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+ ('stopTestRun',),
+ ], log._events)
+
+ def test_discarding(self):
+ log = LoggingStreamResult()
+ result = StreamTagger([log], discard=['foo'])
+ result.startTestRun()
+ result.status()
+ result.status(test_tags=None)
+ result.status(test_tags=set(['foo']))
+ result.status(test_tags=set(['bar']))
+ result.status(test_tags=set(['foo', 'bar']))
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+ ('stopTestRun',),
+ ], log._events)
+
+
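+# Illustrative sketch (a hypothetical helper, not part of the test suite and
+# never invoked): StreamTagger forwards every event with the configured tags
+# added to whatever test_tags the event already carried, as
+# TestStreamTagger.test_adding shows.
+def _illustrate_stream_tagger():
+    log = LoggingStreamResult()
+    tagger = StreamTagger([log], add=['worker-0'])
+    tagger.startTestRun()
+    tagger.status(test_id='example', test_tags=set(['slow']))
+    tagger.stopTestRun()
+    # log._events[1][3] should now be set(['worker-0', 'slow']).
+    return log._events
+
+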
+class TestStreamToDict(TestCase):
+
+ def test_hung_test(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status('foo', 'inprogress')
+ self.assertEqual([], tests)
+ result.stopTestRun()
+ self.assertEqual([
+ {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
+ 'timestamps': [None, None]}
+ ], tests)
+
+ def test_all_terminal_states_reported(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status('success', 'success')
+ result.status('skip', 'skip')
+ result.status('exists', 'exists')
+ result.status('fail', 'fail')
+ result.status('xfail', 'xfail')
+ result.status('uxsuccess', 'uxsuccess')
+ self.assertThat(tests, HasLength(6))
+ self.assertEqual(
+ ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
+ [test['id'] for test in tests])
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(6))
+
+ def test_files_reported(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(file_name="some log.txt",
+ file_bytes=_b("1234 log message"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status(file_name="another file",
+ file_bytes=_b("""Traceback..."""), test_id="foo.bar")
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(1))
+ test = tests[0]
+ self.assertEqual("foo.bar", test['id'])
+ self.assertEqual("unknown", test['status'])
+ details = test['details']
+ self.assertEqual(
+ _u("1234 log message"), details['some log.txt'].as_text())
+ self.assertEqual(
+ _b("Traceback..."),
+ _b('').join(details['another file'].iter_bytes()))
+ self.assertEqual(
+ "application/octet-stream", repr(details['another file'].content_type))
+
+ def test_bad_mime(self):
+        # Testtools used to emit malformed MIME types; this checks that the
+        # specific corruption is handled.
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(file_name="file", file_bytes=b'a',
+ mime_type='text/plain; charset=utf8, language=python',
+ test_id='id')
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(1))
+ test = tests[0]
+ self.assertEqual("id", test['id'])
+ details = test['details']
+ self.assertEqual(_u("a"), details['file'].as_text())
+ self.assertEqual(
+ "text/plain; charset=\"utf8\"",
+ repr(details['file'].content_type))
+
+ def test_timestamps(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(test_id='foo', test_status='inprogress', timestamp="A")
+ result.status(test_id='foo', test_status='success', timestamp="B")
+ result.status(test_id='bar', test_status='inprogress', timestamp="C")
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(2))
+ self.assertEqual(["A", "B"], tests[0]['timestamps'])
+ self.assertEqual(["C", None], tests[1]['timestamps'])
+
+
+class TestExtendedToStreamDecorator(TestCase):
+
+ def test_explicit_time(self):
+ log = LoggingStreamResult()
+ result = ExtendedToStreamDecorator(log)
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status',
+ 'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ None,
+ now),
+ ('status',
+ 'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+ 'success',
+ set(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ None,
+ now),
+ ('stopTestRun',)], log._events)
+
+ def test_wasSuccessful_after_stopTestRun(self):
+ log = LoggingStreamResult()
+ result = ExtendedToStreamDecorator(log)
+ result.startTestRun()
+ result.status(test_id='foo', test_status='fail')
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+
+
+class TestStreamFailFast(TestCase):
+
+ def test_inprogress(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'inprogress')
+
+ def test_exists(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'exists')
+
+ def test_xfail(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'xfail')
+
+ def test_uxsuccess(self):
+ calls = []
+ def hook():
+ calls.append("called")
+ result = StreamFailFast(hook)
+ result.status('foo', 'uxsuccess')
+ result.status('foo', 'uxsuccess')
+ self.assertEqual(['called', 'called'], calls)
+
+ def test_success(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'success')
+
+ def test_fail(self):
+ calls = []
+ def hook():
+ calls.append("called")
+ result = StreamFailFast(hook)
+ result.status('foo', 'fail')
+ result.status('foo', 'fail')
+ self.assertEqual(['called', 'called'], calls)
+
+ def test_skip(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'skip')
+
+
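+# Illustrative sketch (a hypothetical helper, not part of the test suite and
+# never invoked): StreamFailFast only fires its callback for 'fail' and
+# 'uxsuccess' statuses, as the tests above demonstrate.
+def _illustrate_stream_fail_fast():
+    calls = []
+    fail_fast = StreamFailFast(lambda: calls.append('stop'))
+    fail_fast.status('example', 'success')    # no callback
+    fail_fast.status('example', 'fail')       # callback fires
+    fail_fast.status('example', 'uxsuccess')  # callback fires again
+    return calls  # -> ['stop', 'stop']
+
+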
+class TestStreamSummary(TestCase):
+
+ def test_attributes(self):
+ result = StreamSummary()
+ result.startTestRun()
+ self.assertEqual([], result.failures)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.skipped)
+ self.assertEqual([], result.expectedFailures)
+ self.assertEqual([], result.unexpectedSuccesses)
+ self.assertEqual(0, result.testsRun)
+
+ def test_startTestRun(self):
+ result = StreamSummary()
+ result.startTestRun()
+ result.failures.append('x')
+ result.errors.append('x')
+ result.skipped.append('x')
+ result.expectedFailures.append('x')
+ result.unexpectedSuccesses.append('x')
+ result.testsRun = 1
+ result.startTestRun()
+ self.assertEqual([], result.failures)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.skipped)
+ self.assertEqual([], result.expectedFailures)
+ self.assertEqual([], result.unexpectedSuccesses)
+ self.assertEqual(0, result.testsRun)
+
+ def test_wasSuccessful(self):
+        # wasSuccessful returns False if either failures or errors is
+        # non-empty.
+ result = StreamSummary()
+ result.startTestRun()
+ self.assertEqual(True, result.wasSuccessful())
+ result.failures.append('x')
+ self.assertEqual(False, result.wasSuccessful())
+ result.startTestRun()
+ result.errors.append('x')
+ self.assertEqual(False, result.wasSuccessful())
+ result.startTestRun()
+ result.skipped.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+ result.startTestRun()
+ result.expectedFailures.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+ result.startTestRun()
+ result.unexpectedSuccesses.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+
+ def test_stopTestRun(self):
+ result = StreamSummary()
+ # terminal successful codes.
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.status("foo", "success")
+ result.status("bar", "skip")
+ result.status("baz", "exists")
+ result.stopTestRun()
+ self.assertEqual(True, result.wasSuccessful())
+ # Existence is terminal but doesn't count as 'running' a test.
+ self.assertEqual(2, result.testsRun)
+
+ def test_stopTestRun_inprogress_test_fails(self):
+ # Tests inprogress at stopTestRun trigger a failure.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+ self.assertThat(result.errors, HasLength(1))
+ self.assertEqual("foo", result.errors[0][0].id())
+ self.assertEqual("Test did not complete", result.errors[0][1])
+        # Interim state detection handles route codes - while duplicate ids in
+        # one run are undesirable, they may happen (e.g. with repeated tests).
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.status("foo", "inprogress", route_code="A")
+ result.status("foo", "success", route_code="A")
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+
+ def test_status_skip(self):
+        # when skip is seen, a synthetic test is reported with the reason
+        # captured from the 'reason' file attachment, if any.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status(file_name="reason",
+ file_bytes=_b("Missing dependency"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status("foo.bar", "skip")
+ self.assertThat(result.skipped, HasLength(1))
+ self.assertEqual("foo.bar", result.skipped[0][0].id())
+ self.assertEqual(_u("Missing dependency"), result.skipped[0][1])
+
+ def _report_files(self, result):
+ result.status(file_name="some log.txt",
+ file_bytes=_b("1234 log message"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status(file_name="traceback",
+ file_bytes=_b("""Traceback (most recent call last):
+ File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")
+
+ files_message = Equals(_u("""some log.txt: {{{1234 log message}}}
+
+Traceback (most recent call last):
+ File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""))
+
+ def test_status_fail(self):
+        # when fail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+ result = StreamSummary()
+ result.startTestRun()
+ self._report_files(result)
+ result.status("foo.bar", "fail")
+ self.assertThat(result.errors, HasLength(1))
+ self.assertEqual("foo.bar", result.errors[0][0].id())
+ self.assertThat(result.errors[0][1], self.files_message)
+
+ def test_status_xfail(self):
+        # when xfail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+ result = StreamSummary()
+ result.startTestRun()
+ self._report_files(result)
+ result.status("foo.bar", "xfail")
+ self.assertThat(result.expectedFailures, HasLength(1))
+ self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
+ self.assertThat(result.expectedFailures[0][1], self.files_message)
+
+ def test_status_uxsuccess(self):
+ # when uxsuccess is seen, a synthetic test is reported.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status("foo.bar", "uxsuccess")
+ self.assertThat(result.unexpectedSuccesses, HasLength(1))
+ self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
+
+
+class TestTestControl(TestCase):
+
+ def test_default(self):
+ self.assertEqual(False, TestControl().shouldStop)
+
+ def test_stop(self):
+ control = TestControl()
+ control.stop()
+ self.assertEqual(True, control.shouldStop)
+
+
+class TestTestResult(TestCase):
+ """Tests for 'TestResult'."""
+
+ run_tests_with = FullStackRunTest
+
+ def makeResult(self):
+ """Make an arbitrary result for testing."""
+ return TestResult()
+
+ def test_addSkipped(self):
+ # Calling addSkip on a TestResult records the test that was skipped in
+ # its skip_reasons dict.
+ result = self.makeResult()
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for another reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self],
+ _u("Skipped for another reason"):[self]},
+ result.skip_reasons)
+
+ def test_now_datetime_now(self):
+ result = self.makeResult()
+ olddatetime = testresult.real.datetime
+ def restore():
+ testresult.real.datetime = olddatetime
+ self.addCleanup(restore)
+ class Module:
+ pass
+ now = datetime.datetime.now(utc)
+ stubdatetime = Module()
+ stubdatetime.datetime = Module()
+ stubdatetime.datetime.now = lambda tz: now
+ testresult.real.datetime = stubdatetime
+ # Calling _now() looks up the time.
+ self.assertEqual(now, result._now())
+ then = now + datetime.timedelta(0, 1)
+ # Set an explicit datetime, which gets returned from then on.
+ result.time(then)
+ self.assertNotEqual(now, result._now())
+ self.assertEqual(then, result._now())
+ # go back to looking it up.
+ result.time(None)
+ self.assertEqual(now, result._now())
+
+ def test_now_datetime_time(self):
+ result = self.makeResult()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ self.assertEqual(now, result._now())
+
+ def test_traceback_formatting_without_stack_hidden(self):
+ # During the testtools test run, we show our levels of the stack,
+ # because we want to be able to use our test suite to debug our own
+ # code.
+ result = self.makeResult()
+ test = make_erroring_test()
+ test.run(result)
+ self.assertThat(
+ result.errors[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...runtest.py", line ..., in _run_user\n'
+ ' return fn(*args, **kwargs)\n'
+ ' File "...testtools...testcase.py", line ..., in _run_test_method\n'
+ ' return self._get_test_method()()\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
+ ' 1/0\n'
+ 'ZeroDivisionError: ...\n',
+ doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+
+ def test_traceback_formatting_with_stack_hidden(self):
+ result = self.makeResult()
+ test = make_erroring_test()
+ run_with_stack_hidden(True, test.run, result)
+ self.assertThat(
+ result.errors[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
+ ' 1/0\n'
+ 'ZeroDivisionError: ...\n',
+ doctest.ELLIPSIS))
+
+ def test_traceback_formatting_with_stack_hidden_mismatch(self):
+ result = self.makeResult()
+ test = make_mismatching_test()
+ run_with_stack_hidden(True, test.run, result)
+ self.assertThat(
+ result.failures[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in mismatch\n'
+ ' self.assertEqual(1, 2)\n'
+ '...MismatchError: 1 != 2\n',
+ doctest.ELLIPSIS))
+
+ def test_exc_info_to_unicode(self):
+ # subunit upcalls to TestResult._exc_info_to_unicode, so we need to
+ # make sure that it's there.
+ #
+ # See <https://bugs.launchpad.net/testtools/+bug/929063>.
+ test = make_erroring_test()
+ exc_info = make_exception_info(RuntimeError, "foo")
+ result = self.makeResult()
+ text_traceback = result._exc_info_to_unicode(exc_info, test)
+ self.assertEqual(
+ TracebackContent(exc_info, test).as_text(), text_traceback)
+
+
+class TestMultiTestResult(TestCase):
+ """Tests for 'MultiTestResult'."""
+
+ def setUp(self):
+ super(TestMultiTestResult, self).setUp()
+ self.result1 = LoggingResult([])
+ self.result2 = LoggingResult([])
+ self.multiResult = MultiTestResult(self.result1, self.result2)
+
+ def assertResultLogsEqual(self, expectedEvents):
+ """Assert that our test results have received the expected events."""
+ self.assertEqual(expectedEvents, self.result1._events)
+ self.assertEqual(expectedEvents, self.result2._events)
+
+ def test_repr(self):
+ self.assertEqual(
+ '<MultiTestResult (%r, %r)>' % (
+ ExtendedToOriginalDecorator(self.result1),
+ ExtendedToOriginalDecorator(self.result2)),
+ repr(self.multiResult))
+
+ def test_empty(self):
+ # Initializing a `MultiTestResult` doesn't do anything to its
+ # `TestResult`s.
+ self.assertResultLogsEqual([])
+
+ def test_failfast_get(self):
+        # Reading failfast reads from the first result - an arbitrary choice.
+ self.assertEqual(False, self.multiResult.failfast)
+ self.result1.failfast = True
+ self.assertEqual(True, self.multiResult.failfast)
+
+ def test_failfast_set(self):
+        # Writing failfast sets it on all of the results.
+ self.multiResult.failfast = True
+ self.assertEqual(True, self.result1.failfast)
+ self.assertEqual(True, self.result2.failfast)
+
+ def test_shouldStop(self):
+ self.assertFalse(self.multiResult.shouldStop)
+ self.result2.stop()
+ # NB: result1 is not stopped: MultiTestResult has to combine the
+ # values.
+ self.assertTrue(self.multiResult.shouldStop)
+
+ def test_startTest(self):
+ # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
+ # its `TestResult`s.
+ self.multiResult.startTest(self)
+ self.assertResultLogsEqual([('startTest', self)])
+
+ def test_stop(self):
+ self.assertFalse(self.multiResult.shouldStop)
+ self.multiResult.stop()
+ self.assertResultLogsEqual(['stop'])
+
+ def test_stopTest(self):
+ # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
+ # its `TestResult`s.
+ self.multiResult.stopTest(self)
+ self.assertResultLogsEqual([('stopTest', self)])
+
+ def test_addSkipped(self):
+ # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
+ # results.
+ reason = _u("Skipped for some reason")
+ self.multiResult.addSkip(self, reason)
+ self.assertResultLogsEqual([('addSkip', self, reason)])
+
+ def test_addSuccess(self):
+ # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
+ # all its `TestResult`s.
+ self.multiResult.addSuccess(self)
+ self.assertResultLogsEqual([('addSuccess', self)])
+
+ def test_done(self):
+ # Calling `done` on a `MultiTestResult` calls `done` on all its
+ # `TestResult`s.
+ self.multiResult.done()
+ self.assertResultLogsEqual([('done')])
+
+ def test_addFailure(self):
+ # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
+ # all its `TestResult`s.
+ exc_info = make_exception_info(AssertionError, 'failure')
+ self.multiResult.addFailure(self, exc_info)
+ self.assertResultLogsEqual([('addFailure', self, exc_info)])
+
+ def test_addError(self):
+ # Calling `addError` on a `MultiTestResult` calls `addError` on all
+ # its `TestResult`s.
+ exc_info = make_exception_info(RuntimeError, 'error')
+ self.multiResult.addError(self, exc_info)
+ self.assertResultLogsEqual([('addError', self, exc_info)])
+
+ def test_startTestRun(self):
+ # Calling `startTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.startTestRun()
+ self.assertResultLogsEqual([('startTestRun')])
+
+ def test_stopTestRun(self):
+ # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.stopTestRun()
+ self.assertResultLogsEqual([('stopTestRun')])
+
+ def test_stopTestRun_returns_results(self):
+        # `MultiTestResult.stopTestRun` returns a tuple of the return values
+        # of all the `stopTestRun`s that it forwards to.
+ class Result(LoggingResult):
+ def stopTestRun(self):
+ super(Result, self).stopTestRun()
+ return 'foo'
+ multi_result = MultiTestResult(Result([]), Result([]))
+ result = multi_result.stopTestRun()
+ self.assertEqual(('foo', 'foo'), result)
+
+ def test_tags(self):
+ # Calling `tags` on a `MultiTestResult` calls `tags` on all its
+ # `TestResult`s.
+ added_tags = set(['foo', 'bar'])
+ removed_tags = set(['eggs'])
+ self.multiResult.tags(added_tags, removed_tags)
+ self.assertResultLogsEqual([('tags', added_tags, removed_tags)])
+
+ def test_time(self):
+ # the time call is dispatched, not eaten by the base class
+ self.multiResult.time('foo')
+ self.assertResultLogsEqual([('time', 'foo')])
+
+
+class TestTextTestResult(TestCase):
+ """Tests for 'TextTestResult'."""
+
+ def setUp(self):
+ super(TestTextTestResult, self).setUp()
+ self.result = TextTestResult(StringIO())
+
+ def getvalue(self):
+ return self.result.stream.getvalue()
+
+ def test__init_sets_stream(self):
+ result = TextTestResult("fp")
+ self.assertEqual("fp", result.stream)
+
+ def reset_output(self):
+ self.result.stream = StringIO()
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertEqual("Tests running...\n", self.getvalue())
+
+ def test_stopTestRun_count_many(self):
+ test = make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.stream = StringIO()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_single(self):
+ test = make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_zero(self):
+ self.result.startTestRun()
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_current_time(self):
+ test = make_test()
+ now = datetime.datetime.now(utc)
+ self.result.time(now)
+ self.result.startTestRun()
+ self.result.startTest(test)
+ now = now + datetime.timedelta(0, 0, 0, 1)
+ self.result.time(now)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_successful(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_failure(self):
+ test = make_failing_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_error(self):
+ test = make_erroring_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_unexpected_success(self):
+ test = make_unexpectedly_successful_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_shows_details(self):
+ self.skip("Disabled per bug 1188420")
+ def run_tests():
+ self.result.startTestRun()
+ make_erroring_test().run(self.result)
+ make_unexpectedly_successful_test().run(self.result)
+ make_failing_test().run(self.result)
+ self.reset_output()
+ self.result.stopTestRun()
+ run_with_stack_hidden(True, run_tests)
+ self.assertThat(self.getvalue(),
+ DocTestMatches("""...======================================================================
+ERROR: testtools.tests.test_testresult.Test.error
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "...testtools...tests...test_testresult.py", line ..., in error
+ 1/0
+ZeroDivisionError:... divi... by zero...
+======================================================================
+FAIL: testtools.tests.test_testresult.Test.failed
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "...testtools...tests...test_testresult.py", line ..., in failed
+ self.fail("yo!")
+AssertionError: yo!
+======================================================================
+UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
+----------------------------------------------------------------------
+...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
+
+
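+# The tests below pin down ThreadsafeForwardingResult's batching behaviour:
+# startTest/stopTest, tags() and time() calls are buffered per test and the
+# whole batch is flushed to the shared target only when the test's outcome
+# (addSuccess, addError, addFailure or addSkip) arrives, so events from
+# concurrent streams never interleave within a single test.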
+class TestThreadSafeForwardingResult(TestCase):
+ """Tests for `TestThreadSafeForwardingResult`."""
+
+ def make_results(self, n):
+ events = []
+ target = LoggingResult(events)
+ semaphore = threading.Semaphore(1)
+ return [
+ ThreadsafeForwardingResult(target, semaphore)
+ for i in range(n)], events
+
+ def test_nonforwarding_methods(self):
+ # startTest and stopTest are not forwarded because they need to be
+ # batched.
+ [result], events = self.make_results(1)
+ result.startTest(self)
+ result.stopTest(self)
+ self.assertEqual([], events)
+
+ def test_tags_not_forwarded(self):
+ # Tags need to be batched for each test, so they aren't forwarded
+ # until a test runs.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo']), set(['bar']))
+ self.assertEqual([], events)
+
+ def test_global_tags_simple(self):
+        # Tags specified outside of a test are global. When a test's
+ # results are finally forwarded, we send through these global tags
+ # *as* test specific tags, because as a multiplexer there should be no
+ # way for a global tag on an input stream to affect tests from other
+ # streams - we can just always issue test local tags.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo']), set())
+ result.time(1)
+ result.startTest(self)
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['foo']), set()),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_global_tags_complex(self):
+ # Multiple calls to tags() in a global context are buffered until the
+        # next test completes and are issued as part of the test context,
+ # because they cannot be issued until the output result is locked.
+        # The sample data shows them merged together; this is, strictly
+        # speaking, incidental - they could be issued separately (in order)
+        # and still be legitimate.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo', 'bar']), set(['baz', 'qux']))
+ result.tags(set(['cat', 'qux']), set(['bar', 'dog']))
+ result.time(1)
+ result.startTest(self)
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['cat', 'foo', 'qux']), set(['dog', 'bar', 'baz'])),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_local_tags(self):
+ # Any tags set within a test context are forwarded in that test
+ # context when the result is finally forwarded. This means that the
+ # tags for the test are part of the atomic message communicating
+ # everything about that test.
+ [result], events = self.make_results(1)
+ result.time(1)
+ result.startTest(self)
+ result.tags(set(['foo']), set([]))
+ result.tags(set(), set(['bar']))
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_local_tags_dont_leak(self):
+ # A tag set during a test is local to that test and is not set during
+ # the tests that follow.
+ [result], events = self.make_results(1)
+ a, b = PlaceHolder('a'), PlaceHolder('b')
+ result.time(1)
+ result.startTest(a)
+ result.tags(set(['foo']), set([]))
+ result.time(2)
+ result.addSuccess(a)
+ result.stopTest(a)
+ result.time(3)
+ result.startTest(b)
+ result.time(4)
+ result.addSuccess(b)
+ result.stopTest(b)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', a),
+ ('time', 2),
+ ('tags', set(['foo']), set()),
+ ('addSuccess', a),
+ ('stopTest', a),
+ ('time', 3),
+ ('startTest', b),
+ ('time', 4),
+ ('addSuccess', b),
+ ('stopTest', b),
+ ], events)
+
+ def test_startTestRun(self):
+ # Calls to startTestRun are not batched, because we are only
+ # interested in sending tests atomically, not the whole run.
+ [result1, result2], events = self.make_results(2)
+ result1.startTestRun()
+ result2.startTestRun()
+ self.assertEqual(["startTestRun", "startTestRun"], events)
+
+ def test_stopTestRun(self):
+ # Calls to stopTestRun are not batched, because we are only
+ # interested in sending tests atomically, not the whole run.
+ [result1, result2], events = self.make_results(2)
+ result1.stopTestRun()
+ result2.stopTestRun()
+ self.assertEqual(["stopTestRun", "stopTestRun"], events)
+
+ def test_forward_addError(self):
+ # Once we receive an addError event, we forward all of the events for
+ # that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ exc_info = make_exception_info(RuntimeError, 'error')
+ start_time = datetime.datetime.utcfromtimestamp(1.489)
+ end_time = datetime.datetime.utcfromtimestamp(51.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addError(self, exc_info)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addError', self, exc_info),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addFailure(self):
+ # Once we receive an addFailure event, we forward all of the events
+ # for that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ exc_info = make_exception_info(AssertionError, 'failure')
+ start_time = datetime.datetime.utcfromtimestamp(2.489)
+ end_time = datetime.datetime.utcfromtimestamp(3.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addFailure(self, exc_info)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addFailure', self, exc_info),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addSkip(self):
+ # Once we receive an addSkip event, we forward all of the events for
+ # that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ reason = _u("Skipped for some reason")
+ start_time = datetime.datetime.utcfromtimestamp(4.489)
+ end_time = datetime.datetime.utcfromtimestamp(5.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addSkip(self, reason)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addSkip', self, reason),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addSuccess(self):
+ # Once we receive an addSuccess event, we forward all of the events
+ # for that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ start_time = datetime.datetime.utcfromtimestamp(6.489)
+ end_time = datetime.datetime.utcfromtimestamp(7.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addSuccess(self)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_only_one_test_at_a_time(self):
+ # Even if there are multiple ThreadsafeForwardingResults forwarding to
+ # the same target result, the target result only receives the complete
+ # events for one test at a time.
+ [result1, result2], events = self.make_results(2)
+ test1, test2 = self, make_test()
+ start_time1 = datetime.datetime.utcfromtimestamp(1.489)
+ end_time1 = datetime.datetime.utcfromtimestamp(2.476)
+ start_time2 = datetime.datetime.utcfromtimestamp(3.489)
+ end_time2 = datetime.datetime.utcfromtimestamp(4.489)
+ result1.time(start_time1)
+ result2.time(start_time2)
+ result1.startTest(test1)
+ result2.startTest(test2)
+ result1.time(end_time1)
+ result2.time(end_time2)
+ result2.addSuccess(test2)
+ result1.addSuccess(test1)
+ self.assertEqual([
+ # test2 finishes first, and so is flushed first.
+ ('time', start_time2),
+ ('startTest', test2),
+ ('time', end_time2),
+ ('addSuccess', test2),
+ ('stopTest', test2),
+ # test1 finishes next, and thus follows.
+ ('time', start_time1),
+ ('startTest', test1),
+ ('time', end_time1),
+ ('addSuccess', test1),
+ ('stopTest', test1),
+ ], events)
+
+
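+# _merge_tags operates on (new_tags, gone_tags) pairs: an incoming "new" tag
+# overrides a current "gone" tag and vice versa, as the four cases below show.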
+class TestMergeTags(TestCase):
+
+ def test_merge_unseen_gone_tag(self):
+ # If an incoming "gone" tag isn't currently tagged one way or the
+ # other, add it to the "gone" tags.
+ current_tags = set(['present']), set(['missing'])
+ changing_tags = set(), set(['going'])
+ expected = set(['present']), set(['missing', 'going'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_incoming_gone_tag_with_current_new_tag(self):
+ # If one of the incoming "gone" tags is one of the existing "new"
+ # tags, then it overrides the "new" tag, leaving it marked as "gone".
+ current_tags = set(['present', 'going']), set(['missing'])
+ changing_tags = set(), set(['going'])
+ expected = set(['present']), set(['missing', 'going'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_unseen_new_tag(self):
+ current_tags = set(['present']), set(['missing'])
+ changing_tags = set(['coming']), set()
+ expected = set(['coming', 'present']), set(['missing'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_incoming_new_tag_with_current_gone_tag(self):
+ # If one of the incoming "new" tags is currently marked as "gone",
+ # then it overrides the "gone" tag, leaving it marked as "new".
+ current_tags = set(['present']), set(['coming', 'missing'])
+ changing_tags = set(['coming']), set()
+ expected = set(['coming', 'present']), set(['missing'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+
+class TestStreamResultRouter(TestCase):
+
+ def test_start_stop_test_run_no_fallback(self):
+ result = StreamResultRouter()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_no_fallback_errors(self):
+ self.assertRaises(Exception, StreamResultRouter().status, test_id='f')
+
+ def test_fallback_calls(self):
+ fallback = LoggingStreamResult()
+ result = StreamResultRouter(fallback)
+ result.startTestRun()
+ result.status(test_id='foo')
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ('stopTestRun',),
+ ],
+ fallback._events)
+
+ def test_fallback_no_do_start_stop_run(self):
+ fallback = LoggingStreamResult()
+ result = StreamResultRouter(fallback, do_start_stop_run=False)
+ result.startTestRun()
+ result.status(test_id='foo')
+ result.stopTestRun()
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None)
+ ],
+ fallback._events)
+
+ def test_add_rule_bad_policy(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa',
+ route_prefix='0')
+
+ def test_add_rule_extra_policy_arg(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+ route_prefix='0', foo=1)
+
+ def test_add_rule_missing_prefix(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix')
+
+ def test_add_rule_slash_in_prefix(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+ route_prefix='0/')
+
+ def test_add_rule_route_code_consume_False(self):
+ fallback = LoggingStreamResult()
+ target = LoggingStreamResult()
+ router = StreamResultRouter(fallback)
+ router.add_rule(target, 'route_code_prefix', route_prefix='0')
+ router.status(test_id='foo', route_code='0')
+ router.status(test_id='foo', route_code='0/1')
+ router.status(test_id='foo')
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, '0',
+ None),
+ ('status', 'foo', None, None, True, None, None, False, None, '0/1',
+ None),
+ ],
+ target._events)
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ],
+ fallback._events)
+
+ def test_add_rule_route_code_consume_True(self):
+ fallback = LoggingStreamResult()
+ target = LoggingStreamResult()
+ router = StreamResultRouter(fallback)
+ router.add_rule(
+ target, 'route_code_prefix', route_prefix='0', consume_route=True)
+ router.status(test_id='foo', route_code='0') # -> None
+ router.status(test_id='foo', route_code='0/1') # -> 1
+ router.status(test_id='foo', route_code='1') # -> fallback as-is.
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ('status', 'foo', None, None, True, None, None, False, None, '1',
+ None),
+ ],
+ target._events)
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, '1',
+ None),
+ ],
+ fallback._events)
+
+ def test_add_rule_test_id(self):
+ nontest = LoggingStreamResult()
+ test = LoggingStreamResult()
+ router = StreamResultRouter(test)
+ router.add_rule(nontest, 'test_id', test_id=None)
+ router.status(test_id='foo', file_name="bar", file_bytes=b'')
+ router.status(file_name="bar", file_bytes=b'')
+ self.assertEqual([
+ ('status', 'foo', None, None, True, 'bar', b'', False, None, None,
+ None),], test._events)
+ self.assertEqual([
+ ('status', None, None, None, True, 'bar', b'', False, None, None,
+ None),], nontest._events)
+
+ def test_add_rule_do_start_stop_run(self):
+ nontest = LoggingStreamResult()
+ router = StreamResultRouter()
+ router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+ router.startTestRun()
+ router.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('stopTestRun',),
+ ], nontest._events)
+
+ def test_add_rule_do_start_stop_run_after_startTestRun(self):
+ nontest = LoggingStreamResult()
+ router = StreamResultRouter()
+ router.startTestRun()
+ router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+ router.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('stopTestRun',),
+ ], nontest._events)
+
+
+class TestStreamToQueue(TestCase):
+
+ def make_result(self):
+ queue = Queue()
+ return queue, StreamToQueue(queue, "foo")
+
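+    # StreamToQueue wraps every call in an event dict and prefixes its own
+    # route code ("foo" here): a missing route_code becomes "foo", and "bar"
+    # becomes "foo/bar", as test_status checks.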
+ def test_status(self):
+ def check_event(event_dict, route=None, time=None):
+ self.assertEqual("status", event_dict['event'])
+ self.assertEqual("test", event_dict['test_id'])
+ self.assertEqual("fail", event_dict['test_status'])
+ self.assertEqual(set(["quux"]), event_dict['test_tags'])
+ self.assertEqual(False, event_dict['runnable'])
+ self.assertEqual("file", event_dict['file_name'])
+ self.assertEqual(_b("content"), event_dict['file_bytes'])
+ self.assertEqual(True, event_dict['eof'])
+ self.assertEqual("quux", event_dict['mime_type'])
+ self.assertEqual("test", event_dict['test_id'])
+ self.assertEqual(route, event_dict['route_code'])
+ self.assertEqual(time, event_dict['timestamp'])
+ queue, result = self.make_result()
+ result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+ file_name="file", file_bytes=_b("content"), eof=True,
+ mime_type="quux", route_code=None, timestamp=None)
+ self.assertEqual(1, queue.qsize())
+ a_time = datetime.datetime.now(utc)
+ result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+ file_name="file", file_bytes=_b("content"), eof=True,
+ mime_type="quux", route_code="bar", timestamp=a_time)
+ self.assertEqual(2, queue.qsize())
+ check_event(queue.get(False), route="foo", time=None)
+ check_event(queue.get(False), route="foo/bar", time=a_time)
+
+ def testStartTestRun(self):
+ queue, result = self.make_result()
+ result.startTestRun()
+ self.assertEqual(
+ {'event':'startTestRun', 'result':result}, queue.get(False))
+ self.assertTrue(queue.empty())
+
+ def testStopTestRun(self):
+ queue, result = self.make_result()
+ result.stopTestRun()
+ self.assertEqual(
+ {'event':'stopTestRun', 'result':result}, queue.get(False))
+ self.assertTrue(queue.empty())
+
+
+class TestExtendedToOriginalResultDecoratorBase(TestCase):
+
+ def make_26_result(self):
+ self.result = Python26TestResult()
+ self.make_converter()
+
+ def make_27_result(self):
+ self.result = Python27TestResult()
+ self.make_converter()
+
+ def make_converter(self):
+ self.converter = ExtendedToOriginalDecorator(self.result)
+
+ def make_extended_result(self):
+ self.result = ExtendedTestResult()
+ self.make_converter()
+
+ def check_outcome_details(self, outcome):
+ """Call an outcome with a details dict to be passed through."""
+        # This dict is /not/ convertible - that's deliberate, as it should
+ # not hit the conversion code path.
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, details)], self.result._events)
+
+ def get_details_and_string(self):
+ """Get a details dict and expected string."""
+ text1 = lambda: [_b("1\n2\n")]
+ text2 = lambda: [_b("3\n4\n")]
+ bin1 = lambda: [_b("5\n")]
+ details = {'text 1': Content(ContentType('text', 'plain'), text1),
+ 'text 2': Content(ContentType('text', 'strange'), text2),
+ 'bin 1': Content(ContentType('application', 'binary'), bin1)}
+ return (details,
+ ("Binary content:\n"
+ " bin 1 (application/binary)\n"
+ "\n"
+ "text 1: {{{\n"
+ "1\n"
+ "2\n"
+ "}}}\n"
+ "\n"
+ "text 2: {{{\n"
+ "3\n"
+ "4\n"
+ "}}}\n"))
+
+ def check_outcome_details_to_exec_info(self, outcome, expected=None):
+ """Call an outcome with a details dict to be made into exc_info."""
+        # The conversion is done using RemoteError and the string contents
+ # of the text types in the details dict.
+ if not expected:
+ expected = outcome
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ err = self.converter._details_to_exc_info(details)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_details_to_nothing(self, outcome, expected=None):
+ """Call an outcome with a details dict to be swallowed."""
+ if not expected:
+ expected = outcome
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_details_to_string(self, outcome):
+ """Call an outcome with a details dict to be stringified."""
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, err_str)], self.result._events)
+
+ def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
+ """Call an outcome with a details dict to have an arg extracted."""
+ details, _ = self.get_details_and_string()
+ if extra_detail:
+ details.update(extra_detail)
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, arg)], self.result._events)
+
+ def check_outcome_exc_info(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome on a fallback works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ if not expected:
+ expected = outcome
+ getattr(self.converter, outcome)(self)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string_nothing(self, outcome, expected):
+ """Check that calling outcome with a string calls expected."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string(self, outcome):
+ """Check that calling outcome with a string works."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(outcome, self, "foo")], self.result._events)
+
+
+class TestExtendedToOriginalResultDecorator(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_failfast_py26(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.failfast)
+ self.converter.failfast = True
+ self.assertFalse(safe_hasattr(self.converter.decorated, 'failfast'))
+
+ def test_failfast_py27(self):
+ self.make_27_result()
+ self.assertEqual(False, self.converter.failfast)
+ # setting it should write it to the backing result
+ self.converter.failfast = True
+ self.assertEqual(True, self.converter.decorated.failfast)
+
+ def test_progress_py26(self):
+ self.make_26_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_py27(self):
+ self.make_27_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_pyextended(self):
+ self.make_extended_result()
+ self.converter.progress(1, 2)
+ self.assertEqual([('progress', 1, 2)], self.result._events)
+
+ def test_shouldStop(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.shouldStop)
+ self.converter.decorated.stop()
+ self.assertEqual(True, self.converter.shouldStop)
+
+ def test_startTest_py26(self):
+ self.make_26_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_py27(self):
+ self.make_27_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTestRun_py26(self):
+ self.make_26_result()
+ self.converter.startTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_startTestRun_py27(self):
+ self.make_27_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_startTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_stopTest_py26(self):
+ self.make_26_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_py27(self):
+ self.make_27_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTestRun_py26(self):
+ self.make_26_result()
+ self.converter.stopTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_stopTestRun_py27(self):
+ self.make_27_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_stopTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_tags_py26(self):
+ self.make_26_result()
+ self.converter.tags(set([1]), set([2]))
+
+ def test_tags_py27(self):
+ self.make_27_result()
+ self.converter.tags(set([1]), set([2]))
+
+ def test_tags_pyextended(self):
+ self.make_extended_result()
+ self.converter.tags(set([1]), set([2]))
+ self.assertEqual([('tags', set([1]), set([2]))], self.result._events)
+
+ def test_time_py26(self):
+ self.make_26_result()
+ self.converter.time(1)
+
+ def test_time_py27(self):
+ self.make_27_result()
+ self.converter.time(1)
+
+ def test_time_pyextended(self):
+ self.make_extended_result()
+ self.converter.time(1)
+ self.assertEqual([('time', 1)], self.result._events)
+
+
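+# The outcome subclasses below check how ExtendedToOriginalDecorator downgrades
+# each extended outcome for older result APIs: details dicts are converted to
+# exc_info tuples or strings where the backing result can accept them,
+# addExpectedFailure and addSkip fall back to addSuccess on Python 2.6-style
+# results, and addUnexpectedSuccess falls back to addFailure there.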
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addError'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addFailure'
+
+
+class TestExtendedToOriginalAddExpectedFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addExpectedFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
+
+
+
+class TestExtendedToOriginalAddSkip(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSkip'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py27_no_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_string(self.outcome)
+
+ def test_outcome_Extended_py27_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_arg(self.outcome, 'foo',
+ {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSuccess'
+ expected = 'addSuccess'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_nothing(self.outcome, self.expected)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, self.expected)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalAddUnexpectedSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addUnexpectedSuccess'
+ expected = 'addFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalResultOtherAttributes(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_other_attribute(self):
+ class OtherExtendedResult:
+ def foo(self):
+ return 2
+ bar = 1
+ self.result = OtherExtendedResult()
+ self.make_converter()
+ self.assertEqual(1, self.converter.bar)
+ self.assertEqual(2, self.converter.foo())
+
+
+class TestNonAsciiResults(TestCase):
+ """Test all kinds of tracebacks are cleanly interpreted as unicode
+
+ Currently only uses weak "contains" assertions, would be good to be much
+ stricter about the expected output. This would add a few failures for the
+ current release of IronPython for instance, which gets some traceback
+ lines muddled.
+ """
+
+ _sample_texts = (
+ _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
+ _u("\u5357\u7121"), # In ISO 2022 encodings
+ _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
+ )
+
+ _is_pypy = "__pypy__" in sys.builtin_module_names
+ # Everything but Jython shows syntax errors on the current character
+ _error_on_character = os.name != "java" and not _is_pypy
+
+ def _run(self, stream, test):
+ """Run the test, the same as in testtools.run but not to stdout"""
+ result = TextTestResult(stream)
+ result.startTestRun()
+ try:
+ return test.run(result)
+ finally:
+ result.stopTestRun()
+
+ def _write_module(self, name, encoding, contents):
+ """Create Python module on disk with contents in given encoding"""
+ try:
+ # Need to pre-check that the coding is valid or codecs.open drops
+ # the file without closing it which breaks non-refcounted pythons
+ codecs.lookup(encoding)
+ except LookupError:
+ self.skip("Encoding unsupported by implementation: %r" % encoding)
+ f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
+ try:
+ f.write(contents)
+ finally:
+ f.close()
+
+ def _test_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+ """Create and run a test case in a seperate module"""
+ self._setup_external_case(testline, coding, modulelevel, suffix)
+ return self._run_external_case()
+
+ def _setup_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+ """Create a test case in a seperate module"""
+ _, prefix, self.modname = self.id().rsplit(".", 2)
+ self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
+ self.addCleanup(shutil.rmtree, self.dir)
+ self._write_module(self.modname, coding,
+            # Older Python 2 versions don't see a coding declaration in a
+            # docstring, so it has to be in a comment, but then we can't
+            # work around bug: <http://ironpython.codeplex.com/workitem/26940>
+ "# coding: %s\n"
+ "import testtools\n"
+ "%s\n"
+ "class Test(testtools.TestCase):\n"
+ " def runTest(self):\n"
+ " %s\n" % (coding, modulelevel, testline))
+
+ def _run_external_case(self):
+ """Run the prepared test case in a seperate module"""
+ sys.path.insert(0, self.dir)
+ self.addCleanup(sys.path.remove, self.dir)
+ module = __import__(self.modname)
+ self.addCleanup(sys.modules.pop, self.modname)
+ stream = StringIO()
+ self._run(stream, module.Test())
+ return stream.getvalue()
+
+ def _silence_deprecation_warnings(self):
+ """Shut up DeprecationWarning for this test only"""
+ warnings.simplefilter("ignore", DeprecationWarning)
+ self.addCleanup(warnings.filters.remove, warnings.filters[0])
+
+ def _get_sample_text(self, encoding="unicode_internal"):
+ if encoding is None and str_is_unicode:
+ encoding = "unicode_internal"
+ for u in self._sample_texts:
+ try:
+ b = u.encode(encoding)
+ if u == b.decode(encoding):
+ if str_is_unicode:
+ return u, u
+ return u, b
+ except (LookupError, UnicodeError):
+ pass
+ self.skip("Could not find a sample text for encoding: %r" % encoding)
+
+ def _as_output(self, text):
+ return text
+
+ def test_non_ascii_failure_string(self):
+ """Assertion contents can be non-ascii and should get decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_non_ascii_failure_string_via_exec(self):
+ """Assertion via exec can be non-ascii and still gets decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case(
+ testline='exec ("self.fail(%s)")' % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_control_characters_in_failure_string(self):
+ """Control characters in assertions should be escaped"""
+ textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
+ self.expectFailure("Defense against the beeping horror unimplemented",
+ self.assertNotIn, self._as_output("\a\a\a"), textoutput)
+ self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
+
+ def _local_os_error_matcher(self):
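+        # The exception name in the traceback varies by platform and Python
+        # version: 3.3+ raises OSError subclasses such as FileExistsError,
+        # while older Pythons on Windows raised WindowsError.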
+ if sys.version_info > (3, 3):
+ return MatchesAny(Contains("FileExistsError: "),
+ Contains("PermissionError: "))
+ elif os.name != "nt" or sys.version_info < (2, 5):
+ return Contains(self._as_output("OSError: "))
+ else:
+ return Contains(self._as_output("WindowsError: "))
+
+ def test_os_error(self):
+ """Locale error messages from the OS shouldn't break anything"""
+ textoutput = self._test_external_case(
+ modulelevel="import os",
+ testline="os.mkdir('/')")
+ self.assertThat(textoutput, self._local_os_error_matcher())
+
+ def test_assertion_text_shift_jis(self):
+ """A terminal raw backslash in an encoded string is weird but fine"""
+ example_text = _u("\u5341")
+ textoutput = self._test_external_case(
+ coding="shift_jis",
+ testline="self.fail('%s')" % example_text)
+ if str_is_unicode:
+ output_text = example_text
+ else:
+ output_text = example_text.encode("shift_jis").decode(
+ _get_exception_encoding(), "replace")
+ self.assertIn(self._as_output("AssertionError: %s" % output_text),
+ textoutput)
+
+ def test_file_comment_iso2022_jp(self):
+ """Control character escapes must be preserved if valid encoding"""
+ example_text, _ = self._get_sample_text("iso2022_jp")
+ textoutput = self._test_external_case(
+ coding="iso2022_jp",
+ testline="self.fail('Simple') # %s" % example_text)
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unicode_exception(self):
+ """Exceptions that can be formated losslessly as unicode should be"""
+ example_text, _ = self._get_sample_text()
+ exception_class = (
+ "class FancyError(Exception):\n"
+ # A __unicode__ method does nothing on py3k but the default works
+ " def __unicode__(self):\n"
+ " return self.args[0]\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise FancyError(%s)" % _r(example_text))
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unprintable_exception(self):
+ """A totally useless exception instance still prints something"""
+ exception_class = (
+ "class UnprintableError(Exception):\n"
+ " def __str__(self):\n"
+ " raise RuntimeError\n"
+ " def __unicode__(self):\n"
+ " raise RuntimeError\n"
+ " def __repr__(self):\n"
+ " raise RuntimeError\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise UnprintableError")
+ self.assertIn(self._as_output(
+ "UnprintableError: <unprintable UnprintableError object>\n"),
+ textoutput)
+
+ def test_string_exception(self):
+ """Raise a string rather than an exception instance if supported"""
+ if sys.version_info > (2, 6):
+ self.skip("No string exceptions in Python 2.6 or later")
+ elif sys.version_info > (2, 5):
+ self._silence_deprecation_warnings()
+ textoutput = self._test_external_case(testline="raise 'plain str'")
+ self.assertIn(self._as_output("\nplain str\n"), textoutput)
+
+ def test_non_ascii_dirname(self):
+ """Script paths in the traceback can be non-ascii"""
+ text, raw = self._get_sample_text(sys.getfilesystemencoding())
+ textoutput = self._test_external_case(
+ # Avoid bug in Python 3 by giving a unicode source encoding rather
+ # than just ascii which raises a SyntaxError with no other details
+ coding="utf-8",
+ testline="self.fail('Simple')",
+ suffix=raw)
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_syntax_error(self):
+ """Syntax errors should still have fancy special-case formatting"""
+ textoutput = self._test_external_case("exec ('f(a, b c)')")
+ self.assertIn(self._as_output(
+ ' File "<string>", line 1\n'
+ ' f(a, b c)\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: '
+ ), textoutput)
+
+ def test_syntax_error_malformed(self):
+ """Syntax errors with bogus parameters should break anything"""
+ textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
+ self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
+
+ def test_syntax_error_import_binary(self):
+ """Importing a binary file shouldn't break SyntaxError formatting"""
+ if sys.version_info < (2, 5):
+ # Python 2.4 assumes the file is latin-1 and tells you off
+ self._silence_deprecation_warnings()
+ self._setup_external_case("import bad")
+ f = open(os.path.join(self.dir, "bad.py"), "wb")
+ try:
+ f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
+ finally:
+ f.close()
+ textoutput = self._run_external_case()
+ matches_error = MatchesAny(
+ Contains('\nTypeError: '), Contains('\nSyntaxError: '))
+ self.assertThat(textoutput, matches_error)
+
+ def test_syntax_error_line_iso_8859_1(self):
+ """Syntax error on a latin-1 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-1")
+        self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-1",
+ "# coding: iso-8859-1\n! = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' ! = 0 # %s\n'
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_iso_8859_5(self):
+ """Syntax error on a iso-8859-5 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-5")
+        self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-5",
+ "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' %% = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_euc_jp(self):
+ """Syntax error on a euc_jp line shows the line decoded"""
+ text, raw = self._get_sample_text("euc_jp")
+        self._setup_external_case("import bad")
+ self._write_module("bad", "euc_jp",
+ "# coding: euc_jp\n$ = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ # pypy uses cpython's multibyte codecs so has their behavior here
+ if self._is_pypy:
+ self._error_on_character = True
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' $ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_utf_8(self):
+ """Syntax error on a utf-8 line shows the line decoded"""
+ text, raw = self._get_sample_text("utf-8")
+        self._setup_external_case("import bad")
+ self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ 'bad.py", line 1\n'
+ ' ^ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ text), textoutput)
+
+
+class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
+ """Test that running under unittest produces clean ascii strings"""
+
+ def _run(self, stream, test):
+ from unittest import TextTestRunner as _Runner
+ return _Runner(stream).run(test)
+
+ def _as_output(self, text):
+ if str_is_unicode:
+ return text
+ return text.encode("utf-8")
+
+
+class TestDetailsToStr(TestCase):
+
+ def test_no_details(self):
+ string = _details_to_str({})
+ self.assertThat(string, Equals(''))
+
+ def test_binary_content(self):
+ content = content_from_stream(
+ StringIO('foo'), content_type=ContentType('image', 'jpeg'))
+ string = _details_to_str({'attachment': content})
+ self.assertThat(
+ string, Equals("""\
+Binary content:
+ attachment (image/jpeg)
+"""))
+
+ def test_single_line_content(self):
+ content = text_content('foo')
+ string = _details_to_str({'attachment': content})
+ self.assertThat(string, Equals('attachment: {{{foo}}}\n'))
+
+ def test_multi_line_text_content(self):
+ content = text_content('foo\nbar\nbaz')
+ string = _details_to_str({'attachment': content})
+ self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n'))
+
+ def test_special_text_content(self):
+ content = text_content('foo')
+ string = _details_to_str({'attachment': content}, special='attachment')
+ self.assertThat(string, Equals('foo\n'))
+
+ def test_multiple_text_content(self):
+ string = _details_to_str(
+ {'attachment': text_content('foo\nfoo'),
+ 'attachment-1': text_content('bar\nbar')})
+ self.assertThat(
+ string, Equals('attachment: {{{\n'
+ 'foo\n'
+ 'foo\n'
+ '}}}\n'
+ '\n'
+ 'attachment-1: {{{\n'
+ 'bar\n'
+ 'bar\n'
+ '}}}\n'))
+
+ def test_empty_attachment(self):
+ string = _details_to_str({'attachment': text_content('')})
+ self.assertThat(
+ string, Equals("""\
+Empty attachments:
+ attachment
+"""))
+
+ def test_lots_of_different_attachments(self):
+ jpg = lambda x: content_from_stream(
+ StringIO(x), ContentType('image', 'jpeg'))
+ attachments = {
+ 'attachment': text_content('foo'),
+ 'attachment-1': text_content('traceback'),
+ 'attachment-2': jpg('pic1'),
+ 'attachment-3': text_content('bar'),
+ 'attachment-4': text_content(''),
+ 'attachment-5': jpg('pic2'),
+ }
+ string = _details_to_str(attachments, special='attachment-1')
+ self.assertThat(
+ string, Equals("""\
+Binary content:
+ attachment-2 (image/jpeg)
+ attachment-5 (image/jpeg)
+Empty attachments:
+ attachment-4
+
+attachment: {{{foo}}}
+attachment-3: {{{bar}}}
+
+traceback
+"""))
+
+
+class TestByTestResultTests(TestCase):
+
+ def setUp(self):
+ super(TestByTestResultTests, self).setUp()
+ self.log = []
+ self.result = TestByTestResult(self.on_test)
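+        # Stub the clock so successive calls return 0, 1, 2, ..., making the
+        # start/stop times asserted below deterministic.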
+ now = iter(range(5))
+ self.result._now = lambda: advance_iterator(now)
+
+ def assertCalled(self, **kwargs):
+ defaults = {
+ 'test': self,
+ 'tags': set(),
+ 'details': None,
+ 'start_time': 0,
+ 'stop_time': 1,
+ }
+ defaults.update(kwargs)
+ self.assertEqual([defaults], self.log)
+
+ def on_test(self, **kwargs):
+ self.log.append(kwargs)
+
+ def test_no_tests_nothing_reported(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertEqual([], self.log)
+
+ def test_add_success(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success')
+
+ def test_add_success_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_global_tags(self):
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo']))
+
+ def test_local_tags(self):
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.tags(['bar'], [])
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo', 'bar']))
+
+ def test_add_error(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addError(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='error',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_error_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addError(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='error', details=details)
+
+ def test_add_failure(self):
+ self.result.startTest(self)
+ try:
+ self.fail("intentional failure")
+ except self.failureException:
+ failure = sys.exc_info()
+ self.result.addFailure(self, failure)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='failure',
+ details={'traceback': TracebackContent(failure, self)})
+
+ def test_add_failure_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='failure', details=details)
+
+ def test_add_xfail(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addExpectedFailure(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='xfail',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_xfail_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addExpectedFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='xfail', details=details)
+
+ def test_add_unexpected_success(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addUnexpectedSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_add_skip_reason(self):
+ self.result.startTest(self)
+ reason = self.getUniqueString()
+ self.result.addSkip(self, reason)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='skip', details={'reason': text_content(reason)})
+
+ def test_add_skip_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSkip(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='skip', details=details)
+
+ def test_twice(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self, details={'foo': 'bar'})
+ self.result.stopTest(self)
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertEqual(
+ [{'test': self,
+ 'status': 'success',
+ 'start_time': 0,
+ 'stop_time': 1,
+ 'tags': set(),
+ 'details': {'foo': 'bar'}},
+ {'test': self,
+ 'status': 'success',
+ 'start_time': 2,
+ 'stop_time': 3,
+ 'tags': set(),
+ 'details': None},
+ ],
+ self.log)
+
+
+class TestTagger(TestCase):
+
+ def test_tags_tests(self):
+ result = ExtendedTestResult()
+ tagger = Tagger(result, set(['foo']), set(['bar']))
+ test1, test2 = self, make_test()
+ tagger.startTest(test1)
+ tagger.addSuccess(test1)
+ tagger.stopTest(test1)
+ tagger.startTest(test2)
+ tagger.addSuccess(test2)
+ tagger.stopTest(test2)
+ self.assertEqual(
+ [('startTest', test1),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', test1),
+ ('stopTest', test1),
+ ('startTest', test2),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', test2),
+ ('stopTest', test2),
+ ], result._events)
+
+
+class TestTimestampingStreamResult(TestCase):
+
+ def test_startTestRun(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.startTestRun()
+ self.assertEqual([('startTestRun',)], result.targets[0]._events)
+
+ def test_stopTestRun(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.stopTestRun()
+ self.assertEqual([('stopTestRun',)], result.targets[0]._events)
+
+ def test_status_no_timestamp(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.status(test_id="A", test_status="B", test_tags="C",
+ runnable="D", file_name="E", file_bytes=b"F", eof=True,
+ mime_type="G", route_code="H")
+ events = result.targets[0]._events
+ self.assertThat(events, HasLength(1))
+ self.assertThat(events[0], HasLength(11))
+ self.assertEqual(
+ ("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"),
+ events[0][:10])
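+        # The timestamping wrapper appends a datetime as the eleventh field
+        # of the logged event.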
+ self.assertNotEqual(None, events[0][10])
+ self.assertIsInstance(events[0][10], datetime.datetime)
+
+ def test_status_timestamp(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.status(timestamp="F")
+ self.assertEqual("F", result.targets[0]._events[0][10])
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py
new file mode 100644
index 00000000000..e2c33062b2d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py
@@ -0,0 +1,279 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test ConcurrentTestSuite and related things."""
+
+__metaclass__ = type
+
+import doctest
+from functools import partial
+import sys
+import unittest
+
+from extras import try_import
+
+from testtools import (
+ ConcurrentTestSuite,
+ ConcurrentStreamTestSuite,
+ iterate_tests,
+ PlaceHolder,
+ TestByTestResult,
+ TestCase,
+ )
+from testtools.compat import _b, _u
+from testtools.matchers import DocTestMatches
+from testtools.testsuite import FixtureSuite, iterate_tests, sorted_tests
+from testtools.tests.helpers import LoggingResult
+from testtools.testresult.doubles import StreamResult as LoggingStream
+
+FunctionFixture = try_import('fixtures.FunctionFixture')
+
+class Sample(TestCase):
+ def __hash__(self):
+ return id(self)
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+
+
+class TestConcurrentTestSuiteRun(TestCase):
+
+ def test_broken_test(self):
+ log = []
+ def on_test(test, status, start_time, stop_time, tags, details):
+ log.append((test.id(), status, set(details.keys())))
+ class BrokenTest(object):
+ # Simple break - no result parameter to run()
+ def __call__(self):
+ pass
+ run = __call__
+ original_suite = unittest.TestSuite([BrokenTest()])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(TestByTestResult(on_test))
+ self.assertEqual([('broken-runner', 'error', set(['traceback']))], log)
+
+ def test_trivial(self):
+ log = []
+ result = LoggingResult(log)
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(result)
+ # log[0] is the timestamp for the first test starting.
+ test1 = log[1][1]
+ test2 = log[-1][1]
+ self.assertIsInstance(test1, Sample)
+ self.assertIsInstance(test2, Sample)
+ self.assertNotEqual(test1.id(), test2.id())
+
+ def test_wrap_result(self):
+ # ConcurrentTestSuite has a hook for wrapping the per-thread result.
+ wrap_log = []
+
+ def wrap_result(thread_safe_result, thread_number):
+ wrap_log.append(
+ (thread_safe_result.result.decorated, thread_number))
+ return thread_safe_result
+
+ result_log = []
+ result = LoggingResult(result_log)
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(
+ original_suite, self.split_suite, wrap_result=wrap_result)
+ suite.run(result)
+ self.assertEqual(
+ [(result, 0),
+ (result, 1),
+ ], wrap_log)
+ # Smoke test to make sure everything ran OK.
+ self.assertNotEqual([], result_log)
+
+ def split_suite(self, suite):
+ return list(iterate_tests(suite))
+
+
+class TestConcurrentStreamTestSuiteRun(TestCase):
+
+ def test_trivial(self):
+ result = LoggingStream()
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ cases = lambda:[(test1, '0'), (test2, '1')]
+ suite = ConcurrentStreamTestSuite(cases)
+ suite.run(result)
+ def freeze(set_or_none):
+ if set_or_none is None:
+ return set_or_none
+ return frozenset(set_or_none)
+ # Ignore event order: we're testing the code is all glued together,
+ # which just means we can pump events through and they get route codes
+ # added appropriately.
+ self.assertEqual(set([
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method1',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ '0',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method1',
+ 'success',
+ frozenset(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ '0',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method2',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ '1',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method2',
+ 'success',
+ frozenset(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ '1',
+ None,
+ ),
+ ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,)
+ for event in result._events))
+
+ def test_broken_runner(self):
+ # If the object called breaks, the stream is informed about it
+ # regardless.
+ class BrokenTest(object):
+ # broken - no result parameter!
+ def __call__(self):
+ pass
+ def run(self):
+ pass
+ result = LoggingStream()
+ cases = lambda:[(BrokenTest(), '0')]
+ suite = ConcurrentStreamTestSuite(cases)
+ suite.run(result)
+ events = result._events
+ # Check the traceback loosely.
+ self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\
+Traceback (most recent call last):
+ File "...testtools/testsuite.py", line ..., in _run_test
+ test.run(process_result)
+TypeError: run() takes ...1 ...argument...2...given...
+""", doctest.ELLIPSIS))
+ events = [event[0:10] + (None,) for event in events]
+ events[1] = events[1][:6] + (None,) + events[1][7:]
+ self.assertEqual([
+ ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None),
+ ('status', "broken-runner-'0'", None, None, True, 'traceback', None,
+ False,
+ 'text/x-traceback; charset="utf8"; language="python"',
+ '0',
+ None),
+ ('status', "broken-runner-'0'", None, None, True, 'traceback', b'', True,
+ 'text/x-traceback; charset="utf8"; language="python"', '0', None),
+ ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None)
+ ], events)
+
+ def split_suite(self, suite):
+ tests = list(enumerate(iterate_tests(suite)))
+ return [(test, _u(str(pos))) for pos, test in tests]
+
+
+class TestFixtureSuite(TestCase):
+
+ def setUp(self):
+ super(TestFixtureSuite, self).setUp()
+ if FunctionFixture is None:
+ self.skip("Need fixtures")
+
+ def test_fixture_suite(self):
+ log = []
+ class Sample(TestCase):
+ def test_one(self):
+ log.append(1)
+ def test_two(self):
+ log.append(2)
+ fixture = FunctionFixture(
+ lambda: log.append('setUp'),
+ lambda fixture: log.append('tearDown'))
+ suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
+ suite.run(LoggingResult([]))
+ self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
+
+ def test_fixture_suite_sort(self):
+ log = []
+ class Sample(TestCase):
+ def test_one(self):
+ log.append(1)
+ def test_two(self):
+ log.append(2)
+ fixture = FunctionFixture(
+ lambda: log.append('setUp'),
+ lambda fixture: log.append('tearDown'))
+ suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')])
+ self.assertRaises(ValueError, suite.sort_tests)
+
+
+class TestSortedTests(TestCase):
+
+ def test_sorts_custom_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ class Subclass(unittest.TestSuite):
+ def sort_tests(self):
+ self._tests = sorted_tests(self, True)
+ input_suite = Subclass([b, a])
+ suite = sorted_tests(input_suite)
+ self.assertEqual([a, b], list(iterate_tests(suite)))
+ self.assertEqual([input_suite], list(iter(suite)))
+
+ def test_custom_suite_without_sort_tests_works(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+        class Subclass(unittest.TestSuite): pass
+ input_suite = Subclass([b, a])
+ suite = sorted_tests(input_suite)
+ self.assertEqual([b, a], list(iterate_tests(suite)))
+ self.assertEqual([input_suite], list(iter(suite)))
+
+ def test_sorts_simple_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ suite = sorted_tests(unittest.TestSuite([b, a]))
+ self.assertEqual([a, b], list(iterate_tests(suite)))
+
+ def test_duplicate_simple_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ c = PlaceHolder('a')
+ self.assertRaises(
+ ValueError, sorted_tests, unittest.TestSuite([a, b, c]))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py
new file mode 100644
index 00000000000..4305c624a86
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+from __future__ import with_statement
+
+import sys
+
+from testtools import (
+ ExpectedException,
+ TestCase,
+ )
+from testtools.matchers import (
+ AfterPreprocessing,
+ Equals,
+ EndsWith,
+ )
+
+
+class TestExpectedException(TestCase):
+ """Test the ExpectedException context manager."""
+
+ def test_pass_on_raise(self):
+ with ExpectedException(ValueError, 'tes.'):
+ raise ValueError('test')
+
+ def test_pass_on_raise_matcher(self):
+ with ExpectedException(
+ ValueError, AfterPreprocessing(str, Equals('test'))):
+ raise ValueError('test')
+
+ def test_raise_on_text_mismatch(self):
+ try:
+ with ExpectedException(ValueError, 'tes.'):
+ raise ValueError('mismatch')
+ except AssertionError:
+ e = sys.exc_info()[1]
+ self.assertEqual("'mismatch' does not match /tes./", str(e))
+ else:
+ self.fail('AssertionError not raised.')
+
+ def test_raise_on_general_mismatch(self):
+ matcher = AfterPreprocessing(str, Equals('test'))
+ value_error = ValueError('mismatch')
+ try:
+ with ExpectedException(ValueError, matcher):
+ raise value_error
+ except AssertionError:
+ e = sys.exc_info()[1]
+ self.assertEqual(matcher.match(value_error).describe(), str(e))
+ else:
+ self.fail('AssertionError not raised.')
+
+ def test_raise_on_error_mismatch(self):
+ try:
+ with ExpectedException(TypeError, 'tes.'):
+ raise ValueError('mismatch')
+ except ValueError:
+ e = sys.exc_info()[1]
+ self.assertEqual('mismatch', str(e))
+ else:
+ self.fail('ValueError not raised.')
+
+ def test_raise_if_no_exception(self):
+ try:
+ with ExpectedException(TypeError, 'tes.'):
+ pass
+ except AssertionError:
+ e = sys.exc_info()[1]
+ self.assertEqual('TypeError not raised.', str(e))
+ else:
+ self.fail('AssertionError not raised.')
+
+ def test_pass_on_raise_any_message(self):
+ with ExpectedException(ValueError):
+ raise ValueError('whatever')
+
+ def test_annotate(self):
+ def die():
+ with ExpectedException(ValueError, msg="foo"):
+ pass
+ exc = self.assertRaises(AssertionError, die)
+ self.assertThat(exc.args[0], EndsWith(': foo'))
+
+ def test_annotated_matcher(self):
+ def die():
+ with ExpectedException(ValueError, 'bar', msg="foo"):
+ pass
+ exc = self.assertRaises(AssertionError, die)
+ self.assertThat(exc.args[0], EndsWith(': foo'))