Diffstat (limited to 'src/third_party/wiredtiger/test/3rdparty')
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/PKG-INFO | 22
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO | 22
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt | 7
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt | 2
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py | 144
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.cfg | 5
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.py | 33
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/PKG-INFO | 164
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/README.txt | 137
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/discover.py | 480
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.cfg | 2
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.py | 74
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/.gitignore | 35
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/LICENSE | 26
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/MANIFEST.in | 6
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/Makefile | 30
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/NEWS | 27
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/PKG-INFO | 68
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/README.rst | 57
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py | 105
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/__init__.py | 17
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/test_extras.py | 188
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.cfg | 10
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.py | 43
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/MANIFEST.in | 20
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/NEWS | 493
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/PKG-INFO | 483
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/README | 468
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2 | 42
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1 | 47
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter | 165
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls | 60
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify | 48
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats | 32
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags | 27
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk | 240
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml | 36
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit | 59
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit | 26
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py | 1320
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py | 185
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py | 119
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py | 206
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py | 133
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py | 106
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py | 131
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py | 729
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py | 63
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py | 21
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py | 7
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py | 146
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py | 106
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py | 35
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py | 112
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py | 64
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py | 346
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py | 78
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py | 85
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py | 387
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py | 1362
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py | 436
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py | 566
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py | 495
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO | 483
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt | 44
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt | 2
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.cfg | 5
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.py | 66
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/.bzrignore | 5
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Apache-2.0 | 202
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/BSD | 26
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/COPYING | 31
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/GOALS | 25
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/HACKING | 38
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/MANIFEST.in | 10
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Makefile | 19
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/NEWS | 56
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/PKG-INFO | 335
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/README | 316
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/__init__.py | 16
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/example.py | 30
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/test_sample.py | 22
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO | 335
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt | 25
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt | 1
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py | 74
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py | 167
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py | 70
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py | 43
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py | 261
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py | 157
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.cfg | 5
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.py | 31
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/.gitignore | 15
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/LICENSE | 59
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/MANIFEST.in | 10
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/Makefile | 56
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/NEWS | 1281
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/PKG-INFO | 113
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/README.rst | 92
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/Makefile | 89
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt | 0
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt | 0
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/conf.py | 194
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst | 454
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst | 1432
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/hacking.rst | 163
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/index.rst | 36
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/make.bat | 113
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/overview.rst | 101
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.cfg | 10
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.py | 86
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py | 125
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py | 17
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py | 17
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py | 316
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py | 415
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py | 385
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py | 41
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py | 336
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py | 62
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py | 48
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py | 119
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py | 326
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py | 228
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py | 259
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py | 104
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py | 126
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py | 192
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py | 368
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py | 175
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py | 97
-rwxr-xr-x  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py | 399
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/runtest.py | 212
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tags.py | 34
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testcase.py | 942
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py | 49
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py | 174
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py | 1776
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py | 47
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py | 108
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py | 29
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py | 42
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py | 396
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py | 209
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py | 227
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py | 82
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py | 192
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py | 243
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py | 254
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py | 132
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py | 603
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py | 349
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py | 66
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py | 767
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py | 100
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py | 118
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py | 30
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py | 167
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py | 248
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py | 303
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py | 333
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py | 84
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py | 1550
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py | 2919
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py | 279
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py | 88
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py | 317
-rw-r--r--  src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py | 13
175 files changed, 36035 insertions, 0 deletions
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/PKG-INFO
new file mode 100644
index 00000000000..edb06bf7a42
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 1.1
+Name: concurrencytest
+Version: 0.1.2
+Summary: testtools extension for running unittest suites concurrently
+Home-page: https://github.com/cgoldberg/concurrencytest
+Author: Corey Goldberg
+Author-email: cgoldberg _at_ gmail.com
+License: GNU GPLv3
+Download-URL: http://pypi.python.org/pypi/concurrencytest
+Description: UNKNOWN
+Keywords: test,testtools,unittest,concurrency,parallel
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Testing
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO
new file mode 100644
index 00000000000..edb06bf7a42
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 1.1
+Name: concurrencytest
+Version: 0.1.2
+Summary: testtools extension for running unittest suites concurrently
+Home-page: https://github.com/cgoldberg/concurrencytest
+Author: Corey Goldberg
+Author-email: cgoldberg _at_ gmail.com
+License: GNU GPLv3
+Download-URL: http://pypi.python.org/pypi/concurrencytest
+Description: UNKNOWN
+Keywords: test,testtools,unittest,concurrency,parallel
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Testing
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..bf9f692ad1f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt
@@ -0,0 +1,7 @@
+concurrencytest.py
+setup.py
+concurrencytest.egg-info/PKG-INFO
+concurrencytest.egg-info/SOURCES.txt
+concurrencytest.egg-info/dependency_links.txt
+concurrencytest.egg-info/requires.txt
+concurrencytest.egg-info/top_level.txt
\ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt
new file mode 100644
index 00000000000..537ebcbac33
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt
@@ -0,0 +1,2 @@
+python-subunit
+testtools
\ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt
new file mode 100644
index 00000000000..cfc96e6db71
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt
@@ -0,0 +1 @@
+concurrencytest
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py
new file mode 100644
index 00000000000..b3cb52d48d3
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+#
+# Modified by: Corey Goldberg, 2013
+# License: GPLv2+
+#
+# Original code from:
+# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
+# Copyright (C) 2005-2011 Canonical Ltd
+# License: GPLv2+
+
+"""Python testtools extension for running unittest suites concurrently.
+
+The `testtools` project provides a ConcurrentTestSuite class, but does
+not provide a `make_tests` implementation needed to use it.
+
+This allows you to parallelize a test run across a configurable number
+of worker processes. While this can speed up CPU-bound test runs, it is
+mainly useful for IO-bound tests that spend most of their time waiting for
+data to arrive from someplace else and can benefit from concurrency.
+
+Unix only.
+"""
+
+import os
+import sys
+import traceback
+import unittest
+from itertools import cycle
+from multiprocessing import cpu_count
+
+from subunit import ProtocolTestCase, TestProtocolClient
+from subunit.test_results import AutoTimingTestResultDecorator
+
+from testtools import ConcurrentTestSuite, iterate_tests
+
+
+__all__ = [
+    'ConcurrentTestSuite',
+    'fork_for_tests',
+    'partition_tests',
+]
+
+
+CPU_COUNT = cpu_count()
+
+
+def fork_for_tests(concurrency_num=CPU_COUNT):
+    """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
+
+    :param concurrency_num: number of processes to use.
+    """
+    def do_fork(suite):
+        """Take suite and start up multiple runners by forking (Unix only).
+
+        :param suite: TestSuite object.
+
+        :return: An iterable of TestCase-like objects which can each have
+            run(result) called on them to feed tests to result.
+        """
+        result = []
+        test_blocks = partition_tests(suite, concurrency_num)
+        # Clear the tests from the original suite so it doesn't keep them alive
+        suite._tests[:] = []
+        for process_tests in test_blocks:
+            process_suite = unittest.TestSuite(process_tests)
+            # Also clear each split list so the new suite has the only reference
+            process_tests[:] = []
+            c2pread, c2pwrite = os.pipe()
+            pid = os.fork()
+            if pid == 0:
+                try:
+                    stream = os.fdopen(c2pwrite, 'wb', 1)
+                    os.close(c2pread)
+                    # Leave stderr and stdout open so we can see test noise.
+                    # Close stdin so that the child goes away if it decides to
+                    # read from stdin (otherwise it's a roulette to see what
+                    # child actually gets keystrokes for pdb etc).
+                    sys.stdin.close()
+                    subunit_result = AutoTimingTestResultDecorator(
+                        TestProtocolClient(stream)
+                    )
+                    process_suite.run(subunit_result)
+                except:
+                    # Try and report traceback on stream, but exit with error
+                    # even if stream couldn't be created or something else
+                    # goes wrong. The traceback is formatted to a string and
+                    # written in one go to avoid interleaving lines from
+                    # multiple failing children.
+                    try:
+                        stream.write(traceback.format_exc())
+                    finally:
+                        os._exit(1)
+                os._exit(0)
+            else:
+                os.close(c2pwrite)
+                stream = os.fdopen(c2pread, 'rb', 1)
+                test = ProtocolTestCase(stream)
+                result.append(test)
+        return result
+    return do_fork
+
+
+def partition_tests(suite, count):
+    """Partition suite into count lists of tests."""
+    # This just assigns tests in a round-robin fashion. On one hand this
+    # splits up blocks of related tests that might run faster if they shared
+    # resources, but on the other it avoids assigning blocks of slow tests to
+    # just one partition. So the slowest partition shouldn't be much slower
+    # than the fastest.
+    partitions = [list() for _ in range(count)]
+    tests = iterate_tests(suite)
+    for partition, test in zip(cycle(partitions), tests):
+        partition.append(test)
+    return partitions
+
+
+if __name__ == '__main__':
+    import time
+
+    class SampleTestCase(unittest.TestCase):
+        """Dummy tests that sleep for demo."""
+
+        def test_me_1(self):
+            time.sleep(0.5)
+
+        def test_me_2(self):
+            time.sleep(0.5)
+
+        def test_me_3(self):
+            time.sleep(0.5)
+
+        def test_me_4(self):
+            time.sleep(0.5)
+
+    # Load tests from SampleTestCase defined above
+    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
+    runner = unittest.TextTestRunner()
+
+    # Run tests sequentially
+    runner.run(suite)
+
+    # Run same tests across 4 processes
+    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
+    runner.run(concurrent_suite)
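A minimal usage sketch for the module above (assuming concurrencytest.py and its python-subunit and testtools dependencies are importable; the test case and the worker count of 4 are illustrative only):

    import unittest

    from concurrencytest import ConcurrentTestSuite, fork_for_tests


    class ExampleTests(unittest.TestCase):
        # Illustrative tests only; any existing unittest suite works the same way.
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

        def test_list_type(self):
            self.assertIsInstance([], list)


    if __name__ == '__main__':
        suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTests)
        # Fan the suite out over 4 forked worker processes (Unix only).
        concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
        unittest.TextTestRunner(verbosity=2).run(concurrent_suite)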
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.cfg b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.cfg
new file mode 100644
index 00000000000..861a9f55426
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.py b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.py
new file mode 100644
index 00000000000..447dc4110fe
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/concurrencytest-0.1.2/setup.py
@@ -0,0 +1,33 @@
+
+"""setup/install script for concurrencytest"""
+
+
+import os
+from setuptools import setup
+
+
+setup(
+    name='concurrencytest',
+    version='0.1.2',
+    py_modules=['concurrencytest'],
+    install_requires=['python-subunit', 'testtools'],
+    author='Corey Goldberg',
+    author_email='cgoldberg _at_ gmail.com',
+    description='testtools extension for running unittest suites concurrently',
+    url='https://github.com/cgoldberg/concurrencytest',
+    download_url='http://pypi.python.org/pypi/concurrencytest',
+    keywords='test testtools unittest concurrency parallel'.split(),
+    license='GNU GPLv3',
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
+        'Operating System :: POSIX',
+        'Operating System :: Unix',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 3',
+        'Topic :: Software Development :: Libraries :: Python Modules',
+        'Topic :: Software Development :: Testing',
+    ]
+)
diff --git a/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/PKG-INFO
new file mode 100644
index 00000000000..3bcd1661643
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/PKG-INFO
@@ -0,0 +1,164 @@
+Metadata-Version: 1.0
+Name: discover
+Version: 0.4.0
+Summary: Test discovery for unittest. Backported from Python 2.7 for Python 2.4+
+Home-page: http://pypi.python.org/pypi/discover/
+Author: Michael Foord
+Author-email: michael@voidspace.org.uk
+License: UNKNOWN
+Description: This is the test discovery mechanism and ``load_tests`` protocol for unittest
+ backported from Python 2.7 to work with Python 2.4 or more recent (including
+ Python 3).
+
+ .. note::
+
+ Test discovery is just part of what is new in unittest in Python 2.7. All
+ of the new features have been backported to run on Python 2.4-2.6, including
+ test discovery. This is the
+ `unittest2 package <http://pypi.python.org/pypi/unittest2>`_.
+
+ discover can be installed with pip or easy_install. After installing switch the
+ current directory to the top level directory of your project and run::
+
+ python -m discover
+ python discover.py
+
+ (If you have setuptools or `distribute <http://pypi.python.org/pypi/distribute>`_
+ installed you will also have a ``discover`` script available.)
+
+ This will discover all tests (with certain restrictions) from the current
+ directory. The discover module has several options to control its behavior (full
+ usage options are displayed with ``python -m discover -h``)::
+
+ Usage: discover.py [options]
+
+ Options:
+ -v, --verbose Verbose output
+ -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+
+ For test discovery all test modules must be importable from the top
+ level directory of the project.
+
+ For example to use a different pattern for matching test modules run::
+
+ python -m discover -p '*test.py'
+
+ (For UNIX-like shells like Bash you need to put quotes around the test pattern
+ or the shell will attempt to expand the pattern instead of passing it through to
+ discover. On Windows you must *not* put quotes around the pattern as the
+ Windows shell will pass the quotes to discover as well.)
+
+ Test discovery is implemented in ``discover.DiscoveringTestLoader.discover``. As
+ well as using discover as a command line script you can import
+ ``DiscoveringTestLoader``, which is a subclass of ``unittest.TestLoader``, and
+ use it in your test framework.
+
+ This method finds and returns all test modules from the specified start
+ directory, recursing into subdirectories to find them. Only test files that
+ match *pattern* will be loaded. (Using shell style pattern matching.)
+
+ All test modules must be importable from the top level of the project. If
+ the start directory is not the top level directory then the top level
+ directory must be specified separately.
+
+ The ``load_tests`` protocol allows test modules and packages to customize how
+ they are loaded. This is implemented in
+ ``discover.DiscoveringTestLoader.loadTestsFromModule``. If a test module defines
+ a ``load_tests`` function then tests are loaded from the module by calling
+ ``load_tests`` with three arguments: `loader`, `standard_tests`, `None`.
+
+ If a test package name (directory with `__init__.py`) matches the
+ pattern then the package will be checked for a ``load_tests``
+ function. If this exists then it will be called with *loader*, *tests*,
+ *pattern*.
+
+ .. note::
+
+ The default pattern for matching tests is ``test*.py``. The '.py' means
+ that it will match test files and *not* match package names. You can
+ change this by changing the pattern using a command line option like
+ ``-p 'test*'``.
+
+ If ``load_tests`` exists then discovery does *not* recurse into the package,
+ ``load_tests`` is responsible for loading all tests in the package.
+
+ The pattern is deliberately not stored as a loader attribute so that
+ packages can continue discovery themselves. *top_level_dir* is stored so
+ ``load_tests`` does not need to pass this argument in to
+ ``loader.discover()``.
+
+ discover.py is maintained in a google code project (where bugs and feature
+ requests should be posted): http://code.google.com/p/unittest-ext/
+
+ The latest development version of discover.py can be found at:
+ http://code.google.com/p/unittest-ext/source/browse/trunk/discover.py
+
+
+ CHANGELOG
+ =========
+
+
+ 2010/06/11 0.4.0
+ ----------------
+
+ * Addition of a setuptools compatible test collector. Set
+ "test_suite = 'discover.collector'" in setup.py. "setup.py test" will start
+ test discovery with default parameters from the same directory as the setup.py.
+ * Allow test discovery using dotted module names instead of a path.
+ * Addition of a setuptools compatible entrypoint for the discover script.
+ * A faulty load_tests function will not halt test discovery. A failing test
+ is created to report the error.
+ * If test discovery imports a module from the wrong location (usually because
+ the module is globally installed and the user is expecting to run tests
+ against a development version in a different location) then discovery halts
+ with an ImportError and the problem is reported.
+ * Matching files during test discovery is done in
+ ``DiscoveringTestLoader._match_path``. This method can be overridden in
+ subclasses to, for example, match on the full file path or use regular
+ expressions for matching.
+ * Tests for discovery ported from unittest2. (The tests require unittest2 to
+ run.)
+
+ Feature parity with the ``TestLoader`` in Python 2.7 RC 1.
+
+
+ 2010/02/07 0.3.2
+ ----------------
+
+ * If ``load_tests`` exists it is passed the standard tests as a ``TestSuite``
+ rather than a list of tests.
+
+ 2009/09/13 0.3.1
+ ----------------
+
+ * Fixed a problem when a package directory matches the discovery pattern.
+
+ 2009/08/20 0.3.0
+ ----------------
+
+ * Failing to import a file (e.g. due to a syntax error) no longer halts
+ discovery but is reported as a failure.
+ * Discovery will not attempt to import test files whose names are not valid Python
+ identifiers, even if they match the pattern.
+Keywords: unittest,testing,tests
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.0
+Classifier: Programming Language :: Python :: 3.1
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Testing
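A minimal sketch of the ``load_tests`` protocol described above, using a hypothetical test module invented for illustration; for a plain module, discover calls the function with the loader, the already-collected standard tests, and ``None``:

    # tests/test_widgets.py (hypothetical module; names are placeholders)
    import unittest


    class TestWidgets(unittest.TestCase):
        def test_create(self):
            self.assertTrue(True)


    def load_tests(loader, standard_tests, pattern):
        # pattern is None when a plain module is loaded; a matching package's
        # __init__.py receives the actual pattern and may recurse on its own.
        suite = unittest.TestSuite()
        suite.addTests(loader.loadTestsFromTestCase(TestWidgets))
        return suite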
diff --git a/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/README.txt b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/README.txt
new file mode 100644
index 00000000000..8e6b4e11803
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/README.txt
@@ -0,0 +1,137 @@
+This is the test discovery mechanism and ``load_tests`` protocol for unittest
+backported from Python 2.7 to work with Python 2.4 or more recent (including
+Python 3).
+
+.. note::
+
+ Test discovery is just part of what is new in unittest in Python 2.7. All
+ of the new features have been backported to run on Python 2.4-2.6, including
+ test discovery. This is the
+ `unittest2 package <http://pypi.python.org/pypi/unittest2>`_.
+
+discover can be installed with pip or easy_install. After installing switch the
+current directory to the top level directory of your project and run::
+
+ python -m discover
+ python discover.py
+
+(If you have setuptools or `distribute <http://pypi.python.org/pypi/distribute>`_
+installed you will also have a ``discover`` script available.)
+
+This will discover all tests (with certain restrictions) from the current
+directory. The discover module has several options to control its behavior (full
+usage options are displayed with ``python -m discover -h``)::
+
+ Usage: discover.py [options]
+
+ Options:
+ -v, --verbose Verbose output
+ -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+
+ For test discovery all test modules must be importable from the top
+ level directory of the project.
+
+For example to use a different pattern for matching test modules run::
+
+ python -m discover -p '*test.py'
+
+(For UNIX-like shells like Bash you need to put quotes around the test pattern
+or the shell will attempt to expand the pattern instead of passing it through to
+discover. On Windows you must *not* put quotes around the pattern as the
+Windows shell will pass the quotes to discover as well.)
+
+Test discovery is implemented in ``discover.DiscoveringTestLoader.discover``. As
+well as using discover as a command line script you can import
+``DiscoveringTestLoader``, which is a subclass of ``unittest.TestLoader``, and
+use it in your test framework.
+
+This method finds and returns all test modules from the specified start
+directory, recursing into subdirectories to find them. Only test files that
+match *pattern* will be loaded. (Using shell style pattern matching.)
+
+All test modules must be importable from the top level of the project. If
+the start directory is not the top level directory then the top level
+directory must be specified separately.
+
+The ``load_tests`` protocol allows test modules and packages to customize how
+they are loaded. This is implemented in
+``discover.DiscoveringTestLoader.loadTestsFromModule``. If a test module defines
+a ``load_tests`` function then tests are loaded from the module by calling
+``load_tests`` with three arguments: `loader`, `standard_tests`, `None`.
+
+If a test package name (directory with `__init__.py`) matches the
+pattern then the package will be checked for a ``load_tests``
+function. If this exists then it will be called with *loader*, *tests*,
+*pattern*.
+
+.. note::
+
+ The default pattern for matching tests is ``test*.py``. The '.py' means
+ that it will match test files and *not* match package names. You can
+ change this by changing the pattern using a command line option like
+ ``-p 'test*'``.
+
+If ``load_tests`` exists then discovery does *not* recurse into the package,
+``load_tests`` is responsible for loading all tests in the package.
+
+The pattern is deliberately not stored as a loader attribute so that
+packages can continue discovery themselves. *top_level_dir* is stored so
+``load_tests`` does not need to pass this argument in to
+``loader.discover()``.
+
+discover.py is maintained in a google code project (where bugs and feature
+requests should be posted): http://code.google.com/p/unittest-ext/
+
+The latest development version of discover.py can be found at:
+http://code.google.com/p/unittest-ext/source/browse/trunk/discover.py
+
+
+CHANGELOG
+=========
+
+
+2010/06/11 0.4.0
+----------------
+
+* Addition of a setuptools compatible test collector. Set
+ "test_suite = 'discover.collector'" in setup.py. "setup.py test" will start
+ test discovery with default parameters from the same directory as the setup.py.
+* Allow test discovery using dotted module names instead of a path.
+* Addition of a setuptools compatible entrypoint for the discover script.
+* A faulty load_tests function will not halt test discovery. A failing test
+ is created to report the error.
+* If test discovery imports a module from the wrong location (usually because
+ the module is globally installed and the user is expecting to run tests
+ against a development version in a different location) then discovery halts
+ with an ImportError and the problem is reported.
+* Matching files during test discovery is done in
+ ``DiscoveringTestLoader._match_path``. This method can be overridden in
+ subclasses to, for example, match on the full file path or use regular
+ expressions for matching.
+* Tests for discovery ported from unittest2. (The tests require unittest2 to
+ run.)
+
+Feature parity with the ``TestLoader`` in Python 2.7 RC 1.
+
+
+2010/02/07 0.3.2
+----------------
+
+* If ``load_tests`` exists it is passed the standard tests as a ``TestSuite``
+ rather than a list of tests.
+
+2009/09/13 0.3.1
+----------------
+
+* Fixed a problem when a package directory matches the discovery pattern.
+
+2009/08/20 0.3.0
+----------------
+
+* Failing to import a file (e.g. due to a syntax error) no longer halts
+ discovery but is reported as a failure.
+* Discovery will not attempt to import test files whose names are not valid Python
+ identifiers, even if they match the pattern.
\ No newline at end of file
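A rough sketch of the programmatic use mentioned in the README above, assuming the project keeps its tests in a ``tests/`` directory (the directory name and pattern are placeholders):

    import unittest

    from discover import DiscoveringTestLoader

    loader = DiscoveringTestLoader()
    # top_level_dir defaults to the start directory.
    suite = loader.discover(start_dir='tests', pattern='test*.py')
    unittest.TextTestRunner(verbosity=2).run(suite)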
diff --git a/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/discover.py b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/discover.py
new file mode 100755
index 00000000000..c1e20273bcb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/discover.py
@@ -0,0 +1,480 @@
+#! /usr/bin/env python
+# Copyright Michael Foord 2009-2010
+# discover.py
+# Test discovery for unittest
+# Compatible with Python 2.4-2.6 and 3.0-3.1
+# Licensed under the BSD License
+# See: http://pypi.python.org/pypi/discover
+
+import optparse
+import os
+import re
+import sys
+import traceback
+import types
+import unittest
+
+from fnmatch import fnmatch
+
+__version__ = '0.4.0'
+__all__ = ['DiscoveringTestLoader', 'main', 'defaultTestLoader']
+
+
+if hasattr(types, 'ClassType'):
+ class_types = (types.ClassType, type)
+else:
+ # for Python 3.0 compatibility
+ class_types = type
+
+def _CmpToKey(mycmp):
+ 'Convert a cmp= function into a key= function'
+ class K(object):
+ def __init__(self, obj):
+ self.obj = obj
+ def __lt__(self, other):
+ return mycmp(self.obj, other.obj) == -1
+ return K
+
+try:
+ from types import UnboundMethodType
+except ImportError:
+ # Python 3 compatibility
+ UnboundMethodType = types.FunctionType
+
+# what about .pyc or .pyo (etc)
+# we would need to avoid loading the same tests multiple times
+# from '.py', '.pyc' *and* '.pyo'
+VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
+
+
+def _make_failed_import_test(name, suiteClass):
+ message = 'Failed to import test module: %s' % name
+ if hasattr(traceback, 'format_exc'):
+ # Python 2.3 compatibility
+ # format_exc returns two frames of discover.py as well
+ message += '\n%s' % traceback.format_exc()
+ return _make_failed_test('ModuleImportFailure', name, ImportError(message),
+ suiteClass)
+
+def _make_failed_load_tests(name, exception, suiteClass):
+ return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
+
+def _make_failed_test(classname, methodname, exception, suiteClass):
+ def testFailure(self):
+ raise exception
+ attrs = {methodname: testFailure}
+ TestClass = type(classname, (unittest.TestCase,), attrs)
+ return suiteClass((TestClass(methodname),))
+
+try:
+ cmp
+except NameError:
+ @staticmethod
+ def cmp(x, y):
+ """Return -1 if x < y, 0 if x == y and 1 if x > y"""
+ return (x > y) - (x < y)
+
+
+class DiscoveringTestLoader(unittest.TestLoader):
+ """
+ This class is responsible for loading tests according to various criteria
+ and returning them wrapped in a TestSuite
+ """
+ testMethodPrefix = 'test'
+ sortTestMethodsUsing = cmp
+ suiteClass = unittest.TestSuite
+ _top_level_dir = None
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """Return a suite of all tests cases contained in testCaseClass"""
+ if issubclass(testCaseClass, unittest.TestSuite):
+ raise TypeError("Test cases should not be derived from TestSuite."
+ " Maybe you meant to derive from TestCase?")
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
+ loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
+ return loaded_suite
+
+ def loadTestsFromModule(self, module, use_load_tests=True):
+ """Return a suite of all tests cases contained in the given module"""
+ tests = []
+ for name in dir(module):
+ obj = getattr(module, name)
+ if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+ tests.append(self.loadTestsFromTestCase(obj))
+
+ load_tests = getattr(module, 'load_tests', None)
+ tests = self.suiteClass(tests)
+ if use_load_tests and load_tests is not None:
+ try:
+ return load_tests(self, tests, None)
+ except:
+ ExceptionClass, e = sys.exc_info()[:2]
+ if not isinstance(e, Exception):
+ # for BaseException exceptions
+ raise
+ return _make_failed_load_tests(module.__name__, e,
+ self.suiteClass)
+ return tests
+
+ def loadTestsFromName(self, name, module=None):
+ """Return a suite of all tests cases given a string specifier.
+
+ The name may resolve either to a module, a test case class, a
+ test method within a test case class, or a callable object which
+ returns a TestCase or TestSuite instance.
+
+ The method optionally resolves the names relative to a given module.
+ """
+ parts = name.split('.')
+ if module is None:
+ parts_copy = parts[:]
+ while parts_copy:
+ try:
+ module = __import__('.'.join(parts_copy))
+ break
+ except ImportError:
+ del parts_copy[-1]
+ if not parts_copy:
+ raise
+ parts = parts[1:]
+ obj = module
+ for part in parts:
+ parent, obj = obj, getattr(obj, part)
+
+ if isinstance(obj, types.ModuleType):
+ return self.loadTestsFromModule(obj)
+ elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+ return self.loadTestsFromTestCase(obj)
+ elif (isinstance(obj, UnboundMethodType) and
+ isinstance(parent, type) and
+ issubclass(parent, unittest.TestCase)):
+ name = obj.__name__
+ inst = parent(name)
+ # static methods follow a different path
+ if not isinstance(getattr(inst, name), types.FunctionType):
+ return self.suiteClass([inst])
+ elif isinstance(obj, unittest.TestSuite):
+ return obj
+ if hasattr(obj, '__call__'):
+ test = obj()
+ if isinstance(test, unittest.TestSuite):
+ return test
+ elif isinstance(test, unittest.TestCase):
+ return self.suiteClass([test])
+ else:
+ raise TypeError("calling %s returned %s, not a test" %
+ (obj, test))
+ else:
+ raise TypeError("don't know how to make test from: %s" % obj)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Return a suite of all tests cases found using the given sequence
+ of string specifiers. See 'loadTestsFromName()'.
+ """
+ suites = [self.loadTestsFromName(name, module) for name in names]
+ return self.suiteClass(suites)
+
+ def getTestCaseNames(self, testCaseClass):
+ """Return a sorted sequence of method names found within testCaseClass
+ """
+ def isTestMethod(attrname, testCaseClass=testCaseClass,
+ prefix=self.testMethodPrefix):
+ return attrname.startswith(prefix) and \
+ hasattr(getattr(testCaseClass, attrname), '__call__')
+ testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
+ if self.sortTestMethodsUsing:
+ testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
+ return testFnNames
+
+ def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
+ """Find and return all test modules from the specified start
+ directory, recursing into subdirectories to find them. Only test files
+ that match the pattern will be loaded. (Using shell style pattern
+ matching.)
+
+ All test modules must be importable from the top level of the project.
+ If the start directory is not the top level directory then the top
+ level directory must be specified separately.
+
+ If a test package name (directory with '__init__.py') matches the
+ pattern then the package will be checked for a 'load_tests' function. If
+ this exists then it will be called with loader, tests, pattern.
+
+ If load_tests exists then discovery does *not* recurse into the package,
+ load_tests is responsible for loading all tests in the package.
+
+ The pattern is deliberately not stored as a loader attribute so that
+ packages can continue discovery themselves. top_level_dir is stored so
+ load_tests does not need to pass this argument in to loader.discover().
+ """
+ set_implicit_top = False
+ if top_level_dir is None and self._top_level_dir is not None:
+ # make top_level_dir optional if called from load_tests in a package
+ top_level_dir = self._top_level_dir
+ elif top_level_dir is None:
+ set_implicit_top = True
+ top_level_dir = start_dir
+
+ top_level_dir = os.path.abspath(top_level_dir)
+
+ if not top_level_dir in sys.path:
+ # all test modules must be importable from the top level directory
+ # should we *unconditionally* put the start directory in first
+ # in sys.path to minimise likelihood of conflicts between installed
+ # modules and development versions?
+ sys.path.insert(0, top_level_dir)
+ self._top_level_dir = top_level_dir
+
+ is_not_importable = False
+ if os.path.isdir(os.path.abspath(start_dir)):
+ start_dir = os.path.abspath(start_dir)
+ if start_dir != top_level_dir:
+ is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
+ else:
+ # support for discovery from dotted module names
+ try:
+ __import__(start_dir)
+ except ImportError:
+ is_not_importable = True
+ else:
+ the_module = sys.modules[start_dir]
+ top_part = start_dir.split('.')[0]
+ start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
+ if set_implicit_top:
+ self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
+ sys.path.remove(top_level_dir)
+
+ if is_not_importable:
+ raise ImportError('Start directory is not importable: %r' % start_dir)
+
+ tests = list(self._find_tests(start_dir, pattern))
+ return self.suiteClass(tests)
+
+ def _get_name_from_path(self, path):
+ path = os.path.splitext(os.path.normpath(path))[0]
+
+ _relpath = relpath(path, self._top_level_dir)
+ assert not os.path.isabs(_relpath), "Path must be within the project"
+ assert not _relpath.startswith('..'), "Path must be within the project"
+
+ name = _relpath.replace(os.path.sep, '.')
+ return name
+
+ def _get_module_from_name(self, name):
+ __import__(name)
+ return sys.modules[name]
+
+ def _match_path(self, path, full_path, pattern):
+ # override this method to use alternative matching strategy
+ return fnmatch(path, pattern)
+
+ def _find_tests(self, start_dir, pattern):
+ """Used by discovery. Yields test suites it loads."""
+ paths = os.listdir(start_dir)
+
+ for path in paths:
+ full_path = os.path.join(start_dir, path)
+ if os.path.isfile(full_path):
+ if not VALID_MODULE_NAME.match(path):
+ # valid Python identifiers only
+ continue
+ if not self._match_path(path, full_path, pattern):
+ continue
+ # if the test file matches, load it
+ name = self._get_name_from_path(full_path)
+ try:
+ module = self._get_module_from_name(name)
+ except:
+ yield _make_failed_import_test(name, self.suiteClass)
+ else:
+ mod_file = os.path.abspath(getattr(module, '__file__', full_path))
+ realpath = os.path.splitext(mod_file)[0]
+ fullpath_noext = os.path.splitext(full_path)[0]
+ if realpath.lower() != fullpath_noext.lower():
+ module_dir = os.path.dirname(realpath)
+ mod_name = os.path.splitext(os.path.basename(full_path))[0]
+ expected_dir = os.path.dirname(full_path)
+ msg = ("%r module incorrectly imported from %r. Expected %r. "
+ "Is this module globally installed?")
+ raise ImportError(msg % (mod_name, module_dir, expected_dir))
+ yield self.loadTestsFromModule(module)
+ elif os.path.isdir(full_path):
+ if not os.path.isfile(os.path.join(full_path, '__init__.py')):
+ continue
+
+ load_tests = None
+ tests = None
+ if fnmatch(path, pattern):
+ # only check load_tests if the package directory itself matches the filter
+ name = self._get_name_from_path(full_path)
+ package = self._get_module_from_name(name)
+ load_tests = getattr(package, 'load_tests', None)
+ tests = self.loadTestsFromModule(package, use_load_tests=False)
+
+ if load_tests is None:
+ if tests is not None:
+ # tests loaded from package file
+ yield tests
+ # recurse into the package
+ for test in self._find_tests(full_path, pattern):
+ yield test
+ else:
+ try:
+ yield load_tests(self, tests, pattern)
+ except:
+ ExceptionClass, e = sys.exc_info()[:2]
+ if not isinstance(e, Exception):
+ # for BaseException exceptions
+ raise
+ yield _make_failed_load_tests(package.__name__, e,
+ self.suiteClass)
+
+
+##############################################
+# relpath implementation taken from Python 2.7
+
+if not hasattr(os.path, 'relpath'):
+ if os.path is sys.modules.get('ntpath'):
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+ if start_list[0].lower() != path_list[0].lower():
+ unc_path, rest = os.path.splitunc(path)
+ unc_start, rest = os.path.splitunc(start)
+ if bool(unc_path) ^ bool(unc_start):
+ raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+ % (path, start))
+ else:
+ raise ValueError("path is on drive %s, start on drive %s"
+ % (path_list[0], start_list[0]))
+ # Work out how much of the filepath is shared by start and path.
+ for i in range(min(len(start_list), len(path_list))):
+ if start_list[i].lower() != path_list[i].lower():
+ break
+ else:
+ i += 1
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+ else:
+ # default to posixpath definition
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+else:
+ from os.path import relpath
+
+#############################################
+
+
+USAGE = """\
+Usage: discover.py [options]
+
+Options:
+ -v, --verbose Verbose output
+ -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+
+For test discovery all test modules must be importable from the top
+level directory of the project.
+"""
+
+def _usage_exit(msg=None):
+ if msg:
+ print (msg)
+ print (USAGE)
+ sys.exit(2)
+
+
+def _do_discovery(argv, verbosity, Loader):
+ # handle command line args for test discovery
+ parser = optparse.OptionParser()
+ parser.add_option('-v', '--verbose', dest='verbose', default=False,
+ help='Verbose output', action='store_true')
+ parser.add_option('-s', '--start-directory', dest='start', default='.',
+ help="Directory to start discovery ('.' default)")
+ parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
+ help="Pattern to match tests ('test*.py' default)")
+ parser.add_option('-t', '--top-level-directory', dest='top', default=None,
+ help='Top level directory of project (defaults to start directory)')
+
+ options, args = parser.parse_args(argv)
+ if len(args) > 3:
+ _usage_exit()
+
+ for name, value in zip(('start', 'pattern', 'top'), args):
+ setattr(options, name, value)
+
+ if options.verbose:
+ verbosity = 2
+
+ start_dir = options.start
+ pattern = options.pattern
+ top_level_dir = options.top
+
+ loader = Loader()
+ return loader.discover(start_dir, pattern, top_level_dir), verbosity
+
+
+def _run_tests(tests, testRunner, verbosity, exit):
+ if isinstance(testRunner, class_types):
+ try:
+ testRunner = testRunner(verbosity=verbosity)
+ except TypeError:
+ # didn't accept the verbosity argument
+ testRunner = testRunner()
+ result = testRunner.run(tests)
+ if exit:
+ sys.exit(not result.wasSuccessful())
+ return result
+
+
+def main(argv=None, testRunner=None, testLoader=None, exit=True, verbosity=1):
+ if testLoader is None:
+ testLoader = DiscoveringTestLoader
+ if testRunner is None:
+ testRunner = unittest.TextTestRunner
+ if argv is None:
+ argv = sys.argv[1:]
+
+ tests, verbosity = _do_discovery(argv, verbosity, testLoader)
+ return _run_tests(tests, testRunner, verbosity, exit)
+
+defaultTestLoader = DiscoveringTestLoader()
+
+def collector():
+ # import __main__ triggers code re-execution
+ __main__ = sys.modules['__main__']
+ setupDir = os.path.abspath(os.path.dirname(__main__.__file__))
+ return defaultTestLoader.discover(setupDir)
+
+if __name__ == '__main__':
+ if sys.argv[0] is None:
+ # fix for weird behaviour when run with python -m
+ # from a zipped egg.
+ sys.argv[0] = 'discover.py'
+ main()
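+
+# A brief usage sketch (illustrative only; not part of the upstream module). The
+# module is normally driven either from the command line, as documented in USAGE
+# above, or from a setup.py through the ``collector`` hook that this package's
+# setup.py registers as ``test_suite = 'discover.collector'``. The project layout
+# below is hypothetical.
+#
+#   $ python discover.py -v -s tests -p 'test*.py' -t .
+#
+#   # setup.py of a hypothetical project using discover:
+#   from setuptools import setup
+#   setup(name='example', py_modules=['example'],
+#         test_suite='discover.collector')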
diff --git a/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.cfg b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.cfg
new file mode 100644
index 00000000000..51964789b3d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.cfg
@@ -0,0 +1,2 @@
+[sdist]
+force-manifest = 1
\ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.py b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.py
new file mode 100644
index 00000000000..2a0a0e7c218
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/discover-0.4.0/setup.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# setup.py
+# Install script for discover.py
+# Copyright (C) 2009-2010 Michael Foord
+# E-mail: michael AT voidspace DOT org DOT uk
+
+# This software is licensed under the terms of the BSD license.
+# http://www.voidspace.org.uk/python/license.shtml
+
+import sys
+from distutils.core import setup
+from discover import __version__ as VERSION
+
+
+NAME = 'discover'
+MODULES = ('discover',)
+DESCRIPTION = 'Test discovery for unittest. Backported from Python 2.7 for Python 2.4+'
+URL = 'http://pypi.python.org/pypi/discover/'
+CLASSIFIERS = [
+ 'Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: BSD License',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.4',
+ 'Programming Language :: Python :: 2.5',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.0',
+ 'Programming Language :: Python :: 3.1',
+ 'Programming Language :: Python :: 3.2',
+ 'Operating System :: OS Independent',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Software Development :: Testing',
+]
+AUTHOR = 'Michael Foord'
+AUTHOR_EMAIL = 'michael@voidspace.org.uk'
+KEYWORDS = "unittest, testing, tests".split(', ')
+LONG_DESCRIPTION = open('README.txt').read()
+
+
+params = dict(
+ name=NAME,
+ version=VERSION,
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ author=AUTHOR,
+ author_email=AUTHOR_EMAIL,
+ url=URL,
+ py_modules=MODULES,
+ classifiers=CLASSIFIERS,
+ keywords=KEYWORDS
+)
+
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+else:
+ params.update(dict(
+ entry_points = {
+ 'console_scripts': [
+ 'discover = discover:main',
+ ],
+ },
+ ))
+ params['test_suite'] = 'discover.collector'
+
+setup(**params)
+
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/.gitignore b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/.gitignore
new file mode 100644
index 00000000000..cfc114cbe95
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/.gitignore
@@ -0,0 +1,35 @@
+*.py[co]
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+MANIFEST
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+
+#Translations
+*.mo
+
+#Mr Developer
+.mr.developer.cfg
+
+# editors
+*.swp
+*~
+
+# Testrepository
+.testrepository
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/LICENSE b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/LICENSE
new file mode 100644
index 00000000000..4dfca452e1a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2010-2012 the extras authors.
+
+The extras authors are:
+ * Jonathan Lange
+ * Martin Pool
+ * Robert Collins
+
+and are collectively referred to as "extras developers".
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/MANIFEST.in b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/MANIFEST.in
new file mode 100644
index 00000000000..da2696e2430
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/MANIFEST.in
@@ -0,0 +1,6 @@
+include LICENSE
+include Makefile
+include MANIFEST.in
+include NEWS
+include README.rst
+include .gitignore
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/Makefile b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/Makefile
new file mode 100644
index 00000000000..270e8d11546
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/Makefile
@@ -0,0 +1,30 @@
+# See README.rst for copyright and licensing details.
+
+PYTHON=python
+SOURCES=$(shell find extras -name "*.py")
+
+check:
+ PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run extras.tests.test_suite
+
+TAGS: ${SOURCES}
+ ctags -e -R extras/
+
+tags: ${SOURCES}
+ ctags -R extras/
+
+clean:
+ rm -f TAGS tags
+ find extras -name "*.pyc" -exec rm '{}' \;
+
+### Documentation ###
+
+apidocs:
+ # pydoctor emits deprecation warnings under Ubuntu 10.10 LTS
+ PYTHONWARNINGS='ignore::DeprecationWarning' \
+ pydoctor --make-html --add-package extras \
+ --docformat=restructuredtext --project-name=extras \
+ --project-url=https://launchpad.net/extras
+
+
+.PHONY: apidocs
+.PHONY: check clean
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/NEWS b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/NEWS
new file mode 100644
index 00000000000..60713b8efa6
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/NEWS
@@ -0,0 +1,27 @@
+extras NEWS
++++++++++++
+
+Changes and improvements to extras_, grouped by release.
+
+NEXT
+~~~~
+
+0.0.3
+~~~~~
+
+* Extras setup.py would break on older testtools releases, which could break
+ installs of newer testtools due to extras then failing to install.
+ (Robert Collins)
+
+0.0.2
+~~~~~
+
+* Fix Makefile to not have cruft leftover from testtools.
+
+0.0.1
+~~~~~
+
+* Initial extraction from testtools.
+
+
+.. _extras: http://pypi.python.org/pypi/extras
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/PKG-INFO
new file mode 100644
index 00000000000..645b7c7e619
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/PKG-INFO
@@ -0,0 +1,68 @@
+Metadata-Version: 1.1
+Name: extras
+Version: 0.0.3
+Summary: Useful extra bits for Python - things that should be in the standard library
+Home-page: https://github.com/testing-cabal/extras
+Author: Testing cabal
+Author-email: testtools-dev@lists.launchpad.net
+License: UNKNOWN
+Description: ======
+ extras
+ ======
+
+ extras is a set of extensions to the Python standard library, originally
+ written to make the code within testtools cleaner, but now split out for
+ general use outside of a testing context.
+
+
+ Documentation
+ -------------
+
+ pydoc extras is your friend. extras currently contains the following functions:
+
+ * try_import
+
+ * try_imports
+
+ * safe_hasattr
+
+        These do what their names suggest.
+
+
+ Licensing
+ ---------
+
+ This project is distributed under the MIT license and copyright is owned by
+ the extras authors. See LICENSE for details.
+
+
+ Required Dependencies
+ ---------------------
+
+ * Python 2.6+ or 3.0+
+
+
+ Bug reports and patches
+ -----------------------
+
+ Please report bugs using github issues at <https://github.com/testing-cabal/extras>.
+ Patches can also be submitted via github. You can mail the authors directly
+ via the mailing list testtools-dev@lists.launchpad.net. (Note that Launchpad
+ discards email from unknown addresses - be sure to sign up for a Launchpad
+ account before mailing the list, or your mail will be silently discarded).
+
+
+ History
+ -------
+
+ extras used to be testtools.helpers, and was factored out when folk wanted to
+ use it separately.
+
+
+ Thanks
+ ------
+
+ * Martin Pool
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/README.rst b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/README.rst
new file mode 100644
index 00000000000..7d3f10ba93c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/README.rst
@@ -0,0 +1,57 @@
+======
+extras
+======
+
+extras is a set of extensions to the Python standard library, originally
+written to make the code within testtools cleaner, but now split out for
+general use outside of a testing context.
+
+
+Documentation
+-------------
+
+pydoc extras is your friend. extras currently contains the following functions:
+
+* try_import
+
+* try_imports
+
+* safe_hasattr
+
+These do what their names suggest.
+
+
+Licensing
+---------
+
+This project is distributed under the MIT license and copyright is owned by
+the extras authors. See LICENSE for details.
+
+
+Required Dependencies
+---------------------
+
+ * Python 2.6+ or 3.0+
+
+
+Bug reports and patches
+-----------------------
+
+Please report bugs using github issues at <https://github.com/testing-cabal/extras>.
+Patches can also be submitted via github. You can mail the authors directly
+via the mailing list testtools-dev@lists.launchpad.net. (Note that Launchpad
+discards email from unknown addresses - be sure to sign up for a Launchpad
+account before mailing the list, or your mail will be silently discarded).
+
+
+History
+-------
+
+extras used to be testtools.helpers, and was factored out when folk wanted to
+use it separately.
+
+
+Thanks
+------
+
+ * Martin Pool
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py
new file mode 100644
index 00000000000..2d34b5258de
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
+
+"""Extensions to the Python standard library."""
+
+import sys
+
+__all__ = [
+ 'safe_hasattr',
+ 'try_import',
+ 'try_imports',
+ ]
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 0, 3, 'final', 0)
+
+
+def try_import(name, alternative=None, error_callback=None):
+ """Attempt to import ``name``. If it fails, return ``alternative``.
+
+ When supporting multiple versions of Python or optional dependencies, it
+ is useful to be able to try to import a module.
+
+ :param name: The name of the object to import, e.g. ``os.path`` or
+ ``os.path.join``.
+ :param alternative: The value to return if no module can be imported.
+ Defaults to None.
+ :param error_callback: If non-None, a callable that is passed the ImportError
+ when the module cannot be loaded.
+ """
+ module_segments = name.split('.')
+ last_error = None
+ while module_segments:
+ module_name = '.'.join(module_segments)
+ try:
+ module = __import__(module_name)
+ except ImportError:
+ last_error = sys.exc_info()[1]
+ module_segments.pop()
+ continue
+ else:
+ break
+ else:
+ if last_error is not None and error_callback is not None:
+ error_callback(last_error)
+ return alternative
+ nonexistent = object()
+ for segment in name.split('.')[1:]:
+ module = getattr(module, segment, nonexistent)
+ if module is nonexistent:
+ if last_error is not None and error_callback is not None:
+ error_callback(last_error)
+ return alternative
+ return module
+
+
+_RAISE_EXCEPTION = object()
+def try_imports(module_names, alternative=_RAISE_EXCEPTION, error_callback=None):
+ """Attempt to import modules.
+
+ Tries to import the first module in ``module_names``. If it can be
+ imported, we return it. If not, we go on to the second module and try
+ that. The process continues until we run out of modules to try. If none
+ of the modules can be imported, either raise an exception or return the
+ provided ``alternative`` value.
+
+ :param module_names: A sequence of module names to try to import.
+ :param alternative: The value to return if no module can be imported.
+ If unspecified, we raise an ImportError.
+    :param error_callback: If non-None, called with the ImportError for *each*
+ module that fails to load.
+ :raises ImportError: If none of the modules can be imported and no
+ alternative value was specified.
+ """
+ module_names = list(module_names)
+ for module_name in module_names:
+ module = try_import(module_name, error_callback=error_callback)
+ if module:
+ return module
+ if alternative is _RAISE_EXCEPTION:
+ raise ImportError(
+ "Could not import any of: %s" % ', '.join(module_names))
+ return alternative
+
+
+def safe_hasattr(obj, attr, _marker=object()):
+ """Does 'obj' have an attribute 'attr'?
+
+ Use this rather than built-in hasattr, as the built-in swallows exceptions
+ in some versions of Python and behaves unpredictably with respect to
+ properties.
+ """
+ return getattr(obj, attr, _marker) is not _marker
+
+
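+# A short usage sketch (illustrative only; not part of the upstream module),
+# based on the docstrings above:
+#
+#   from extras import safe_hasattr, try_import, try_imports
+#
+#   # Returns the imported module (or object) if it exists, else the alternative.
+#   json = try_import('simplejson', alternative=None)
+#
+#   # Tries each name in turn; raises ImportError only if none can be imported.
+#   etree = try_imports(['lxml.etree', 'xml.etree.ElementTree'])
+#
+#   # Unlike built-in hasattr on some Python versions, exceptions raised by
+#   # properties propagate; the return value only reports attribute existence.
+#   safe_hasattr(object(), 'missing')   # -> False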
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/__init__.py b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/__init__.py
new file mode 100644
index 00000000000..e0d7d4a34d6
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
+
+"""Tests for extras."""
+
+from unittest import TestSuite, TestLoader
+
+
+def test_suite():
+ from extras.tests import (
+ test_extras,
+ )
+ modules = [
+ test_extras,
+ ]
+ loader = TestLoader()
+ suites = map(loader.loadTestsFromModule, modules)
+ return TestSuite(suites)
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/test_extras.py b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/test_extras.py
new file mode 100644
index 00000000000..be1ed1c69f6
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/tests/test_extras.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import (
+ Equals,
+ Is,
+ Not,
+ )
+
+from extras import (
+ safe_hasattr,
+ try_import,
+ try_imports,
+ )
+
+def check_error_callback(test, function, arg, expected_error_count,
+ expect_result):
+ """General test template for error_callback argument.
+
+ :param test: Test case instance.
+ :param function: Either try_import or try_imports.
+ :param arg: Name or names to import.
+ :param expected_error_count: Expected number of calls to the callback.
+ :param expect_result: Boolean for whether a module should
+ ultimately be returned or not.
+ """
+ cb_calls = []
+ def cb(e):
+ test.assertIsInstance(e, ImportError)
+ cb_calls.append(e)
+ try:
+ result = function(arg, error_callback=cb)
+ except ImportError:
+ test.assertFalse(expect_result)
+ else:
+ if expect_result:
+ test.assertThat(result, Not(Is(None)))
+ else:
+ test.assertThat(result, Is(None))
+ test.assertEquals(len(cb_calls), expected_error_count)
+
+
+class TestSafeHasattr(TestCase):
+
+ def test_attribute_not_there(self):
+ class Foo(object):
+ pass
+ self.assertEqual(False, safe_hasattr(Foo(), 'anything'))
+
+ def test_attribute_there(self):
+ class Foo(object):
+ pass
+ foo = Foo()
+ foo.attribute = None
+ self.assertEqual(True, safe_hasattr(foo, 'attribute'))
+
+ def test_property_there(self):
+ class Foo(object):
+ @property
+ def attribute(self):
+ return None
+ foo = Foo()
+ self.assertEqual(True, safe_hasattr(foo, 'attribute'))
+
+ def test_property_raises(self):
+ class Foo(object):
+ @property
+ def attribute(self):
+ 1/0
+ foo = Foo()
+ self.assertRaises(ZeroDivisionError, safe_hasattr, foo, 'attribute')
+
+
+class TestTryImport(TestCase):
+
+ def test_doesnt_exist(self):
+ # try_import('thing', foo) returns foo if 'thing' doesn't exist.
+ marker = object()
+ result = try_import('doesntexist', marker)
+ self.assertThat(result, Is(marker))
+
+ def test_None_is_default_alternative(self):
+ # try_import('thing') returns None if 'thing' doesn't exist.
+ result = try_import('doesntexist')
+ self.assertThat(result, Is(None))
+
+ def test_existing_module(self):
+ # try_import('thing', foo) imports 'thing' and returns it if it's a
+ # module that exists.
+ result = try_import('os', object())
+ import os
+ self.assertThat(result, Is(os))
+
+ def test_existing_submodule(self):
+ # try_import('thing.another', foo) imports 'thing' and returns it if
+ # it's a module that exists.
+ result = try_import('os.path', object())
+ import os
+ self.assertThat(result, Is(os.path))
+
+ def test_nonexistent_submodule(self):
+ # try_import('thing.another', foo) imports 'thing' and returns foo if
+ # 'another' doesn't exist.
+ marker = object()
+ result = try_import('os.doesntexist', marker)
+ self.assertThat(result, Is(marker))
+
+ def test_object_from_module(self):
+ # try_import('thing.object') imports 'thing' and returns
+ # 'thing.object' if 'thing' is a module and 'object' is not.
+ result = try_import('os.path.join')
+ import os
+ self.assertThat(result, Is(os.path.join))
+
+ def test_error_callback(self):
+ # the error callback is called on failures.
+ check_error_callback(self, try_import, 'doesntexist', 1, False)
+
+ def test_error_callback_missing_module_member(self):
+ # the error callback is called on failures to find an object
+ # inside an existing module.
+ check_error_callback(self, try_import, 'os.nonexistent', 1, False)
+
+ def test_error_callback_not_on_success(self):
+ # the error callback is not called on success.
+ check_error_callback(self, try_import, 'os.path', 0, True)
+
+
+class TestTryImports(TestCase):
+
+ def test_doesnt_exist(self):
+ # try_imports('thing', foo) returns foo if 'thing' doesn't exist.
+ marker = object()
+ result = try_imports(['doesntexist'], marker)
+ self.assertThat(result, Is(marker))
+
+ def test_fallback(self):
+ result = try_imports(['doesntexist', 'os'])
+ import os
+ self.assertThat(result, Is(os))
+
+ def test_None_is_default_alternative(self):
+        # Without an alternative, try_imports raises ImportError if nothing imports.
+ e = self.assertRaises(
+ ImportError, try_imports, ['doesntexist', 'noreally'])
+ self.assertThat(
+ str(e),
+ Equals("Could not import any of: doesntexist, noreally"))
+
+ def test_existing_module(self):
+ # try_imports('thing', foo) imports 'thing' and returns it if it's a
+ # module that exists.
+ result = try_imports(['os'], object())
+ import os
+ self.assertThat(result, Is(os))
+
+ def test_existing_submodule(self):
+ # try_imports('thing.another', foo) imports 'thing' and returns it if
+ # it's a module that exists.
+ result = try_imports(['os.path'], object())
+ import os
+ self.assertThat(result, Is(os.path))
+
+ def test_nonexistent_submodule(self):
+ # try_imports('thing.another', foo) imports 'thing' and returns foo if
+ # 'another' doesn't exist.
+ marker = object()
+ result = try_imports(['os.doesntexist'], marker)
+ self.assertThat(result, Is(marker))
+
+ def test_fallback_submodule(self):
+ result = try_imports(['os.doesntexist', 'os.path'])
+ import os
+ self.assertThat(result, Is(os.path))
+
+ def test_error_callback(self):
+        # One error for every module that doesn't exist.
+ check_error_callback(self, try_imports,
+ ['os.doesntexist', 'os.notthiseither'],
+ 2, False)
+ check_error_callback(self, try_imports,
+ ['os.doesntexist', 'os.notthiseither', 'os'],
+ 2, True)
+ check_error_callback(self, try_imports,
+ ['os.path'],
+ 0, True)
+
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.cfg b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.cfg
new file mode 100644
index 00000000000..92ee5499429
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.cfg
@@ -0,0 +1,10 @@
+[test]
+test_module = extras.tests
+buffer = 1
+catch = 1
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.py b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.py
new file mode 100755
index 00000000000..c384a765801
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/setup.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""Distutils installer for extras."""
+
+from setuptools import setup
+import os.path
+
+import extras
+testtools_cmd = extras.try_import('testtools.TestCommand')
+
+
+def get_version():
+ """Return the version of extras that we are building."""
+ version = '.'.join(
+ str(component) for component in extras.__version__[0:3])
+ return version
+
+
+def get_long_description():
+ readme_path = os.path.join(
+ os.path.dirname(__file__), 'README.rst')
+ return open(readme_path).read()
+
+
+cmdclass = {}
+
+if testtools_cmd is not None:
+ cmdclass['test'] = testtools_cmd
+
+
+setup(name='extras',
+ author='Testing cabal',
+ author_email='testtools-dev@lists.launchpad.net',
+ url='https://github.com/testing-cabal/extras',
+      description=('Useful extra bits for Python - things that should be '
+ 'in the standard library'),
+ long_description=get_long_description(),
+ version=get_version(),
+ classifiers=["License :: OSI Approved :: MIT License"],
+ packages=[
+ 'extras',
+ 'extras.tests',
+ ],
+ cmdclass=cmdclass)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/MANIFEST.in b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/MANIFEST.in
new file mode 100644
index 00000000000..eb989816283
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/MANIFEST.in
@@ -0,0 +1,20 @@
+exclude .bzrignore
+exclude aclocal.m4
+prune autom4te.cache
+prune c
+prune c++
+prune compile
+exclude configure*
+exclude depcomp
+exclude INSTALL
+exclude install-sh
+exclude lib*
+exclude ltmain.sh
+prune m4
+exclude Makefile*
+exclude missing
+prune perl
+exclude py-compile
+prune shell
+exclude stamp-h1
+include NEWS
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/NEWS b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/NEWS
new file mode 100644
index 00000000000..59af931ea2f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/NEWS
@@ -0,0 +1,493 @@
+---------------------
+subunit release notes
+---------------------
+
+NEXT (In development)
+---------------------
+
+0.0.16
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Perl files should now honour perl system config.
+ (Benedikt Morbach, #1233198)
+
+* Python 3.1 and 3.2 have an inconsistent memoryview implementation which
+ required a workaround for NUL byte detection. (Robert Collins, #1216246)
+
+* The test suite was failing 6 tests due to testtools changing its output
+ formatting of exceptions. (Robert Collins)
+
+* V2 parser errors now set appropriate mime types for the encapsulated packet
+ data and the error message. (Robert Collins)
+
+* When tests fail to import, ``python -m subunit.run -l ...`` will now write a
+ subunit file attachment listing the failed imports and exit 2, rather than
+ listing the stub objects from the importer and exiting 0.
+ (Robert Collins, #1245672)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Most filters will now accept a file path argument instead of only reading
+ from stdin. (Robert Collins, #409206)
+
+0.0.15
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Clients of subunit did not expect memoryview objects in StreamResult events.
+ (Robert Collins)
+
+* Memoryview and struct were mutually incompatible in 2.7.3 and 3.2.
+ (Robert Collins, #1216163)
+
+0.0.14
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Memoryview detection was broken and thus its use was never really tested.
+  (Robert Collins, #1216101)
+
+* TestProtocol2's tag tests were set sort order dependent.
+ (Robert Collins, #1025392)
+
+* TestTestProtocols' test_tags_both was set sort order dependent.
+ (Robert Collins, #1025392)
+
+* TestTestProtocols' test_*_details were dictionary sort order dependent.
+ (Robert Collins, #1025392)
+
+* TestSubUnitTags's test_add_tag was also set sort order dependent.
+ (Robert Collins, #1025392)
+
+0.0.13
+------
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* subunit should now build with automake 1.11 again. (Robert Collins)
+
+* `subunit-stats` no longer outputs encapsulated stdout as subunit.
+ (Robert Collins, #1171987)
+
+* The logic for `subunit.run` is now importable via python -
+ `subunit.run.main`. (Robert Collins, #606770)
+
+BUG FIXES
+~~~~~~~~~
+
+* Removed GPL files that were (C) non Subunit Developers - they are
+ incompatible for binary distribution, which affects redistributors.
+ (Robert Collins, #1185591)
+
+0.0.12
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Subunit v2 packets with both file content and route code were not being
+ parsed correctly - they would incorrectly emit a parser error, due to trying
+  to parse the route code length from the first bytes of the file content.
+  (Robert Collins, #1172815)
+
+0.0.11
+------
+
+v2 protocol draft included in this release. The v2 protocol trades off human
+readability for a massive improvement in robustness, the ability to represent
+concurrent tests in a single stream, cheaper parsing, and significantly better
+in-line debugging support and structured forwarding
+of non-test data (such as stdout or stdin data).
+
+This change includes two new filters (subunit-1to2 and subunit-2to1). Use
+these filters to convert old streams to v2 and convert v2 streams to v1.
+
+All the other filters now only parse and emit v2 streams. V2 is still in
+draft format, so if you want to delay and wait for v2 to be finalised, you
+should use subunit-2to1 before any serialisation steps take place.
+With the ability to encapsulate multiple non-test streams, another significant
+change is that filters which emit subunit now encapsulate any non-subunit they
+encounter, labelling it 'stdout'. This permits multiplexing such streams and
+detangling the stdout streams from each input.
+
+The subunit libraries (Python etc) have not changed their behaviour: they
+still emit v1 from their existing API calls. New APIs are being added
+and applications should migrate once their language has those APIs available.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* ``subunit.run`` now replaces sys.stdout to ensure that stdout is unbuffered
+ - without this pdb output is not reliably visible when stdout is a pipe
+ as it usually is. (Robert Collins)
+
+* v2 protocol draft included in this release. (Python implementation only so
+ far). (Robert Collins)
+
+* Two new Python classes -- ``StreamResultToBytes`` and
+ ``ByteStreamToStreamResult`` handle v2 generation and parsing.
+ (Robert Collins)
+
+0.0.10
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* make_stream_binary is now public for reuse. (Robert Collins)
+
+* NAME was not defined in the protocol BNF. (Robert Collins)
+
+* UnsupportedOperation is available in the Python2.6 io library, so ask
+ forgiveness rather than permission for obtaining it. (Robert Collins)
+
+* Streams with no fileno() attribute are now supported, but they are not
+ checked for being in binary mode: be sure to take care of that if using
+ the library yourself. (Robert Collins)
+
+0.0.9
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* All the source files are now included in the distribution tarball.
+ (Arfrever Frehtes Taifersar Arahesis, Robert Collins, #996275)
+
+* ``python/subunit/tests/test_run.py`` and ``python/subunit/filters.py`` were
+ not included in the 0.0.8 tarball. (Robert Collins)
+
+* Test ids which include non-ascii unicode characters are now supported.
+ (Robert Collins, #1029866)
+
+* The ``failfast`` option to ``subunit.run`` will now work. The dependency on
+ testtools has been raised to 0.9.23 to permit this.
+ (Robert Collins, #1090582)
+
+0.0.8
+-----
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Perl module now correctly outputs "failure" instead of "fail". (Stewart Smith)
+
+* Shell functions now output timestamps. (Stewart Smith, Robert Collins)
+
+* 'subunit2csv' script that converts subunit output to CSV format.
+ (Jonathan Lange)
+
+* ``TagCollapsingDecorator`` now correctly distinguishes between local and
+ global tags. (Jonathan Lange)
+
+* ``TestResultFilter`` always forwards ``time:`` events.
+ (Benji York, Brad Crittenden)
+
+BUG FIXES
+~~~~~~~~~
+
+* Add 'subunit --no-xfail', which will omit expected failures from the subunit
+ stream. (John Arbash Meinel, #623642)
+
+* Add 'subunit -F/--only-genuine-failures' which sets all of '--no-skips',
+ '--no-xfail', '--no-passthrough, '--no-success', and gives you just the
+ failure stream. (John Arbash Meinel)
+
+* Python2.6 support was broken by the fixup feature.
+ (Arfrever Frehtes Taifersar Arahesis, #987490)
+
+* Python3 support regressed in trunk.
+ (Arfrever Frehtes Taifersar Arahesis, #987514)
+
+* Python3 support was insufficiently robust in detecting unicode streams.
+ (Robert Collins, Arfrever Frehtes Taifersar Arahesis)
+
+* Tag support has been implemented for TestProtocolClient.
+ (Robert Collins, #518016)
+
+* Tags can now be filtered. (Jonathan Lange, #664171)
+
+* Test suite works with latest testtools (but not older ones - formatting
+ changes only). (Robert Collins)
+
+0.0.7
+-----
+
+The Subunit Python test runner ``python -m subunit.run`` can now report the
+test ids and also filter via a test id list file thanks to improvements in
+``testtools.run``. See the testtools manual, or testrepository - a major
+user of such functionality.
+
+Additionally the protocol now has a keyword uxsuccess for Unexpected Success
+reporting. Older parsers will report tests with this status code as 'lost
+connection'.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Add ``TimeCollapsingDecorator`` which collapses multiple sequential time()
+ calls into just the first and last. (Jonathan Lange)
+
+* Add ``TagCollapsingDecorator`` which collapses many tags() calls into one
+ where possible. (Jonathan Lange, Robert Collins)
+
+* Force flush of writes to stdout in c/tests/test_child.
+ (Jelmer Vernooij, #687611)
+
+* Provisional Python 3.x support.
+ (Robert Collins, Tres Seaver, Martin[gz], #666819)
+
+* ``subunit.chunked.Decoder`` Python class takes a new ``strict`` option,
+ which defaults to ``True``. When ``False``, the ``Decoder`` will accept
+ incorrect input that is still unambiguous. i.e. subunit will not barf if
+ a \r is missing from the input. (Martin Pool)
+
+* ``subunit-filter`` preserves the relative ordering of ``time:`` statements,
+ so you can now use filtered streams to gather data about how long it takes
+ to run a test. (Jonathan Lange, #716554)
+
+* ``subunit-ls`` now handles a stream with time: instructions that start
+ partway through the stream (which may lead to strange times) more gracefully.
+ (Robert Collins, #785954)
+
+* ``subunit-ls`` should handle the new test outcomes in Python2.7 better.
+ (Robert Collins, #785953)
+
+* ``TestResultFilter`` now collapses sequential calls to time().
+ (Jonathan Lange, #567150)
+
+* ``TestResultDecorator.tags()`` now actually works, and is no longer a buggy
+ copy/paste of ``TestResultDecorator.time()``. (Jonathan Lange, #681828)
+
+* ``TestResultFilter`` now supports a ``fixup_expected_failures``
+ argument. (Jelmer Vernooij, #755241)
+
+* The ``subunit.run`` Python module supports ``-l`` and ``--load-list`` as
+ per ``testtools.run``. This required a dependency bump due to a small
+ API change in ``testtools``. (Robert Collins)
+
+* The help for subunit-filter was confusing about the behaviour of ``-f`` /
+ ``--no-failure``. (Robert Collins, #703392)
+
+* The Python2.7 / testtools addUnexpectedSuccess API is now supported. This
+ required adding a new status code to the protocol. (Robert Collins, #654474)
+
+CHANGES
+~~~~~~~
+
+* testtools 0.9.11 or newer is now needed (due to the Python 3 support).
+ (Robert Collins)
+
+0.0.6
+-----
+
+This release of subunit fixes a number of unicode related bugs. This depends on
+testtools 0.9.4 and will not function without it. Thanks to Tres Seaver there
+is also an optional native setup.py file for use with easy_install and the
+like.
+
+BUG FIXES
+~~~~~~~~~
+
+* Be consistent about delivering unicode content to testtools StringException
+ class which has become (appropriately) conservative. (Robert Collins)
+
+* Fix incorrect reference to subunit_test_failf in c/README.
+ (Brad Hards, #524341)
+
+* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
+ is purely cosmetic as the parameters are passed down with no interpretation.
+ (Robert Collins, #537611)
+
+* Old style tracebacks with no encoding info are now treated as UTF8 rather
+ than some-random-codec-like-ascii. (Robert Collins)
+
+* On windows, ProtocolTestCase and TestProtocolClient will set their streams to
+ binary mode by calling into msvcrt; this avoids having their input or output
+ mangled by the default line ending translation on that platform.
+ (Robert Collins, Martin [gz], #579296)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Subunit now has a setup.py for python deployments that are not using
+ distribution packages. (Tres Seaver, #538181)
+
+* Subunit now supports test discovery by building on the testtools support for
+ it. You can take advantage of it with "python -m subunit.run discover [path]"
+ and see "python -m subunit.run discover --help" for more options.
+
+* Subunit now uses the improved unicode support in testtools when outputting
+ non-details based test information; this should consistently UTF8 encode such
+ strings.
+
+* The Python TestProtocolClient now flushes output on startTest and stopTest.
+ (Martin [gz]).
+
+
+0.0.5
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* make check was failing if subunit wasn't installed due to a missing include
+ path for the test program test_child.
+
+* make distcheck was failing due to a missing $(top_srcdir) rune.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* New filter `subunit-notify` that will show a notification window with test
+ statistics when the test run finishes.
+
+* subunit.run will now pipe its output to the command in the
+ SUBUNIT_FORMATTER environment variable, if set.
+
+0.0.4
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* subunit2junitxml -f required a value, this is now fixed and -f acts as a
+ boolean switch with no parameter.
+
+* Building with autoconf 2.65 is now supported.
+
+
+0.0.3
+-----
+
+ CHANGES:
+
+ * License change, by unanimous agreement of contributors to BSD/Apache
+ License Version 2.0. This makes Subunit compatible with more testing
+ frameworks.
+
+ IMPROVEMENTS:
+
+ * CPPUnit is now directly supported: subunit builds a cppunit listener
+ ``libcppunit-subunit``.
+
+ * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
+ from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
+ serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
+ The ``ProtocolTestCase`` parser now calls outcomes using an extended
+     API that permits attaching arbitrary MIME resources such as text files,
+ log entries and so on. This extended API is being developed with the
+ Python testing community, and is in flux. ``TestResult`` objects that
+ do not support the API will be detected and transparently downgraded
+ back to the regular Python unittest API.
+
+ * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
+     variable when installing.
+
+ * Multipart test outcomes are tentatively supported; the exact protocol
+ for them, both serialiser and object is not yet finalised. Testers and
+ early adopters are sought. As part of this and also in an attempt to
+     provide a more precise focus on the wire protocol and toolchain,
+ Subunit now depends on testtools (http://launchpad.net/testtools)
+ release 0.9.0 or newer.
+
+   * subunit2junitxml supports a new option, --forward, which causes it
+ to forward the raw subunit stream in a similar manner to tee. This
+ is used with the -o option to both write a xml report and get some
+ other subunit filter to process the stream.
+
+ * The C library now has ``subunit_test_skip``.
+
+ BUG FIXES:
+
+ * Install progress_model.py correctly.
+
+ * Non-gcc builds will no longer try to use gcc specific flags.
+ (Thanks trondn-norbye)
+
+ API CHANGES:
+
+ INTERNALS:
+
+0.0.2
+-----
+
+ CHANGES:
+
+ IMPROVEMENTS:
+
+ * A number of filters now support ``--no-passthrough`` to cause all
+ non-subunit content to be discarded. This is useful when precise control
+ over what is output is required - such as with subunit2junitxml.
+
+ * A small perl parser is now included, and a new ``subunit-diff`` tool
+ using that is included. (Jelmer Vernooij)
+
+ * Subunit streams can now include optional, incremental lookahead
+ information about progress. This allows reporters to make estimates
+ about completion, when such information is available. See the README
+ under ``progress`` for more details.
+
+ * ``subunit-filter`` now supports regex filtering via ``--with`` and
+     ``--without`` options. (Martin Pool)
+
+ * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
+ test stream.
+
+ * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
+ test reporter to be used, which has a textual progress bar. This requires
+ a recent bzrlib as a minor bugfix was required in bzrlib to support this.
+
+ * ``subunit2junitxml`` has been added. This filter converts a subunit
+ stream to a single JUnit style XML stream using the pyjunitxml
+ python library.
+
+ * The shell functions support skipping via ``subunit_skip_test`` now.
+
+ BUG FIXES:
+
+ * ``xfail`` outcomes are now passed to python TestResult's via
+ addExpectedFailure if it is present on the TestResult. Python 2.6 and
+ earlier which do not have this function will have ``xfail`` outcomes
+ passed through as success outcomes as earlier versions of subunit did.
+
+ API CHANGES:
+
+ * tags are no longer passed around in python via the ``TestCase.tags``
+ attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called,
+ and like in the protocol, if called while a test is active only applies
+ to that test. (Robert Collins)
+
+ * ``TestResultFilter`` takes a new optional constructor parameter
+ ``filter_predicate``. (Martin Pool)
+
+ * When a progress: directive is encountered in a subunit stream, the
+ python bindings now call the ``progress(offset, whence)`` method on
+ ``TestResult``.
+
+ * When a time: directive is encountered in a subunit stream, the python
+ bindings now call the ``time(seconds)`` method on ``TestResult``.
+
+ INTERNALS:
+
+ * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
+ users of subunit will want to wrap their ``TestProtocolClient`` objects
+ in this decorator to get test timing data for performance analysis.
+
+ * (python) ExecTestCase supports passing arguments to test scripts.
+
+ * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
+ which can be used to call some code on every event, without having to
+ implement all the event methods.
+
+ * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
+ causes a timestamp to be output to the stream.
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/PKG-INFO
new file mode 100644
index 00000000000..de79389b594
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/PKG-INFO
@@ -0,0 +1,483 @@
+Metadata-Version: 1.0
+Name: python-subunit
+Version: 0.0.16
+Summary: Python implementation of subunit test streaming protocol
+Home-page: http://launchpad.net/subunit
+Author: Robert Collins
+Author-email: subunit-dev@lists.launchpad.net
+License: UNKNOWN
+Description:
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2013 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+        license at the user's choice. Copies of both licenses are available in the
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+ Subunit
+ -------
+
+ Subunit is a streaming protocol for test results.
+
+ There are two major revisions of the protocol. Version 1 was trivially human
+ readable but had significant defects as far as highly parallel testing was
+ concerned - it had no room for doing discovery and execution in parallel,
+ required substantial buffering when multiplexing and was fragile - a corrupt
+ byte could cause an entire stream to be misparsed. Version 1.1 added
+ encapsulation of binary streams which mitigated some of the issues but the
+ core remained.
+
+ Version 2 shares many of the good characteristics of Version 1 - it can be
+ embedded into a regular text stream (e.g. from a build system) and it still
+ models xUnit style test execution. It also fixes many of the issues with
+ Version 1 - Version 2 can be multiplexed without excessive buffering (in
+ time or space), it has a well defined recovery mechanism for dealing with
+ corrupted streams (e.g. where two processes write to the same stream
+ concurrently, or where the stream generator suffers a bug).
+
+        More details on both protocol versions can be found in the 'Protocol' section
+ of this document.
+
+ Subunit comes with command line filters to process a subunit stream and
+ language bindings for python, C, C++ and shell. Bindings are easy to write
+ for other languages.
+
+ A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole, and tests running on multiple machines
+ can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+           other can be run separately and then aggregated, rather than interfering
+           with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+           deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+ Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+ Integration with other tools
+ ----------------------------
+
+ Subunit's language bindings act as integration with various test runners like
+ 'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+ (typically a few lines) will allow Subunit to be used in more sophisticated
+ ways.
+
+ Python
+ ======
+
+ Subunit has excellent Python support: most of the filters and tools are written
+ in python and there are facilities for using Subunit to increase test isolation
+ seamlessly within a test suite.
+
+ The most common way is to run an existing python test suite and have it output
+ subunit via the ``subunit.run`` module::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+        For more information on the Python support Subunit offers, please see
+ ``pydoc subunit``, or the source in ``python/subunit/``
+
+ C
+ =
+
+ Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+ has included subunit support in their project for some years now. See
+ 'c/README' for more details.
+
+ C++
+ ===
+
+ The C library is includable and usable directly from C++. A TestListener for
+ CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+ shell
+ =====
+
+ There are two sets of shell tools. There are filters, which accept a subunit
+ stream on stdin and output processed data (or a transformed stream) on stdout.
+
+        Then there are unittest facilities similar to those for C: shell bindings
+ consisting of simple functions to output protocol elements, and a patch for
+ adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+ details.
+
+ Filter recipes
+ --------------
+
+ To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+ The xUnit test model
+ --------------------
+
+ Subunit implements a slightly modified xUnit test model. The stock standard
+ model is that there are tests, which have an id(), can be run, and when run
+ start, emit an outcome (like success or failure) and then finish.
+
+ Subunit extends this with the idea of test enumeration (find out about tests
+ a runner has without running them), tags (allow users to describe tests in
+ ways the test framework doesn't apply any semantic value to), file attachments
+ (allow arbitrary data to make analysing a failure easy) and timestamps.
+
+ The protocol
+ ------------
+
+ Version 2, or v2 is new and still under development, but is intended to
+        supersede version 1 in the very near future. Subunit's bundled tools accept
+ only version 2 and only emit version 2, but the new filters subunit-1to2 and
+ subunit-2to1 can be used to interoperate with older third party libraries.
+
+ Version 2
+ =========
+
+ Version 2 is a binary protocol consisting of independent packets that can be
+ embedded in the output from tools like make - as long as each packet has no
+ other bytes mixed in with it (which 'make -j N>1' has a tendency of doing).
+ Version 2 is currently in draft form, and early adopters should be willing
+ to either discard stored results (if protocol changes are made), or bulk
+ convert them back to v1 and then to a newer edition of v2.
+
+ The protocol synchronises at the start of the stream, after a packet, or
+ after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+ directly after the end of the prior packet.
+
+ Subunit is intended to be transported over a reliable streaming protocol such
+ as TCP. As such it does not concern itself with out of order delivery of
+ packets. However, because of the possibility of corruption due to either
+ bugs in the sender, or due to mixed up data from concurrent writes to the same
+ fd when being embedded, subunit strives to recover reasonably gracefully from
+ damaged data.
+
+ A key design goal for Subunit version 2 is to allow processing and multiplexing
+ without forcing buffering for semantic correctness, as buffering tends to hide
+ hung or otherwise misbehaving tests. That said, limited time based buffering
+        for network efficiency is a good idea - this is ultimately an implementor
+ choice. Line buffering is also discouraged for subunit streams, as dropping
+ into a debugger or other tool may require interactive traffic even if line
+ buffering would not otherwise be a problem.
+
+ In version two there are two conceptual events - a test status event and a file
+ attachment event. Events may have timestamps, and the path of multiplexers that
+ an event is routed through is recorded to permit sending actions back to the
+ source (such as new tests to run or stdin for driving debuggers and other
+ interactive input). Test status events are used to enumerate tests, to report
+ tests and test helpers as they run. Tests may have tags, used to allow
+ tunnelling extra meanings through subunit without requiring parsing of
+ arbitrary file attachments. Things that are not standalone tests get marked
+ as such by setting the 'Runnable' flag to false. (For instance, individual
+ assertions in TAP are not runnable tests, only the top level TAP test script
+ is runnable).
+
+ File attachments are used to provide rich detail about the nature of a failure.
+ File attachments can also be used to encapsulate stdout and stderr both during
+ and outside tests.
+
+ Most numbers are stored in network byte order - Most Significant Byte first
+ encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+ byte's top 2 high order bits encode the total number of octets in the number.
+ This encoding can encode values from 0 to 2**30-1, enough to encode a
+ nanosecond. Numbers that are not variable length encoded are still stored in
+ MSB order.
+
+ prefix octets max max
+ +-------+--------+---------+------------+
+ | 00 | 1 | 2**6-1 | 63 |
+ | 01 | 2 | 2**14-1 | 16383 |
+ | 10 | 3 | 2**22-1 | 4194303 |
+ | 11 | 4 | 2**30-1 | 1073741823 |
+ +-------+--------+---------+------------+
+
+ All variable length elements of the packet are stored with a length prefix
+ number allowing them to be skipped over for consumers that don't need to
+ interpret them.
+
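+        For illustration, a minimal Python 3 sketch of this number encoding (it is
+        not the library's actual implementation) might look like::
+
+            def encode_number(value):
+                # The top two bits of the first octet give the total octet count.
+                if value < 2**6:
+                    return bytes([value])
+                if value < 2**14:
+                    return bytes([0x40 | (value >> 8), value & 0xFF])
+                if value < 2**22:
+                    return bytes([0x80 | (value >> 16), (value >> 8) & 0xFF,
+                                  value & 0xFF])
+                if value < 2**30:
+                    return bytes([0xC0 | (value >> 24), (value >> 16) & 0xFF,
+                                  (value >> 8) & 0xFF, value & 0xFF])
+                raise ValueError("number too large for a subunit v2 field")
+
+            encode_number(12)     # -> b'\x0c' (one octet, prefix 00)
+            encode_number(16384)  # -> b'\x80\x40\x00' (three octets, prefix 10)
+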
+        UTF-8 strings are stored with no terminating NUL and should not have any embedded
+        NULs. Implementations SHOULD validate any such strings that they process and take
+        some remedial action (such as discarding the packet as corrupt).
+
+ In short the structure of a packet is:
+ PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+ FILECONTENT? ROUTING_CODE? CRC32
+
+ In more detail...
+
+ Packets are identified by a single byte signature - 0xB3, which is never legal
+ in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+ bit set and the second not, which is the UTF-8 signature for a continuation
+        byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+ the 1 and 0 for a continuation byte.
+
+ If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
+ a legal character, consider either recoding the text to UTF-8, or using
+ subunit's 'file' packets to embed the text stream in subunit, rather than the
+ other way around.
+
+ Following the signature byte comes a 16-bit flags field, which includes a
+ 4-bit version field - if the version is not 0x2 then the packet cannot be
+ read. It is recommended to signal an error at this point (e.g. by emitting
+ a synthetic error packet and returning to the top level loop to look for
+ new packets, or exiting with an error). If recovery is desired, treat the
+ packet signature as an opaque byte and scan for a new synchronisation point.
+        NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+ as they are an 8-bit safe container format, so recovery from this situation
+ may involve an arbitrary number of false positives until an actual packet
+ is encountered : and even then it may still be false, failing after passing
+ the version check due to coincidence.
+
+ Flags are stored in network byte order too.
+ +-------------------------+------------------------+
+ | High byte | Low byte |
+ | 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
+ | VERSION |feature bits| |
+ +------------+------------+------------------------+
+
+ Valid version values are:
+ 0x2 - version 2
+
+ Feature bits:
+ Bit 11 - mask 0x0800 - Test id present.
+ Bit 10 - mask 0x0400 - Routing code present.
+ Bit 9 - mask 0x0200 - Timestamp present.
+ Bit 8 - mask 0x0100 - Test is 'runnable'.
+ Bit 7 - mask 0x0080 - Tags are present.
+ Bit 6 - mask 0x0040 - File content is present.
+ Bit 5 - mask 0x0020 - File MIME type is present.
+ Bit 4 - mask 0x0010 - EOF marker.
+ Bit 3 - mask 0x0008 - Must be zero in version 2.
+
+ Test status gets three bits:
+ Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+ 000 - undefined / no test
+ 001 - Enumeration / existence
+ 002 - In progress
+ 003 - Success
+ 004 - Unexpected Success
+ 005 - Skipped
+ 006 - Failed
+ 007 - Expected failure
+
+ After the flags field is a number field giving the length in bytes for the
+ entire packet including the signature and the checksum. This length must
+ be less than 4MiB - 4194303 bytes. The encoding can obviously record a larger
+ number but one of the goals is to avoid requiring large buffers, or causing
+ large latency in the packet forward/processing pipeline. Larger file
+ attachments can be communicated in multiple packets, and the overhead in such a
+ 4MiB packet is approximately 0.2%.
+
+ The rest of the packet is a series of optional features as specified by the set
+ feature bits in the flags field. When absent they are entirely absent.
+
+ Forwarding and multiplexing of packets can be done without interpreting the
+ remainder of the packet until the routing code and checksum (which are both at
+ the end of the packet). Additionally, routers can often avoid copying or moving
+ the bulk of the packet, as long as the routing code size increase doesn't force
+ the length encoding to take up a new byte (which will only happen to packets
+ less than or equal to 16KiB in length) - large packets are very efficient to
+ route.
+
+        Timestamp when present is a 32 bit unsigned integer for seconds, and a variable
+ length number for nanoseconds, representing UTC time since Unix Epoch in
+ seconds and nanoseconds.
+
+ Test id when present is a UTF-8 string. The test id should uniquely identify
+ runnable tests such that they can be selected individually. For tests and other
+ actions which cannot be individually run (such as test
+ fixtures/layers/subtests) uniqueness is not required (though being human
+ meaningful is highly recommended).
+
+ Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+ There are no restrictions on tag content (other than the restrictions on UTF-8
+ strings in subunit in general). Tags have no ordering.
+
+ When a MIME type is present, it defines the MIME type for the file across all
+        packets for the same file (routing code + testid + name uniquely identifies a file,
+ reset when EOF is flagged). If a file never has a MIME type set, it should be
+ treated as application/octet-stream.
+
+ File content when present is a UTF-8 string for the name followed by the length
+ in bytes of the content, and then the content octets.
+
+ If present routing code is a UTF-8 string. The routing code is used to
+ determine which test backend a test was running on when doing data analysis,
+ and to route stdin to the test process if interaction is required.
+
+ Multiplexers SHOULD add a routing code if none is present, and prefix any
+ existing routing code with a routing code ('/' separated) if one is already
+ present. For example, a multiplexer might label each stream it is multiplexing
+ with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+ code '3' from stream '0' would adjust the route code when forwarding the packet
+ to be '0/3'.
+
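+ As an illustration only (not part of the protocol), the prefixing rule can be
+ expressed as a small Python helper; the function name here is hypothetical::
+
+     def prefix_route_code(stream_ordinal, existing_code):
+         # Apply the '/'-separated prefixing rule described above.
+         if existing_code is None:
+             return stream_ordinal
+         return "%s/%s" % (stream_ordinal, existing_code)
+
+     print(prefix_route_code("0", "3"))   # route code '3' arriving on stream '0' -> '0/3'
+     print(prefix_route_code("1", None))  # no existing route code on stream '1' -> '1'
+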
+ Following the end of the packet is a CRC-32 checksum of the contents of the
+ packet including the signature.
+
+ Example packets
+ ~~~~~~~~~~~~~~~
+
+ Trivial test "foo" enumeration packet, with test id, runnable set,
+ status=enumeration. Spaces below are to visually break up signature / flags /
+ length / testid / crc32
+
+ b3 2901 0c 03666f6f 08555f1b
+
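+ A minimal Python 3 sketch (illustration only, not part of the specification)
+ that unpacks this example packet field by field; it assumes ``zlib.crc32`` is
+ the intended CRC-32 variant::
+
+     import struct
+     import zlib
+
+     packet = bytes.fromhex("b3 2901 0c 03666f6f 08555f1b")
+
+     assert packet[0] == 0xB3                     # signature byte
+     flags = struct.unpack(">H", packet[1:3])[0]  # 16-bit flags, MSB first
+     print(flags >> 12)                           # version          -> 2
+     print(bool(flags & 0x0800))                  # test id present  -> True
+     print(bool(flags & 0x0100))                  # runnable         -> True
+     print(flags & 0x0007)                        # status           -> 1 (enumeration)
+     print(packet[3])                             # length: 0x0c, prefix 00 -> 12 bytes
+     name_len = packet[4]                         # 0x03: one-octet number, value 3
+     print(packet[5:5 + name_len].decode("utf-8"))            # test id -> 'foo'
+     print("%08x" % struct.unpack(">I", packet[-4:])[0])      # stored CRC -> 08555f1b
+     print("%08x" % (zlib.crc32(packet[:-4]) & 0xFFFFFFFF))   # computed CRC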
+
+ Version 1 (and 1.1)
+ ===================
+
+ Version 1 (and 1.1) are mostly human readable protocols.
+
+ Sample subunit wire contents
+ ----------------------------
+
+ The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+ When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+ Subunit protocol description
+ ============================
+
+ This description is being ported to an EBNF style. Currently it is only partly
+ in that style, but should be fairly clear all the same. When in doubt, refer to
+ the source (and ideally help fix up the description!). Generally the protocol
+ is line orientated and consists of either directives and their parameters, or -
+ when outside a DETAILS region - unexpected lines which are not interpreted by
+ the parser and should be forwarded unaltered.
+
+ test|testing|test:|testing: test LABEL
+ success|success:|successful|successful: test LABEL
+ success|success:|successful|successful: test LABEL DETAILS
+ failure: test LABEL
+ failure: test LABEL DETAILS
+ error: test LABEL
+ error: test LABEL DETAILS
+ skip[:] test LABEL
+ skip[:] test LABEL DETAILS
+ xfail[:] test LABEL
+ xfail[:] test LABEL DETAILS
+ uxsuccess[:] test LABEL
+ uxsuccess[:] test LABEL DETAILS
+ progress: [+|-]X
+ progress: push
+ progress: pop
+ tags: [-]TAG ...
+ time: YYYY-MM-DD HH:MM:SSZ
+
+ LABEL: UTF8*
+ NAME: UTF8*
+ DETAILS ::= BRACKETED | MULTIPART
+ BRACKETED ::= '[' CR UTF8-lines ']' CR
+ MULTIPART ::= '[ multipart' CR PART* ']' CR
+ PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+ PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+ PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+ unexpected output on stdout -> stdout.
+ exit w/0 or last test completing -> error
+
+ Tags given outside a test are applied to all following tests.
+ Tags given after a test: line and before the result line for the same test
+ apply only to that test, and inherit the current global tags.
+ A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+ applying to a single test, or to cancel a global tag.
+
+ The progress directive is used to provide progress information about a stream
+ so that stream consumers can provide completion estimates, progress bars and so
+ on. Stream generators that know how many tests will be present in the stream
+ should output "progress: COUNT". Stream filters that add tests should output
+ "progress: +COUNT", and those that remove tests should output
+ "progress: -COUNT". An absolute count should reset the progress indicators in
+ use - it indicates that two separate streams from different generators have
+ been trivially concatenated together, and there is no knowledge of how many
+ more complete streams are incoming. Smart concatenation could scan each stream
+ for their count and sum them, or alternatively translate absolute counts into
+ relative counts inline. It is recommended that outputters avoid absolute counts
+ unless necessary. The push and pop directives are used to provide local regions
+ for progress reporting. This fits with hierarchically operating test
+ environments - such as those that organise tests into suites - the top-most
+ runner can report on the number of suites, and each suite surround its output
+ with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+ the progress of the restored level by one step. Encountering progress
+ directives between the start and end of a test pair indicates that a previous
+ test was interrupted and did not cleanly terminate: it should be implicitly
+ closed with an error (the same as when a stream ends with no closing test
+ directive for the most recently started test).
+
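+ The nesting behaviour can be illustrated with a toy Python 3 sketch (the
+ bundled Python bindings carry a real implementation in
+ ``subunit.progress_model``; everything below is illustrative only)::
+
+     class ProgressSketch(object):
+         def __init__(self):
+             self.stack = [[0, 0]]        # one [position, width] frame per region
+
+         def set_width(self, width):      # "progress: N" (absolute count)
+             self.stack[-1][1] = width
+
+         def adjust_width(self, offset):  # "progress: +N" / "progress: -N"
+             self.stack[-1][1] += offset
+
+         def advance(self):               # a test in the current region completed
+             self.stack[-1][0] += 1
+
+         def push(self):                  # "progress: push"
+             self.stack.append([0, 0])
+
+         def pop(self):                   # "progress: pop" advances the restored level too
+             self.stack.pop()
+             self.advance()
+
+     progress = ProgressSketch()
+     progress.set_width(2)      # the top-level runner announces two suites
+     progress.push()            # the first suite starts
+     progress.adjust_width(3)   # it announces three tests
+     for _ in range(3):
+         progress.advance()
+     progress.pop()             # leaving the suite advances the outer level
+     print(progress.stack)      # -> [[1, 2]]: one of two suites complete
+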
+ The time directive acts as a clock event - it sets the time for all future
+ events. The value should be a valid ISO8601 time.
+
+ The skip, xfail and uxsuccess outcomes are not supported by all testing
+ environments. In Python the testtools (https://launchpad.net/testtools)
+ library is used to translate these automatically if an older Python version
+ that does not support them is in use. See the testtools documentation for the
+ translation policy.
+
+ skip is used to indicate a test was discovered but not executed. xfail is used
+ to indicate a test that errored in some expected fashion (also known as "TODO"
+ tests in some frameworks). uxsuccess is used to indicate an unexpected success
+ where a test thought to be failing actually passes. It is complementary to
+ xfail.
+
+ Hacking on subunit
+ ------------------
+
+ Releases
+ ========
+
+ * Update versions in configure.ac and python/subunit/__init__.py.
+ * Make PyPI and regular tarball releases. Upload the regular one to LP, the
+ PyPI one to PyPI.
+ * Push a tagged commit.
+
+
+Keywords: python test streaming
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Testing
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/README b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/README
new file mode 100644
index 00000000000..4fa9444ea6f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/README
@@ -0,0 +1,468 @@
+
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2013 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+Subunit
+-------
+
+Subunit is a streaming protocol for test results.
+
+There are two major revisions of the protocol. Version 1 was trivially human
+readable but had significant defects as far as highly parallel testing was
+concerned - it had no room for doing discovery and execution in parallel,
+required substantial buffering when multiplexing and was fragile - a corrupt
+byte could cause an entire stream to be misparsed. Version 1.1 added
+encapsulation of binary streams which mitigated some of the issues but the
+core remained.
+
+Version 2 shares many of the good characteristics of Version 1 - it can be
+embedded into a regular text stream (e.g. from a build system) and it still
+models xUnit style test execution. It also fixes many of the issues with
+Version 1 - Version 2 can be multiplexed without excessive buffering (in
+time or space), it has a well defined recovery mechanism for dealing with
+corrupted streams (e.g. where two processes write to the same stream
+concurrently, or where the stream generator suffers a bug).
+
+More details on both protocol versions can be found in the 'Protocol' section
+of this document.
+
+Subunit comes with command line filters to process a subunit stream and
+language bindings for python, C, C++ and shell. Bindings are easy to write
+for other languages.
+
+A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole, and tests running on multiple machines
+ can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+ other can be run separately and then aggregated, rather than interfering
+ with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+ deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+Integration with other tools
+----------------------------
+
+Subunit's language bindings act as integration with various test runners like
+'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+(typically a few lines) will allow Subunit to be used in more sophisticated
+ways.
+
+Python
+======
+
+Subunit has excellent Python support: most of the filters and tools are written
+in python and there are facilities for using Subunit to increase test isolation
+seamlessly within a test suite.
+
+The most common way is to run an existing python test suite and have it output
+subunit via the ``subunit.run`` module::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+For more information on the Python support Subunit offers, please see
+``pydoc subunit``, or the source in ``python/subunit/``.
+
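+Subunit can also be driven directly from Python. The sketch below is an
+illustration only; it uses the ``subunit.TestProtocolClient`` result class
+described in ``python/subunit/__init__.py``, with an in-memory ``BytesIO``
+standing in for a real binary output stream::
+
+    import io
+    import unittest
+
+    import subunit
+
+    class TestExample(unittest.TestCase):
+        def test_addition(self):
+            self.assertEqual(1 + 1, 2)
+
+    stream = io.BytesIO()
+    # TestProtocolClient is a unittest.TestResult that writes a subunit stream.
+    result = subunit.TestProtocolClient(stream)
+    unittest.TestLoader().loadTestsFromTestCase(TestExample).run(result)
+    print(stream.getvalue())
+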
+C
+=
+
+Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+has included subunit support in their project for some years now. See
+'c/README' for more details.
+
+C++
+===
+
+The C library is includable and usable directly from C++. A TestListener for
+CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+shell
+=====
+
+There are two sets of shell tools. There are filters, which accept a subunit
+stream on stdin and output processed data (or a transformed stream) on stdout.
+
+Then there are unittest facilities similar to those for C: shell bindings
+consisting of simple functions to output protocol elements, and a patch for
+adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+details.
+
+Filter recipes
+--------------
+
+To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+The xUnit test model
+--------------------
+
+Subunit implements a slightly modified xUnit test model. The stock standard
+model is that there are tests, which have an id(), can be run, and when run
+start, emit an outcome (like success or failure) and then finish.
+
+Subunit extends this with the idea of test enumeration (find out about tests
+a runner has without running them), tags (allow users to describe tests in
+ways the test framework doesn't apply any semantic value to), file attachments
+(allow arbitrary data to make analysing a failure easy) and timestamps.
+
+The protocol
+------------
+
+Version 2, or v2, is new and still under development, but is intended to
+supersede version 1 in the very near future. Subunit's bundled tools accept
+only version 2 and only emit version 2, but the new filters subunit-1to2 and
+subunit-2to1 can be used to interoperate with older third party libraries.
+
+Version 2
+=========
+
+Version 2 is a binary protocol consisting of independent packets that can be
+embedded in the output from tools like make - as long as each packet has no
+other bytes mixed in with it (which 'make -j N>1' has a tendency of doing).
+Version 2 is currently in draft form, and early adopters should be willing
+to either discard stored results (if protocol changes are made), or bulk
+convert them back to v1 and then to a newer edition of v2.
+
+The protocol synchronises at the start of the stream, after a packet, or
+after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+directly after the end of the prior packet.
+
+Subunit is intended to be transported over a reliable streaming protocol such
+as TCP. As such it does not concern itself with out of order delivery of
+packets. However, because of the possibility of corruption due to either
+bugs in the sender, or due to mixed up data from concurrent writes to the same
+fd when being embedded, subunit strives to recover reasonably gracefully from
+damaged data.
+
+A key design goal for Subunit version 2 is to allow processing and multiplexing
+without forcing buffering for semantic correctness, as buffering tends to hide
+hung or otherwise misbehaving tests. That said, limited time based buffering
+for network efficiency is a good idea - this is ultimately the implementor's
+choice. Line buffering is also discouraged for subunit streams, as dropping
+into a debugger or other tool may require interactive traffic even if line
+buffering would not otherwise be a problem.
+
+In version two there are two conceptual events - a test status event and a file
+attachment event. Events may have timestamps, and the path of multiplexers that
+an event is routed through is recorded to permit sending actions back to the
+source (such as new tests to run or stdin for driving debuggers and other
+interactive input). Test status events are used to enumerate tests, to report
+tests and test helpers as they run. Tests may have tags, used to allow
+tunnelling extra meanings through subunit without requiring parsing of
+arbitrary file attachments. Things that are not standalone tests get marked
+as such by setting the 'Runnable' flag to false. (For instance, individual
+assertions in TAP are not runnable tests, only the top level TAP test script
+is runnable).
+
+File attachments are used to provide rich detail about the nature of a failure.
+File attachments can also be used to encapsulate stdout and stderr both during
+and outside tests.
+
+Most numbers are stored in network byte order - Most Significant Byte first -
+encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+byte's top 2 high order bits encode the total number of octets in the number.
+This encoding can encode values from 0 to 2**30-1, enough to encode a
+nanosecond. Numbers that are not variable length encoded are still stored in
+MSB order.
+
+ prefix   octets     max        max
++-------+--------+---------+------------+
+| 00    |      1 |  2**6-1 | 63         |
+| 01    |      2 | 2**14-1 | 16383      |
+| 10    |      3 | 2**22-1 | 4194303    |
+| 11    |      4 | 2**30-1 | 1073741823 |
++-------+--------+---------+------------+
+
+All variable length elements of the packet are stored with a length prefix
+number allowing them to be skipped over for consumers that don't need to
+interpret them.
+
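+As an illustration (a sketch, not the library's implementation), this number
+encoding can be expressed in Python 3 as::
+
+    def write_number(value):
+        # 2-bit length prefix in the first octet, value stored MSB first.
+        if value < 2 ** 6:
+            return bytes([value])                                  # prefix 00
+        if value < 2 ** 14:
+            return bytes([0x40 | (value >> 8), value & 0xFF])      # prefix 01
+        if value < 2 ** 22:
+            return bytes([0x80 | (value >> 16),
+                          (value >> 8) & 0xFF, value & 0xFF])      # prefix 10
+        if value < 2 ** 30:
+            return bytes([0xC0 | (value >> 24), (value >> 16) & 0xFF,
+                          (value >> 8) & 0xFF, value & 0xFF])      # prefix 11
+        raise ValueError("too large for this encoding")
+
+    def read_number(data):
+        # Returns (value, octets consumed); the top two bits of the first
+        # octet give the total number of octets in the number.
+        count = (data[0] >> 6) + 1
+        value = data[0] & 0x3F
+        for octet in data[1:count]:
+            value = (value << 8) | octet
+        return value, count
+
+    assert write_number(12) == b"\x0c"     # the length field in the example packet below
+    assert read_number(write_number(16383)) == (16383, 2)
+    assert read_number(write_number(1073741823)) == (1073741823, 4)
+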
+UTF-8 strings are stored with no terminating NUL and should not have any
+embedded NULs. Implementations SHOULD validate any such strings that they
+process and take some remedial action (such as discarding the packet as
+corrupt).
+
+In short the structure of a packet is:
+PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+ FILECONTENT? ROUTING_CODE? CRC32
+
+In more detail...
+
+Packets are identified by a single byte signature - 0xB3, which is never legal
+in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+bit set and the second not, which is the UTF-8 signature for a continuation
+byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+the 1 and 0 for a continuation byte.
+
+If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
+a legal character, consider either recoding the text to UTF-8, or using
+subunit's 'file' packets to embed the text stream in subunit, rather than the
+other way around.
+
+Following the signature byte comes a 16-bit flags field, which includes a
+4-bit version field - if the version is not 0x2 then the packet cannot be
+read. It is recommended to signal an error at this point (e.g. by emitting
+a synthetic error packet and returning to the top level loop to look for
+new packets, or exiting with an error). If recovery is desired, treat the
+packet signature as an opaque byte and scan for a new synchronisation point.
+NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+as they are an 8-bit safe container format, so recovery from this situation
+may involve an arbitrary number of false positives until an actual packet
+is encountered; and even then it may still be false, failing after passing
+the version check due to coincidence.
+
+Flags are stored in network byte order too.
++-------------------------+------------------------+
+|        High byte        |        Low byte        |
+| 15 14 13 12 11 10  9  8 | 7  6  5  4  3  2  1  0 |
+|  VERSION   |feature bits|                        |
++------------+------------+------------------------+
+
+Valid version values are:
+0x2 - version 2
+
+Feature bits:
+Bit 11 - mask 0x0800 - Test id present.
+Bit 10 - mask 0x0400 - Routing code present.
+Bit 9 - mask 0x0200 - Timestamp present.
+Bit 8 - mask 0x0100 - Test is 'runnable'.
+Bit 7 - mask 0x0080 - Tags are present.
+Bit 6 - mask 0x0040 - File content is present.
+Bit 5 - mask 0x0020 - File MIME type is present.
+Bit 4 - mask 0x0010 - EOF marker.
+Bit 3 - mask 0x0008 - Must be zero in version 2.
+
+Test status gets three bits:
+Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+000 - undefined / no test
+001 - Enumeration / existence
+002 - In progress
+003 - Success
+004 - Unexpected Success
+005 - Skipped
+006 - Failed
+007 - Expected failure
+
+After the flags field is a number field giving the length in bytes for the
+entire packet including the signature and the checksum. This length must
+be less than 4MiB (at most 4194303 bytes). The encoding can obviously record a larger
+number but one of the goals is to avoid requiring large buffers, or causing
+large latency in the packet forward/processing pipeline. Larger file
+attachments can be communicated in multiple packets, and the overhead in such a
+4MiB packet is approximately 0.2%.
+
+The rest of the packet is a series of optional features as specified by the set
+feature bits in the flags field. When absent they are entirely absent.
+
+Forwarding and multiplexing of packets can be done without interpreting the
+remainder of the packet until the routing code and checksum (which are both at
+the end of the packet). Additionally, routers can often avoid copying or moving
+the bulk of the packet, as long as the routing code size increase doesn't force
+the length encoding to take up a new byte (which will only happen to packets
+less than or equal to 16KiB in length) - large packets are very efficient to
+route.
+
+Timestamp when present is a 32 bit unsigned integer for seconds, and a variable
+length number for nanoseconds, representing UTC time since Unix Epoch in
+seconds and nanoseconds.
+
+Test id when present is a UTF-8 string. The test id should uniquely identify
+runnable tests such that they can be selected individually. For tests and other
+actions which cannot be individually run (such as test
+fixtures/layers/subtests) uniqueness is not required (though being human
+meaningful is highly recommended).
+
+Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+There are no restrictions on tag content (other than the restrictions on UTF-8
+strings in subunit in general). Tags have no ordering.
+
+When a MIME type is present, it defines the MIME type for the file across all
+packets of the same file (routing code + testid + name uniquely identifies a file,
+reset when EOF is flagged). If a file never has a MIME type set, it should be
+treated as application/octet-stream.
+
+File content when present is a UTF-8 string for the name followed by the length
+in bytes of the content, and then the content octets.
+
+If present, the routing code is a UTF-8 string. The routing code is used to
+determine which test backend a test was running on when doing data analysis,
+and to route stdin to the test process if interaction is required.
+
+Multiplexers SHOULD add a routing code if none is present, and prefix any
+existing routing code with a routing code ('/' separated) if one is already
+present. For example, a multiplexer might label each stream it is multiplexing
+with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+code '3' from stream '0' would adjust the route code when forwarding the packet
+to be '0/3'.
+
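+As an illustration only (not part of the protocol), the prefixing rule can be
+expressed as a small Python helper; the function name here is hypothetical::
+
+    def prefix_route_code(stream_ordinal, existing_code):
+        # Apply the '/'-separated prefixing rule described above.
+        if existing_code is None:
+            return stream_ordinal
+        return "%s/%s" % (stream_ordinal, existing_code)
+
+    print(prefix_route_code("0", "3"))   # route code '3' arriving on stream '0' -> '0/3'
+    print(prefix_route_code("1", None))  # no existing route code on stream '1' -> '1'
+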
+Following the end of the packet is a CRC-32 checksum of the contents of the
+packet including the signature.
+
+Example packets
+~~~~~~~~~~~~~~~
+
+Trivial test "foo" enumeration packet, with test id, runnable set,
+status=enumeration. Spaces below are to visually break up signature / flags /
+length / testid / crc32
+
+b3 2901 0c 03666f6f 08555f1b
+
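+A minimal Python 3 sketch (illustration only, not part of the specification)
+that unpacks this example packet field by field; it assumes ``zlib.crc32`` is
+the intended CRC-32 variant::
+
+    import struct
+    import zlib
+
+    packet = bytes.fromhex("b3 2901 0c 03666f6f 08555f1b")
+
+    assert packet[0] == 0xB3                     # signature byte
+    flags = struct.unpack(">H", packet[1:3])[0]  # 16-bit flags, MSB first
+    print(flags >> 12)                           # version          -> 2
+    print(bool(flags & 0x0800))                  # test id present  -> True
+    print(bool(flags & 0x0100))                  # runnable         -> True
+    print(flags & 0x0007)                        # status           -> 1 (enumeration)
+    print(packet[3])                             # length: 0x0c, prefix 00 -> 12 bytes
+    name_len = packet[4]                         # 0x03: one-octet number, value 3
+    print(packet[5:5 + name_len].decode("utf-8"))            # test id -> 'foo'
+    print("%08x" % struct.unpack(">I", packet[-4:])[0])      # stored CRC -> 08555f1b
+    print("%08x" % (zlib.crc32(packet[:-4]) & 0xFFFFFFFF))   # computed CRC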
+
+Version 1 (and 1.1)
+===================
+
+Version 1 (and 1.1) are mostly human readable protocols.
+
+Sample subunit wire contents
+----------------------------
+
+The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+Subunit protocol description
+============================
+
+This description is being ported to an EBNF style. Currently it is only partly
+in that style, but should be fairly clear all the same. When in doubt, refer to
+the source (and ideally help fix up the description!). Generally the protocol
+is line orientated and consists of either directives and their parameters, or -
+when outside a DETAILS region - unexpected lines which are not interpreted by
+the parser and should be forwarded unaltered.
+
+test|testing|test:|testing: test LABEL
+success|success:|successful|successful: test LABEL
+success|success:|successful|successful: test LABEL DETAILS
+failure: test LABEL
+failure: test LABEL DETAILS
+error: test LABEL
+error: test LABEL DETAILS
+skip[:] test LABEL
+skip[:] test LABEL DETAILS
+xfail[:] test LABEL
+xfail[:] test LABEL DETAILS
+uxsuccess[:] test LABEL
+uxsuccess[:] test LABEL DETAILS
+progress: [+|-]X
+progress: push
+progress: pop
+tags: [-]TAG ...
+time: YYYY-MM-DD HH:MM:SSZ
+
+LABEL: UTF8*
+NAME: UTF8*
+DETAILS ::= BRACKETED | MULTIPART
+BRACKETED ::= '[' CR UTF8-lines ']' CR
+MULTIPART ::= '[ multipart' CR PART* ']' CR
+PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+unexpected output on stdout -> stdout.
+exit w/0 or last test completing -> error
+
+Tags given outside a test are applied to all following tests.
+Tags given after a test: line and before the result line for the same test
+apply only to that test, and inherit the current global tags.
+A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+applying to a single test, or to cancel a global tag.
+
+The progress directive is used to provide progress information about a stream
+so that stream consumers can provide completion estimates, progress bars and so
+on. Stream generators that know how many tests will be present in the stream
+should output "progress: COUNT". Stream filters that add tests should output
+"progress: +COUNT", and those that remove tests should output
+"progress: -COUNT". An absolute count should reset the progress indicators in
+use - it indicates that two separate streams from different generators have
+been trivially concatenated together, and there is no knowledge of how many
+more complete streams are incoming. Smart concatenation could scan each stream
+for their count and sum them, or alternatively translate absolute counts into
+relative counts inline. It is recommended that outputters avoid absolute counts
+unless necessary. The push and pop directives are used to provide local regions
+for progress reporting. This fits with hierarchically operating test
+environments - such as those that organise tests into suites - the top-most
+runner can report on the number of suites, and each suite surround its output
+with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+the progress of the restored level by one step. Encountering progress
+directives between the start and end of a test pair indicates that a previous
+test was interrupted and did not cleanly terminate: it should be implicitly
+closed with an error (the same as when a stream ends with no closing test
+directive for the most recently started test).
+
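+The nesting behaviour can be illustrated with a toy Python 3 sketch (the
+bundled Python bindings carry a real implementation in
+``subunit.progress_model``; everything below is illustrative only)::
+
+    class ProgressSketch(object):
+        def __init__(self):
+            self.stack = [[0, 0]]        # one [position, width] frame per region
+
+        def set_width(self, width):      # "progress: N" (absolute count)
+            self.stack[-1][1] = width
+
+        def adjust_width(self, offset):  # "progress: +N" / "progress: -N"
+            self.stack[-1][1] += offset
+
+        def advance(self):               # a test in the current region completed
+            self.stack[-1][0] += 1
+
+        def push(self):                  # "progress: push"
+            self.stack.append([0, 0])
+
+        def pop(self):                   # "progress: pop" advances the restored level too
+            self.stack.pop()
+            self.advance()
+
+    progress = ProgressSketch()
+    progress.set_width(2)      # the top-level runner announces two suites
+    progress.push()            # the first suite starts
+    progress.adjust_width(3)   # it announces three tests
+    for _ in range(3):
+        progress.advance()
+    progress.pop()             # leaving the suite advances the outer level
+    print(progress.stack)      # -> [[1, 2]]: one of two suites complete
+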
+The time directive acts as a clock event - it sets the time for all future
+events. The value should be a valid ISO8601 time.
+
+The skip, xfail and uxsuccess outcomes are not supported by all testing
+environments. In Python the testtools (https://launchpad.net/testtools)
+library is used to translate these automatically if an older Python version
+that does not support them is in use. See the testtools documentation for the
+translation policy.
+
+skip is used to indicate a test was discovered but not executed. xfail is used
+to indicate a test that errored in some expected fashion (also known as "TODO"
+tests in some frameworks). uxsuccess is used to indicate an unexpected success
+where a test thought to be failing actually passes. It is complementary to
+xfail.
+
+Hacking on subunit
+------------------
+
+Releases
+========
+
+* Update versions in configure.ac and python/subunit/__init__.py.
+* Make PyPI and regular tarball releases. Upload the regular one to LP, the
+ PyPI one to PyPI.
+* Push a tagged commit.
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2 b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2
new file mode 100755
index 00000000000..9725820cfc5
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Convert a version 1 subunit stream to version 2 stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import ExtendedToStreamDecorator
+
+from subunit import StreamResultToBytes
+from subunit.filters import find_stream, run_tests_from_stream
+
+
+def make_options(description):
+ parser = OptionParser(description=__doc__)
+ return parser
+
+
+def main():
+ parser = make_options(__doc__)
+ (options, args) = parser.parse_args()
+ run_tests_from_stream(find_stream(sys.stdin, args),
+ ExtendedToStreamDecorator(StreamResultToBytes(sys.stdout)))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1 b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1
new file mode 100755
index 00000000000..0072307f0fb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Convert a version 2 subunit stream to a version 1 stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import ByteStreamToStreamResult, TestProtocolClient
+from subunit.filters import find_stream, run_tests_from_stream
+
+
+def make_options(description):
+ parser = OptionParser(description=__doc__)
+ return parser
+
+
+def main():
+ parser = make_options(__doc__)
+ (options, args) = parser.parse_args()
+ case = ByteStreamToStreamResult(
+ find_stream(sys.stdin, args), non_subunit_name='stdout')
+ result = StreamToExtendedDecorator(TestProtocolClient(sys.stdout))
+ # What about stdout chunks?
+ result.startTestRun()
+ case.run(result)
+ result.stopTestRun()
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter
new file mode 100755
index 00000000000..e9e2bb06325
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 200-2013 Robert Collins <robertc@robertcollins.net>
+# (C) 2009 Martin Pool
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to include/exclude tests.
+
+The default is to strip successful tests.
+
+Tests can be filtered by Python regular expressions with --with and --without,
+which match both the test name and the error text (if any). The result
+contains tests which match any of the --with expressions and none of the
+--without expressions. For case-insensitive matching prepend '(?i)'.
+Remember to quote shell metacharacters.
+"""
+
+from optparse import OptionParser
+import sys
+import re
+
+from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
+
+from subunit import (
+ DiscardStream,
+ ProtocolTestCase,
+ StreamResultToBytes,
+ read_test_list,
+ )
+from subunit.filters import filter_by_result, find_stream
+from subunit.test_results import (
+ and_predicates,
+ make_tag_filter,
+ TestResultFilter,
+ )
+
+
+def make_options(description):
+ parser = OptionParser(description=__doc__)
+ parser.add_option("--error", action="store_false",
+ help="include errors", default=False, dest="error")
+ parser.add_option("-e", "--no-error", action="store_true",
+ help="exclude errors", dest="error")
+ parser.add_option("--failure", action="store_false",
+ help="include failures", default=False, dest="failure")
+ parser.add_option("-f", "--no-failure", action="store_true",
+ help="exclude failures", dest="failure")
+ parser.add_option("--passthrough", action="store_false",
+ help="Forward non-subunit input as 'stdout'.", default=False,
+ dest="no_passthrough")
+ parser.add_option("--no-passthrough", action="store_true",
+ help="Discard all non subunit input.", default=False,
+ dest="no_passthrough")
+ parser.add_option("-s", "--success", action="store_false",
+ help="include successes", dest="success")
+ parser.add_option("--no-success", action="store_true",
+ help="exclude successes", default=True, dest="success")
+ parser.add_option("--no-skip", action="store_true",
+ help="exclude skips", dest="skip")
+ parser.add_option("--xfail", action="store_false",
+ help="include expected failures", default=True, dest="xfail")
+ parser.add_option("--no-xfail", action="store_true",
+ help="exclude expected failures", default=True, dest="xfail")
+ parser.add_option(
+ "--with-tag", type=str,
+ help="include tests with these tags", action="append", dest="with_tags")
+ parser.add_option(
+ "--without-tag", type=str,
+ help="exclude tests with these tags", action="append", dest="without_tags")
+ parser.add_option("-m", "--with", type=str,
+ help="regexp to include (case-sensitive by default)",
+ action="append", dest="with_regexps")
+ parser.add_option("--fixup-expected-failures", type=str,
+ help="File with list of test ids that are expected to fail; on failure "
+ "their result will be changed to xfail; on success they will be "
+ "changed to error.", dest="fixup_expected_failures", action="append")
+ parser.add_option("--without", type=str,
+ help="regexp to exclude (case-sensitive by default)",
+ action="append", dest="without_regexps")
+ parser.add_option("-F", "--only-genuine-failures", action="callback",
+ callback=only_genuine_failures_callback,
+ help="Only pass through failures and exceptions.")
+ return parser
+
+
+def only_genuine_failures_callback(option, opt, value, parser):
+ parser.rargs.insert(0, '--no-passthrough')
+ parser.rargs.insert(0, '--no-xfail')
+ parser.rargs.insert(0, '--no-skip')
+ parser.rargs.insert(0, '--no-success')
+
+
+def _compile_re_from_list(l):
+ return re.compile("|".join(l), re.MULTILINE)
+
+
+def _make_regexp_filter(with_regexps, without_regexps):
+ """Make a callback that checks tests against regexps.
+
+ with_regexps and without_regexps are each either a list of regexp strings,
+ or None.
+ """
+ with_re = with_regexps and _compile_re_from_list(with_regexps)
+ without_re = without_regexps and _compile_re_from_list(without_regexps)
+
+ def check_regexps(test, outcome, err, details, tags):
+ """Check if this test and error match the regexp filters."""
+ test_str = str(test) + outcome + str(err) + str(details)
+ if with_re and not with_re.search(test_str):
+ return False
+ if without_re and without_re.search(test_str):
+ return False
+ return True
+ return check_regexps
+
+
+def _make_result(output, options, predicate):
+ """Make the result that we'll send the test outcomes to."""
+ fixup_expected_failures = set()
+ for path in options.fixup_expected_failures or ():
+ fixup_expected_failures.update(read_test_list(path))
+ return StreamToExtendedDecorator(TestResultFilter(
+ ExtendedToStreamDecorator(
+ StreamResultToBytes(output)),
+ filter_error=options.error,
+ filter_failure=options.failure,
+ filter_success=options.success,
+ filter_skip=options.skip,
+ filter_xfail=options.xfail,
+ filter_predicate=predicate,
+ fixup_expected_failures=fixup_expected_failures))
+
+
+def main():
+ parser = make_options(__doc__)
+ (options, args) = parser.parse_args()
+
+ regexp_filter = _make_regexp_filter(
+ options.with_regexps, options.without_regexps)
+ tag_filter = make_tag_filter(options.with_tags, options.without_tags)
+ filter_predicate = and_predicates([regexp_filter, tag_filter])
+
+ filter_by_result(
+ lambda output_to: _make_result(sys.stdout, options, filter_predicate),
+ output_path=None,
+ passthrough=(not options.no_passthrough),
+ forward=False,
+ protocol_version=2,
+ input_stream=find_stream(sys.stdin, args))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls
new file mode 100755
index 00000000000..8c6a1e7e8f5
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""List tests in a subunit stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import (
+ CopyStreamResult, StreamToExtendedDecorator, StreamResultRouter,
+ StreamSummary)
+
+from subunit import ByteStreamToStreamResult
+from subunit.filters import find_stream, run_tests_from_stream
+from subunit.test_results import (
+ CatFiles,
+ TestIdPrintingResult,
+ )
+
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--times", action="store_true",
+ help="list the time each test took (requires a timestamped stream)",
+ default=False)
+parser.add_option("--exists", action="store_true",
+ help="list tests that are reported as existing (as well as ran)",
+ default=False)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+test = ByteStreamToStreamResult(
+ find_stream(sys.stdin, args), non_subunit_name="stdout")
+result = TestIdPrintingResult(sys.stdout, options.times, options.exists)
+if not options.no_passthrough:
+ result = StreamResultRouter(result)
+ cat = CatFiles(sys.stdout)
+ result.add_rule(cat, 'test_id', test_id=None)
+summary = StreamSummary()
+result = CopyStreamResult([result, summary])
+result.startTestRun()
+test.run(result)
+result.stopTestRun()
+if summary.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify
new file mode 100755
index 00000000000..bc833da779d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Notify the user of a finished test run."""
+
+import sys
+
+import pygtk
+pygtk.require('2.0')
+import pynotify
+from testtools import StreamToExtendedDecorator
+
+from subunit import TestResultStats
+from subunit.filters import run_filter_script
+
+if not pynotify.init("Subunit-notify"):
+ sys.exit(1)
+
+
+def notify_of_result(result):
+ result = result.decorated
+ if result.failed_tests > 0:
+ summary = "Test run failed"
+ else:
+ summary = "Test run successful"
+ body = "Total tests: %d; Passed: %d; Failed: %d" % (
+ result.total_tests,
+ result.passed_tests,
+ result.failed_tests,
+ )
+ nw = pynotify.Notification(summary, body)
+ nw.show()
+
+
+run_filter_script(
+ lambda output:StreamToExtendedDecorator(TestResultStats(output)),
+ __doc__, notify_of_result, protocol_version=2)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats
new file mode 100755
index 00000000000..79733b06226
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import TestResultStats
+from subunit.filters import run_filter_script
+
+
+result = TestResultStats(sys.stdout)
+def show_stats(r):
+ r.decorated.formatStats()
+run_filter_script(
+ lambda output:StreamToExtendedDecorator(result),
+ __doc__, show_stats, protocol_version=2, passthrough_subunit=False)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags
new file mode 100755
index 00000000000..10224924eac
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter to change tags on a subunit stream.
+
+subunit-tags foo -> adds foo
+subunit-tags foo -bar -> adds foo and removes bar
+"""
+
+import sys
+
+from subunit import tag_stream
+
+sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk
new file mode 100755
index 00000000000..78b43097ec9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+### The GTK progress bar __init__ function is derived from the pygtk tutorial:
+# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
+#
+# The GTK Tutorial is Copyright (C) 1997 Ian Main.
+#
+# Copyright (C) 1998-1999 Tony Gale.
+#
+# Permission is granted to make and distribute verbatim copies of this manual
+# provided the copyright notice and this permission notice are preserved on all
+# copies.
+#
+# Permission is granted to copy and distribute modified versions of this
+# document under the conditions for verbatim copying, provided that this
+# copyright notice is included exactly as in the original, and that the entire
+# resulting derived work is distributed under the terms of a permission notice
+# identical to this one.
+#
+# Permission is granted to copy and distribute translations of this document
+# into another language, under the above conditions for modified versions.
+#
+# If you are intending to incorporate this document into a published work,
+# please contact the maintainer, and we will make an effort to ensure that you
+# have the most up to date information available.
+#
+# There is no guarantee that this document lives up to its intended purpose.
+# This is simply provided as a free resource. As such, the authors and
+# maintainers of the information provided within can not make any guarantee
+# that the information is even accurate.
+
+"""Display a subunit stream in a gtk progress window."""
+
+import sys
+import threading
+import unittest
+
+import pygtk
+pygtk.require('2.0')
+import gtk, gtk.gdk, gobject
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import (
+ PROGRESS_POP,
+ PROGRESS_PUSH,
+ PROGRESS_SET,
+ ByteStreamToStreamResult,
+ )
+from subunit.progress_model import ProgressModel
+
+
+class GTKTestResult(unittest.TestResult):
+
+ def __init__(self):
+ super(GTKTestResult, self).__init__()
+ # Instance variables (in addition to TestResult)
+ self.window = None
+ self.run_label = None
+ self.ok_label = None
+ self.not_ok_label = None
+ self.total_tests = None
+
+ self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+ self.window.set_resizable(True)
+
+ self.window.connect("destroy", gtk.main_quit)
+ self.window.set_title("Tests...")
+ self.window.set_border_width(0)
+
+ vbox = gtk.VBox(False, 5)
+ vbox.set_border_width(10)
+ self.window.add(vbox)
+ vbox.show()
+
+ # Create a centering alignment object
+ align = gtk.Alignment(0.5, 0.5, 0, 0)
+ vbox.pack_start(align, False, False, 5)
+ align.show()
+
+ # Create the ProgressBar
+ self.pbar = gtk.ProgressBar()
+ align.add(self.pbar)
+ self.pbar.set_text("Running")
+ self.pbar.show()
+ self.progress_model = ProgressModel()
+
+ separator = gtk.HSeparator()
+ vbox.pack_start(separator, False, False, 0)
+ separator.show()
+
+ # rows, columns, homogeneous
+ table = gtk.Table(2, 3, False)
+ vbox.pack_start(table, False, True, 0)
+ table.show()
+ # Show summary details about the run. Could use an expander.
+ label = gtk.Label("Run:")
+ table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.run_label = gtk.Label("N/A")
+ table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.run_label.show()
+
+ label = gtk.Label("OK:")
+ table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.ok_label = gtk.Label("N/A")
+ table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.ok_label.show()
+
+ label = gtk.Label("Not OK:")
+ table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.not_ok_label = gtk.Label("N/A")
+ table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.not_ok_label.show()
+
+ self.window.show()
+ # For the demo.
+ self.window.set_keep_above(True)
+ self.window.present()
+
+ def stopTest(self, test):
+ super(GTKTestResult, self).stopTest(test)
+ gobject.idle_add(self._stopTest)
+
+ def _stopTest(self):
+ self.progress_model.advance()
+ if self.progress_model.width() == 0:
+ self.pbar.pulse()
+ else:
+ pos = self.progress_model.pos()
+ width = self.progress_model.width()
+ percentage = (pos / float(width))
+ self.pbar.set_fraction(percentage)
+
+ def stopTestRun(self):
+ try:
+ super(GTKTestResult, self).stopTestRun()
+ except AttributeError:
+ pass
+ gobject.idle_add(self.pbar.set_text, 'Finished')
+
+ def addError(self, test, err):
+ super(GTKTestResult, self).addError(test, err)
+ gobject.idle_add(self.update_counts)
+
+ def addFailure(self, test, err):
+ super(GTKTestResult, self).addFailure(test, err)
+ gobject.idle_add(self.update_counts)
+
+ def addSuccess(self, test):
+ super(GTKTestResult, self).addSuccess(test)
+ gobject.idle_add(self.update_counts)
+
+ def addSkip(self, test, reason):
+ # addSkip is new in Python 2.7/3.1
+ addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
+ if callable(addSkip):
+ addSkip(test, reason)
+ gobject.idle_add(self.update_counts)
+
+ def addExpectedFailure(self, test, err):
+ # addExpectedFailure is new in Python 2.7/3.1
+ addExpectedFailure = getattr(super(GTKTestResult, self),
+ 'addExpectedFailure', None)
+ if callable(addExpectedFailure):
+ addExpectedFailure(test, err)
+ gobject.idle_add(self.update_counts)
+
+ def addUnexpectedSuccess(self, test):
+ # addUnexpectedSuccess is new in Python 2.7/3.1
+ addUnexpectedSuccess = getattr(super(GTKTestResult, self),
+ 'addUnexpectedSuccess', None)
+ if callable(addUnexpectedSuccess):
+ addUnexpectedSuccess(test)
+ gobject.idle_add(self.update_counts)
+
+ def progress(self, offset, whence):
+ if whence == PROGRESS_PUSH:
+ self.progress_model.push()
+ elif whence == PROGRESS_POP:
+ self.progress_model.pop()
+ elif whence == PROGRESS_SET:
+ self.total_tests = offset
+ self.progress_model.set_width(offset)
+ else:
+ self.total_tests += offset
+ self.progress_model.adjust_width(offset)
+
+ def time(self, a_datetime):
+ # We don't try to estimate completion yet.
+ pass
+
+ def update_counts(self):
+ self.run_label.set_text(str(self.testsRun))
+ bad = len(self.failures + self.errors)
+ self.ok_label.set_text(str(self.testsRun - bad))
+ self.not_ok_label.set_text(str(bad))
+
+gobject.threads_init()
+result = StreamToExtendedDecorator(GTKTestResult())
+test = ByteStreamToStreamResult(sys.stdin, non_subunit_name='stdout')
+# Get setup
+while gtk.events_pending():
+ gtk.main_iteration()
+# Start IO
+def run_and_finish():
+ test.run(result)
+ result.stopTestRun()
+t = threading.Thread(target=run_and_finish)
+t.daemon = True
+result.startTestRun()
+t.start()
+gtk.main()
+if result.decorated.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml
new file mode 100755
index 00000000000..8e827d53740
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Convert a subunit stream to JUnit's XML format."""
+
+
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit.filters import run_filter_script
+
+try:
+ from junitxml import JUnitXmlResult
+except ImportError:
+ sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
+ "http://pypi.python.org/pypi/junitxml) is required for this filter.")
+ raise
+
+
+run_filter_script(
+ lambda output:StreamToExtendedDecorator(JUnitXmlResult(output)), __doc__,
+ protocol_version=2)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit
new file mode 100755
index 00000000000..d10ceea6f09
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Display a subunit stream through python's unittest test runner."""
+
+from operator import methodcaller
+from optparse import OptionParser
+import sys
+import unittest
+
+from testtools import StreamToExtendedDecorator, DecorateTestCaseResult, StreamResultRouter
+
+from subunit import ByteStreamToStreamResult
+from subunit.filters import find_stream
+from subunit.test_results import CatFiles
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("--progress", action="store_true",
+ help="Use bzrlib's test reporter (requires bzrlib)",
+ default=False)
+(options, args) = parser.parse_args()
+test = ByteStreamToStreamResult(
+ find_stream(sys.stdin, args), non_subunit_name='stdout')
+def wrap_result(result):
+ result = StreamToExtendedDecorator(result)
+ if not options.no_passthrough:
+ result = StreamResultRouter(result)
+ result.add_rule(CatFiles(sys.stdout), 'test_id', test_id=None)
+ return result
+test = DecorateTestCaseResult(test, wrap_result,
+ before_run=methodcaller('startTestRun'),
+ after_run=methodcaller('stopTestRun'))
+if options.progress:
+ from bzrlib.tests import TextTestRunner
+ from bzrlib import ui
+ ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
+ runner = TextTestRunner()
+else:
+ runner = unittest.TextTestRunner(verbosity=2)
+if runner.run(test).wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit
new file mode 100755
index 00000000000..c571972225d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter that reads a TAP stream and outputs a subunit stream.
+
+More information on TAP is available at
+http://testanything.org/wiki/index.php/Main_Page.
+"""
+
+import sys
+
+from subunit import TAP2SubUnit
+sys.exit(TAP2SubUnit(sys.stdin, sys.stdout))
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py
new file mode 100644
index 00000000000..8352585fb3a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py
@@ -0,0 +1,1320 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Subunit - a streaming test protocol
+
+Overview
+++++++++
+
+The ``subunit`` Python package provides a number of ``unittest`` extensions
+which can be used to cause tests to output Subunit, to parse Subunit streams
+into test activity, perform seamless test isolation within a regular test
+case and variously sort, filter and report on test runs.
+
+
+Key Classes
+-----------
+
+The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
+extension which will translate a test run into a Subunit stream.
+
+The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
+protocol and the ``unittest.TestCase`` object protocol. It is used to translate
+a stream into a test run, which regular ``unittest.TestResult`` objects can
+process and report/inspect.
+
+Subunit has support for non-blocking usage too, for use with asyncore or
+Twisted. See the ``TestProtocolServer`` parser class for more details.
+
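+For orientation, a minimal round trip might look like this (``suite`` is any
+ordinary ``unittest`` suite; the file name is purely illustrative)::
+
+  # Serialise a test run to a subunit byte stream.
+  stream = open("tests.subunit", "wb")
+  suite.run(subunit.TestProtocolClient(stream))
+  stream.close()
+
+  # Later, replay the recorded run into any TestResult.
+  stream = open("tests.subunit", "rb")
+  subunit.ProtocolTestCase(stream).run(unittest.TestResult())
+  stream.close()
+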
+Subunit includes extensions to the Python ``TestResult`` protocol. These are
+all done in a compatible manner: ``TestResult`` objects that do not implement
+the extension methods will not cause errors to be raised; instead the extension
+will either lose fidelity (for instance, folding expected failures to success
+in Python versions < 2.7 or 3.1), or discard the extended data (for extra
+details, tags, timestamping and progress markers).
+
+The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
+``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
+which can be used instead of the usual python unittest parameter.
+When used the value of details should be a dict from ``string`` to
+``testtools.content.Content`` objects. This is a draft API being worked on with
+the Testing In Python mailing list, with the goal of permitting a common
+way to provide additional data beyond a traceback, such as captured data from
+disk, logging messages etc. The reference for this API is in testtools (0.9.0
+and newer).
+
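+Purely as an illustration (``result`` and ``test`` stand for a details-aware
+TestResult and the test being reported)::
+
+  from testtools import content, content_type
+
+  text_plain = content_type.ContentType("text", "plain", {"charset": "utf8"})
+  result.addError(test, details={
+      "log": content.Content(text_plain, lambda: [b"captured log output"]),
+  })
+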
+The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
+remove tags in the test run that is currently executing. If called when no
+test is in progress (that is, if called outside of the ``startTest``,
+``stopTest`` pair), the tags apply to all subsequent tests. If called
+when a test is in progress, then the tags only apply to that test.
+
+The ``time(a_datetime)`` method is called (if present) when a ``time:``
+directive is encountered in a Subunit stream. This is used to tell a TestResult
+about the time that events in the stream occurred at, to allow reconstructing
+test timing from a stream.
+
+The ``progress(offset, whence)`` method controls progress data for a stream.
+The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
+subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
+ignore the offset parameter.
+
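+An illustrative sketch, assuming ``result`` is a TestResult implementing
+these optional extension methods::
+
+  result.tags(set(["slow"]), set())          # tag subsequent tests as 'slow'
+  result.progress(10, subunit.PROGRESS_SET)  # exactly 10 tests remain
+  result.progress(-1, subunit.PROGRESS_CUR)  # one fewer test remains
+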
+
+Python test support
+-------------------
+
+``subunit.run`` is a convenience wrapper to run a Python test suite via
+the command line, reporting via Subunit::
+
+ $ python -m subunit.run mylib.tests.test_suite
+
+The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
+tests, allowing isolation between the test runner and some tests.
+
+Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
+tests that will fork() before that individual test is run.
+
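+For instance (illustrative only; ``some_module`` is a made-up module holding
+global state), an isolated test case might look like::
+
+  class TestGlobalState(subunit.IsolatedTestCase):
+
+      def test_mutates_module_config(self):
+          some_module.CONFIG = "changed"  # runs in the forked child only
+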
+``ExecTestCase`` is a convenience wrapper for running an external
+program to get a Subunit stream and then report that back to an arbitrary
+result object::
+
+ class AggregateTests(subunit.ExecTestCase):
+
+ def test_script_one(self):
+ './bin/script_one'
+
+ def test_script_two(self):
+ './bin/script_two'
+
+  # Normally your test loading would take care of this automatically;
+ # It is only spelt out in detail here for clarity.
+ suite = unittest.TestSuite([AggregateTests("test_script_one"),
+ AggregateTests("test_script_two")])
+ # Create any TestResult class you like.
+ result = unittest._TextTestResult(sys.stdout)
+ # And run your suite as normal, Subunit will exec each external script as
+ # needed and report to your result object.
+ suite.run(result)
+
+Utility modules
+---------------
+
+* subunit.chunked contains HTTP chunked encoding/decoding logic.
+* subunit.test_results contains TestResult helper classes.
+"""
+
+import os
+import re
+import subprocess
+import sys
+import unittest
+try:
+ from io import UnsupportedOperation as _UnsupportedOperation
+except ImportError:
+ _UnsupportedOperation = AttributeError
+
+from extras import safe_hasattr
+from testtools import content, content_type, ExtendedToOriginalDecorator
+from testtools.content import TracebackContent
+from testtools.compat import _b, _u, BytesIO, StringIO
+try:
+ from testtools.testresult.real import _StringException
+ RemoteException = _StringException
+except ImportError:
+ raise ImportError ("testtools.testresult.real does not contain "
+ "_StringException, check your version.")
+from testtools import testresult, CopyStreamResult
+
+from subunit import chunked, details, iso8601, test_results
+from subunit.v2 import ByteStreamToStreamResult, StreamResultToBytes
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 0, 16, 'final', 0)
+
+PROGRESS_SET = 0
+PROGRESS_CUR = 1
+PROGRESS_PUSH = 2
+PROGRESS_POP = 3
+
+
+def test_suite():
+ import subunit.tests
+ return subunit.tests.test_suite()
+
+
+def join_dir(base_path, path):
+ """
+ Returns an absolute path to C{path}, calculated relative to the parent
+ of C{base_path}.
+
+ @param base_path: A path to a file or directory.
+ @param path: An absolute path, or a path relative to the containing
+ directory of C{base_path}.
+
+ @return: An absolute path to C{path}.
+ """
+ return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
+
+
+def tags_to_new_gone(tags):
+ """Split a list of tags into a new_set and a gone_set."""
+ new_tags = set()
+ gone_tags = set()
+ for tag in tags:
+ if tag[0] == '-':
+ gone_tags.add(tag[1:])
+ else:
+ new_tags.add(tag)
+ return new_tags, gone_tags
+
+
+class DiscardStream(object):
+ """A filelike object which discards what is written to it."""
+
+ def fileno(self):
+ raise _UnsupportedOperation()
+
+ def write(self, bytes):
+ pass
+
+ def read(self, len=0):
+ return _b('')
+
+
+class _ParserState(object):
+ """State for the subunit parser."""
+
+ def __init__(self, parser):
+ self.parser = parser
+ self._test_sym = (_b('test'), _b('testing'))
+ self._colon_sym = _b(':')
+ self._error_sym = (_b('error'),)
+ self._failure_sym = (_b('failure'),)
+ self._progress_sym = (_b('progress'),)
+ self._skip_sym = _b('skip')
+ self._success_sym = (_b('success'), _b('successful'))
+ self._tags_sym = (_b('tags'),)
+ self._time_sym = (_b('time'),)
+ self._xfail_sym = (_b('xfail'),)
+ self._uxsuccess_sym = (_b('uxsuccess'),)
+ self._start_simple = _u(" [")
+ self._start_multipart = _u(" [ multipart")
+
+ def addError(self, offset, line):
+ """An 'error:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addExpectedFail(self, offset, line):
+ """An 'xfail:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addFailure(self, offset, line):
+ """A 'failure:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addSkip(self, offset, line):
+ """A 'skip:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addSuccess(self, offset, line):
+ """A 'success:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def lineReceived(self, line):
+ """a line has been received."""
+ parts = line.split(None, 1)
+ if len(parts) == 2 and line.startswith(parts[0]):
+ cmd, rest = parts
+ offset = len(cmd) + 1
+ cmd = cmd.rstrip(self._colon_sym)
+ if cmd in self._test_sym:
+ self.startTest(offset, line)
+ elif cmd in self._error_sym:
+ self.addError(offset, line)
+ elif cmd in self._failure_sym:
+ self.addFailure(offset, line)
+ elif cmd in self._progress_sym:
+ self.parser._handleProgress(offset, line)
+ elif cmd in self._skip_sym:
+ self.addSkip(offset, line)
+ elif cmd in self._success_sym:
+ self.addSuccess(offset, line)
+ elif cmd in self._tags_sym:
+ self.parser._handleTags(offset, line)
+ self.parser.subunitLineReceived(line)
+ elif cmd in self._time_sym:
+ self.parser._handleTime(offset, line)
+ self.parser.subunitLineReceived(line)
+ elif cmd in self._xfail_sym:
+ self.addExpectedFail(offset, line)
+ elif cmd in self._uxsuccess_sym:
+ self.addUnexpectedSuccess(offset, line)
+ else:
+ self.parser.stdOutLineReceived(line)
+ else:
+ self.parser.stdOutLineReceived(line)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(_u('unknown state of '))
+
+ def startTest(self, offset, line):
+ """A test start command received."""
+ self.parser.stdOutLineReceived(line)
+
+
+class _InTest(_ParserState):
+ """State for the subunit parser after reading a test: directive."""
+
+ def _outcome(self, offset, line, no_details, details_state):
+ """An outcome directive has been read.
+
+ :param no_details: Callable to call when no details are presented.
+ :param details_state: The state to switch to for details
+ processing of this outcome.
+ """
+ test_name = line[offset:-1].decode('utf8')
+ if self.parser.current_test_description == test_name:
+ self.parser._state = self.parser._outside_test
+ self.parser.current_test_description = None
+ no_details()
+ self.parser.client.stopTest(self.parser._current_test)
+ self.parser._current_test = None
+ self.parser.subunitLineReceived(line)
+ elif self.parser.current_test_description + self._start_simple == \
+ test_name:
+ self.parser._state = details_state
+ details_state.set_simple()
+ self.parser.subunitLineReceived(line)
+ elif self.parser.current_test_description + self._start_multipart == \
+ test_name:
+ self.parser._state = details_state
+ details_state.set_multipart()
+ self.parser.subunitLineReceived(line)
+ else:
+ self.parser.stdOutLineReceived(line)
+
+ def _error(self):
+ self.parser.client.addError(self.parser._current_test,
+ details={})
+
+ def addError(self, offset, line):
+ """An 'error:' directive has been read."""
+ self._outcome(offset, line, self._error,
+ self.parser._reading_error_details)
+
+ def _xfail(self):
+ self.parser.client.addExpectedFailure(self.parser._current_test,
+ details={})
+
+ def addExpectedFail(self, offset, line):
+ """An 'xfail:' directive has been read."""
+ self._outcome(offset, line, self._xfail,
+ self.parser._reading_xfail_details)
+
+ def _uxsuccess(self):
+ self.parser.client.addUnexpectedSuccess(self.parser._current_test)
+
+ def addUnexpectedSuccess(self, offset, line):
+ """A 'uxsuccess:' directive has been read."""
+ self._outcome(offset, line, self._uxsuccess,
+ self.parser._reading_uxsuccess_details)
+
+ def _failure(self):
+ self.parser.client.addFailure(self.parser._current_test, details={})
+
+ def addFailure(self, offset, line):
+ """A 'failure:' directive has been read."""
+ self._outcome(offset, line, self._failure,
+ self.parser._reading_failure_details)
+
+ def _skip(self):
+ self.parser.client.addSkip(self.parser._current_test, details={})
+
+ def addSkip(self, offset, line):
+ """A 'skip:' directive has been read."""
+ self._outcome(offset, line, self._skip,
+ self.parser._reading_skip_details)
+
+ def _succeed(self):
+ self.parser.client.addSuccess(self.parser._current_test, details={})
+
+ def addSuccess(self, offset, line):
+ """A 'success:' directive has been read."""
+ self._outcome(offset, line, self._succeed,
+ self.parser._reading_success_details)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(_u(''))
+
+
+class _OutSideTest(_ParserState):
+ """State for the subunit parser outside of a test context."""
+
+ def lostConnection(self):
+ """Connection lost."""
+
+ def startTest(self, offset, line):
+ """A test start command received."""
+ self.parser._state = self.parser._in_test
+ test_name = line[offset:-1].decode('utf8')
+ self.parser._current_test = RemotedTestCase(test_name)
+ self.parser.current_test_description = test_name
+ self.parser.client.startTest(self.parser._current_test)
+ self.parser.subunitLineReceived(line)
+
+
+class _ReadingDetails(_ParserState):
+    """Common logic for reading state details."""
+
+ def endDetails(self):
+ """The end of a details section has been reached."""
+ self.parser._state = self.parser._outside_test
+ self.parser.current_test_description = None
+ self._report_outcome()
+ self.parser.client.stopTest(self.parser._current_test)
+
+ def lineReceived(self, line):
+ """a line has been received."""
+ self.details_parser.lineReceived(line)
+ self.parser.subunitLineReceived(line)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(_u('%s report of ') %
+ self._outcome_label())
+
+ def _outcome_label(self):
+ """The label to describe this outcome."""
+ raise NotImplementedError(self._outcome_label)
+
+ def set_simple(self):
+ """Start a simple details parser."""
+ self.details_parser = details.SimpleDetailsParser(self)
+
+ def set_multipart(self):
+ """Start a multipart details parser."""
+ self.details_parser = details.MultipartDetailsParser(self)
+
+
+class _ReadingFailureDetails(_ReadingDetails):
+ """State for the subunit parser when reading failure details."""
+
+ def _report_outcome(self):
+ self.parser.client.addFailure(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "failure"
+
+
+class _ReadingErrorDetails(_ReadingDetails):
+ """State for the subunit parser when reading error details."""
+
+ def _report_outcome(self):
+ self.parser.client.addError(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "error"
+
+
+class _ReadingExpectedFailureDetails(_ReadingDetails):
+ """State for the subunit parser when reading xfail details."""
+
+ def _report_outcome(self):
+ self.parser.client.addExpectedFailure(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "xfail"
+
+
+class _ReadingUnexpectedSuccessDetails(_ReadingDetails):
+ """State for the subunit parser when reading uxsuccess details."""
+
+ def _report_outcome(self):
+ self.parser.client.addUnexpectedSuccess(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "uxsuccess"
+
+
+class _ReadingSkipDetails(_ReadingDetails):
+ """State for the subunit parser when reading skip details."""
+
+ def _report_outcome(self):
+ self.parser.client.addSkip(self.parser._current_test,
+ details=self.details_parser.get_details("skip"))
+
+ def _outcome_label(self):
+ return "skip"
+
+
+class _ReadingSuccessDetails(_ReadingDetails):
+ """State for the subunit parser when reading success details."""
+
+ def _report_outcome(self):
+ self.parser.client.addSuccess(self.parser._current_test,
+ details=self.details_parser.get_details("success"))
+
+ def _outcome_label(self):
+ return "success"
+
+
+class TestProtocolServer(object):
+ """A parser for subunit.
+
+ :ivar tags: The current tags associated with the protocol stream.
+ """
+
+ def __init__(self, client, stream=None, forward_stream=None):
+ """Create a TestProtocolServer instance.
+
+ :param client: An object meeting the unittest.TestResult protocol.
+ :param stream: The stream that lines received which are not part of the
+ subunit protocol should be written to. This allows custom handling
+ of mixed protocols. By default, sys.stdout will be used for
+ convenience. It should accept bytes to its write() method.
+ :param forward_stream: A stream to forward subunit lines to. This
+ allows a filter to forward the entire stream while still parsing
+ and acting on it. By default forward_stream is set to
+ DiscardStream() and no forwarding happens.
+ """
+ self.client = ExtendedToOriginalDecorator(client)
+ if stream is None:
+ stream = sys.stdout
+ if sys.version_info > (3, 0):
+ stream = stream.buffer
+ self._stream = stream
+ self._forward_stream = forward_stream or DiscardStream()
+        # state objects we can switch to
+ self._in_test = _InTest(self)
+ self._outside_test = _OutSideTest(self)
+ self._reading_error_details = _ReadingErrorDetails(self)
+ self._reading_failure_details = _ReadingFailureDetails(self)
+ self._reading_skip_details = _ReadingSkipDetails(self)
+ self._reading_success_details = _ReadingSuccessDetails(self)
+ self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
+ self._reading_uxsuccess_details = _ReadingUnexpectedSuccessDetails(self)
+ # start with outside test.
+ self._state = self._outside_test
+ # Avoid casts on every call
+ self._plusminus = _b('+-')
+ self._push_sym = _b('push')
+ self._pop_sym = _b('pop')
+
+ def _handleProgress(self, offset, line):
+ """Process a progress directive."""
+ line = line[offset:].strip()
+ if line[0] in self._plusminus:
+ whence = PROGRESS_CUR
+ delta = int(line)
+ elif line == self._push_sym:
+ whence = PROGRESS_PUSH
+ delta = None
+ elif line == self._pop_sym:
+ whence = PROGRESS_POP
+ delta = None
+ else:
+ whence = PROGRESS_SET
+ delta = int(line)
+ self.client.progress(delta, whence)
+
+ def _handleTags(self, offset, line):
+ """Process a tags command."""
+ tags = line[offset:].decode('utf8').split()
+ new_tags, gone_tags = tags_to_new_gone(tags)
+ self.client.tags(new_tags, gone_tags)
+
+ def _handleTime(self, offset, line):
+ # Accept it, but do not do anything with it yet.
+ try:
+ event_time = iso8601.parse_date(line[offset:-1])
+ except TypeError:
+ raise TypeError(_u("Failed to parse %r, got %r")
+                % (line, sys.exc_info()[1]))
+ self.client.time(event_time)
+
+ def lineReceived(self, line):
+ """Call the appropriate local method for the received line."""
+ self._state.lineReceived(line)
+
+ def _lostConnectionInTest(self, state_string):
+ error_string = _u("lost connection during %stest '%s'") % (
+ state_string, self.current_test_description)
+ self.client.addError(self._current_test, RemoteError(error_string))
+ self.client.stopTest(self._current_test)
+
+ def lostConnection(self):
+ """The input connection has finished."""
+ self._state.lostConnection()
+
+ def readFrom(self, pipe):
+ """Blocking convenience API to parse an entire stream.
+
+ :param pipe: A file-like object supporting readlines().
+ :return: None.
+ """
+ for line in pipe.readlines():
+ self.lineReceived(line)
+ self.lostConnection()
+
+ def _startTest(self, offset, line):
+ """Internal call to change state machine. Override startTest()."""
+ self._state.startTest(offset, line)
+
+ def subunitLineReceived(self, line):
+ self._forward_stream.write(line)
+
+ def stdOutLineReceived(self, line):
+ self._stream.write(line)
+
+
+class TestProtocolClient(testresult.TestResult):
+ """A TestResult which generates a subunit stream for a test run.
+
+ # Get a TestSuite or TestCase to run
+ suite = make_suite()
+ # Create a stream (any object with a 'write' method). This should accept
+ # bytes not strings: subunit is a byte orientated protocol.
+ stream = file('tests.log', 'wb')
+ # Create a subunit result object which will output to the stream
+ result = subunit.TestProtocolClient(stream)
+ # Optionally, to get timing data for performance analysis, wrap the
+ # serialiser with a timing decorator
+ result = subunit.test_results.AutoTimingTestResultDecorator(result)
+ # Run the test suite reporting to the subunit result object
+ suite.run(result)
+ # Close the stream.
+ stream.close()
+ """
+
+ def __init__(self, stream):
+ testresult.TestResult.__init__(self)
+ stream = make_stream_binary(stream)
+ self._stream = stream
+ self._progress_fmt = _b("progress: ")
+ self._bytes_eol = _b("\n")
+ self._progress_plus = _b("+")
+ self._progress_push = _b("push")
+ self._progress_pop = _b("pop")
+ self._empty_bytes = _b("")
+ self._start_simple = _b(" [\n")
+ self._end_simple = _b("]\n")
+
+ def addError(self, test, error=None, details=None):
+ """Report an error in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addError(self, test, error)
+ addError(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("error", test, error=error, details=details)
+ if self.failfast:
+ self.stop()
+
+ def addExpectedFailure(self, test, error=None, details=None):
+ """Report an expected failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+            addExpectedFailure(self, test, error)
+            addExpectedFailure(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("xfail", test, error=error, details=details)
+
+ def addFailure(self, test, error=None, details=None):
+ """Report a failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addFailure(self, test, error)
+ addFailure(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("failure", test, error=error, details=details)
+ if self.failfast:
+ self.stop()
+
+ def _addOutcome(self, outcome, test, error=None, details=None,
+ error_permitted=True):
+ """Report a failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addOutcome(self, test, error)
+ addOutcome(self, test, details)
+
+ :param outcome: A string describing the outcome - used as the
+ event name in the subunit stream.
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ :param error_permitted: If True then one and only one of error or
+ details must be supplied. If False then error must not be supplied
+ and details is still optional. """
+ self._stream.write(_b("%s: " % outcome) + self._test_id(test))
+ if error_permitted:
+ if error is None and details is None:
+ raise ValueError
+ else:
+ if error is not None:
+ raise ValueError
+ if error is not None:
+ self._stream.write(self._start_simple)
+ tb_content = TracebackContent(error, test)
+ for bytes in tb_content.iter_bytes():
+ self._stream.write(bytes)
+ elif details is not None:
+ self._write_details(details)
+ else:
+ self._stream.write(_b("\n"))
+ if details is not None or error is not None:
+ self._stream.write(self._end_simple)
+
+ def addSkip(self, test, reason=None, details=None):
+ """Report a skipped test."""
+ if reason is None:
+ self._addOutcome("skip", test, error=None, details=details)
+ else:
+ self._stream.write(_b("skip: %s [\n" % test.id()))
+ self._stream.write(_b("%s\n" % reason))
+ self._stream.write(self._end_simple)
+
+ def addSuccess(self, test, details=None):
+ """Report a success in a test."""
+ self._addOutcome("successful", test, details=details, error_permitted=False)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ """Report an unexpected success in test test.
+
+ Details can optionally be provided: conceptually there
+ are two separate methods:
+            addUnexpectedSuccess(self, test)
+            addUnexpectedSuccess(self, test, details)
+
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("uxsuccess", test, details=details,
+ error_permitted=False)
+ if self.failfast:
+ self.stop()
+
+ def _test_id(self, test):
+ result = test.id()
+ if type(result) is not bytes:
+ result = result.encode('utf8')
+ return result
+
+ def startTest(self, test):
+ """Mark a test as starting its test run."""
+ super(TestProtocolClient, self).startTest(test)
+ self._stream.write(_b("test: ") + self._test_id(test) + _b("\n"))
+ self._stream.flush()
+
+ def stopTest(self, test):
+ super(TestProtocolClient, self).stopTest(test)
+ self._stream.flush()
+
+ def progress(self, offset, whence):
+ """Provide indication about the progress/length of the test run.
+
+ :param offset: Information about the number of tests remaining. If
+ whence is PROGRESS_CUR, then offset increases/decreases the
+ remaining test count. If whence is PROGRESS_SET, then offset
+ specifies exactly the remaining test count.
+ :param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
+ PROGRESS_POP.
+ """
+ if whence == PROGRESS_CUR and offset > -1:
+ prefix = self._progress_plus
+ offset = _b(str(offset))
+ elif whence == PROGRESS_PUSH:
+ prefix = self._empty_bytes
+ offset = self._progress_push
+ elif whence == PROGRESS_POP:
+ prefix = self._empty_bytes
+ offset = self._progress_pop
+ else:
+ prefix = self._empty_bytes
+ offset = _b(str(offset))
+ self._stream.write(self._progress_fmt + prefix + offset +
+ self._bytes_eol)
+
+ def tags(self, new_tags, gone_tags):
+ """Inform the client about tags added/removed from the stream."""
+ if not new_tags and not gone_tags:
+ return
+ tags = set([tag.encode('utf8') for tag in new_tags])
+ tags.update([_b("-") + tag.encode('utf8') for tag in gone_tags])
+ tag_line = _b("tags: ") + _b(" ").join(tags) + _b("\n")
+ self._stream.write(tag_line)
+
+ def time(self, a_datetime):
+ """Inform the client of the time.
+
+        :param a_datetime: A datetime.datetime object.
+ """
+ time = a_datetime.astimezone(iso8601.Utc())
+ self._stream.write(_b("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
+ time.year, time.month, time.day, time.hour, time.minute,
+ time.second, time.microsecond)))
+
+ def _write_details(self, details):
+ """Output details to the stream.
+
+ :param details: An extended details dict for a test outcome.
+ """
+ self._stream.write(_b(" [ multipart\n"))
+ for name, content in sorted(details.items()):
+ self._stream.write(_b("Content-Type: %s/%s" %
+ (content.content_type.type, content.content_type.subtype)))
+ parameters = content.content_type.parameters
+ if parameters:
+ self._stream.write(_b(";"))
+ param_strs = []
+ for param, value in parameters.items():
+ param_strs.append("%s=%s" % (param, value))
+ self._stream.write(_b(",".join(param_strs)))
+ self._stream.write(_b("\n%s\n" % name))
+ encoder = chunked.Encoder(self._stream)
+ list(map(encoder.write, content.iter_bytes()))
+ encoder.close()
+
+ def done(self):
+ """Obey the testtools result.done() interface."""
+
+
+def RemoteError(description=_u("")):
+ return (_StringException, _StringException(description), None)
+
+
+class RemotedTestCase(unittest.TestCase):
+ """A class to represent test cases run in child processes.
+
+ Instances of this class are used to provide the Python test API a TestCase
+ that can be printed to the screen, introspected for metadata and so on.
+    However, as they are simply a memoisation of a test that was actually
+ run in the past by a separate process, they cannot perform any interactive
+ actions.
+ """
+
+ def __eq__ (self, other):
+ try:
+ return self.__description == other.__description
+ except AttributeError:
+ return False
+
+ def __init__(self, description):
+        """Create a pseudo test case with the given description."""
+ self.__description = description
+
+ def error(self, label):
+ raise NotImplementedError("%s on RemotedTestCases is not permitted." %
+ label)
+
+ def setUp(self):
+ self.error("setUp")
+
+ def tearDown(self):
+ self.error("tearDown")
+
+ def shortDescription(self):
+ return self.__description
+
+ def id(self):
+ return "%s" % (self.__description,)
+
+ def __str__(self):
+ return "%s (%s)" % (self.__description, self._strclass())
+
+ def __repr__(self):
+ return "<%s description='%s'>" % \
+ (self._strclass(), self.__description)
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ result.startTest(self)
+ result.addError(self, RemoteError(_u("Cannot run RemotedTestCases.\n")))
+ result.stopTest(self)
+
+ def _strclass(self):
+ cls = self.__class__
+ return "%s.%s" % (cls.__module__, cls.__name__)
+
+
+class ExecTestCase(unittest.TestCase):
+ """A test case which runs external scripts for test fixtures."""
+
+ def __init__(self, methodName='runTest'):
+ """Create an instance of the class that will use the named test
+ method when executed. Raises a ValueError if the instance does
+ not have a method with the specified name.
+ """
+ unittest.TestCase.__init__(self, methodName)
+ testMethod = getattr(self, methodName)
+ self.script = join_dir(sys.modules[self.__class__.__module__].__file__,
+ testMethod.__doc__)
+
+ def countTestCases(self):
+ return 1
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ self._run(result)
+
+ def debug(self):
+ """Run the test without collecting errors in a TestResult"""
+ self._run(testresult.TestResult())
+
+ def _run(self, result):
+ protocol = TestProtocolServer(result)
+ process = subprocess.Popen(self.script, shell=True,
+ stdout=subprocess.PIPE)
+ make_stream_binary(process.stdout)
+ output = process.communicate()[0]
+ protocol.readFrom(BytesIO(output))
+
+
+class IsolatedTestCase(unittest.TestCase):
+ """A TestCase which executes in a forked process.
+
+ Each test gets its own process, which has a performance overhead but will
+ provide excellent isolation from global state (such as django configs,
+ zope utilities and so on).
+ """
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ run_isolated(unittest.TestCase, self, result)
+
+
+class IsolatedTestSuite(unittest.TestSuite):
+ """A TestSuite which runs its tests in a forked process.
+
+    This suite will fork() before running its tests and report the
+ results from the child process using a Subunit stream. This is useful for
+ handling tests that mutate global state, or are testing C extensions that
+ could crash the VM.
+ """
+
+ def run(self, result=None):
+ if result is None: result = testresult.TestResult()
+ run_isolated(unittest.TestSuite, self, result)
+
+
+def run_isolated(klass, self, result):
+ """Run a test suite or case in a subprocess, using the run method on klass.
+ """
+ c2pread, c2pwrite = os.pipe()
+ # fixme - error -> result
+ # now fork
+ pid = os.fork()
+ if pid == 0:
+ # Child
+ # Close parent's pipe ends
+ os.close(c2pread)
+ # Dup fds for child
+ os.dup2(c2pwrite, 1)
+ # Close pipe fds.
+ os.close(c2pwrite)
+
+        # at this point, stdout is redirected to the pipe; now we want
+ # to filter it to escape ]'s.
+ ### XXX: test and write that bit.
+ stream = os.fdopen(1, 'wb')
+ result = TestProtocolClient(stream)
+ klass.run(self, result)
+ stream.flush()
+ sys.stderr.flush()
+ # exit HARD, exit NOW.
+ os._exit(0)
+ else:
+ # Parent
+ # Close child pipe ends
+ os.close(c2pwrite)
+ # hookup a protocol engine
+ protocol = TestProtocolServer(result)
+ fileobj = os.fdopen(c2pread, 'rb')
+ protocol.readFrom(fileobj)
+ os.waitpid(pid, 0)
+ # TODO return code evaluation.
+ return result
+
+
+def TAP2SubUnit(tap, output_stream):
+ """Filter a TAP pipe into a subunit pipe.
+
+ This should be invoked once per TAP script, as TAP scripts get
+ mapped to a single runnable case with multiple components.
+
+ :param tap: A tap pipe/stream/file object - should emit unicode strings.
+    :param output_stream: A pipe/stream/file object to write subunit results to.
+ :return: The exit code to exit with.
+ """
+ output = StreamResultToBytes(output_stream)
+ UTF8_TEXT = 'text/plain; charset=UTF8'
+ BEFORE_PLAN = 0
+ AFTER_PLAN = 1
+ SKIP_STREAM = 2
+ state = BEFORE_PLAN
+ plan_start = 1
+ plan_stop = 0
+ # Test data for the next test to emit
+ test_name = None
+ log = []
+ result = None
+ def missing_test(plan_start):
+ output.status(test_id='test %d' % plan_start,
+ test_status='fail', runnable=False,
+ mime_type=UTF8_TEXT, eof=True, file_name="tap meta",
+ file_bytes=b"test missing from TAP output")
+ def _emit_test():
+ "write out a test"
+ if test_name is None:
+ return
+ if log:
+ log_bytes = b'\n'.join(log_line.encode('utf8') for log_line in log)
+ mime_type = UTF8_TEXT
+ file_name = 'tap comment'
+ eof = True
+ else:
+ log_bytes = None
+ mime_type = None
+ file_name = None
+ eof = True
+ del log[:]
+ output.status(test_id=test_name, test_status=result,
+ file_bytes=log_bytes, mime_type=mime_type, eof=eof,
+ file_name=file_name, runnable=False)
+ for line in tap:
+ if state == BEFORE_PLAN:
+ match = re.match("(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
+ if match:
+ state = AFTER_PLAN
+ _, plan_stop, comment = match.groups()
+ plan_stop = int(plan_stop)
+ if plan_start > plan_stop and plan_stop == 0:
+ # skipped file
+ state = SKIP_STREAM
+ output.status(test_id='file skip', test_status='skip',
+ file_bytes=comment.encode('utf8'), eof=True,
+ file_name='tap comment')
+ continue
+ # not a plan line, or have seen one before
+ match = re.match("(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
+ if match:
+ # new test, emit current one.
+ _emit_test()
+ status, number, description, directive, directive_comment = match.groups()
+ if status == 'ok':
+ result = 'success'
+ else:
+ result = "fail"
+ if description is None:
+ description = ''
+ else:
+ description = ' ' + description
+ if directive is not None:
+ if directive.upper() == 'TODO':
+ result = 'xfail'
+ elif directive.upper() == 'SKIP':
+ result = 'skip'
+ if directive_comment is not None:
+ log.append(directive_comment)
+ if number is not None:
+ number = int(number)
+ while plan_start < number:
+ missing_test(plan_start)
+ plan_start += 1
+ test_name = "test %d%s" % (plan_start, description)
+ plan_start += 1
+ continue
+ match = re.match("Bail out\!(?:\s*(.*))?\n", line)
+ if match:
+ reason, = match.groups()
+ if reason is None:
+ extra = ''
+ else:
+ extra = ' %s' % reason
+ _emit_test()
+ test_name = "Bail out!%s" % extra
+ result = "fail"
+ state = SKIP_STREAM
+ continue
+ match = re.match("\#.*\n", line)
+ if match:
+ log.append(line[:-1])
+ continue
+ # Should look at buffering status and binding this to the prior result.
+ output.status(file_bytes=line.encode('utf8'), file_name='stdout',
+ mime_type=UTF8_TEXT)
+ _emit_test()
+ while plan_start <= plan_stop:
+ # record missed tests
+ missing_test(plan_start)
+ plan_start += 1
+ return 0
+
+
+def tag_stream(original, filtered, tags):
+ """Alter tags on a stream.
+
+ :param original: The input stream.
+ :param filtered: The output stream.
+ :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
+ '-TAG' commands.
+
+ A 'TAG' command will add the tag to the output stream,
+ and override any existing '-TAG' command in that stream.
+ Specifically:
+ * A global 'tags: TAG' will be added to the start of the stream.
+ * Any tags commands with -TAG will have the -TAG removed.
+
+ A '-TAG' command will remove the TAG command from the stream.
+ Specifically:
+ * A 'tags: -TAG' command will be added to the start of the stream.
+ * Any 'tags: TAG' command will have 'TAG' removed from it.
+ Additionally, any redundant tagging commands (adding a tag globally
+ present, or removing a tag globally removed) are stripped as a
+ by-product of the filtering.
+ :return: 0
+ """
+ new_tags, gone_tags = tags_to_new_gone(tags)
+ source = ByteStreamToStreamResult(original, non_subunit_name='stdout')
+ class Tagger(CopyStreamResult):
+ def status(self, **kwargs):
+ tags = kwargs.get('test_tags')
+ if not tags:
+ tags = set()
+ tags.update(new_tags)
+ tags.difference_update(gone_tags)
+ if tags:
+ kwargs['test_tags'] = tags
+ else:
+ kwargs['test_tags'] = None
+ super(Tagger, self).status(**kwargs)
+ output = Tagger([StreamResultToBytes(filtered)])
+ source.run(output)
+ return 0
+
+
+class ProtocolTestCase(object):
+ """Subunit wire protocol to unittest.TestCase adapter.
+
+ ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
+ calling a ProtocolTestCase or invoking the run() method will make a 'test
+ run' happen. The 'test run' will simply be a replay of the test activity
+ that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
+ and ``countTestCases`` methods are not supported because there isn't a
+ sensible mapping for those methods.
+
+ # Get a stream (any object with a readline() method), in this case the
+ # stream output by the example from ``subunit.TestProtocolClient``.
+ stream = file('tests.log', 'rb')
+ # Create a parser which will read from the stream and emit
+ # activity to a unittest.TestResult when run() is called.
+ suite = subunit.ProtocolTestCase(stream)
+ # Create a result object to accept the contents of that stream.
+ result = unittest._TextTestResult(sys.stdout)
+ # 'run' the tests - process the stream and feed its contents to result.
+ suite.run(result)
+ stream.close()
+
+ :seealso: TestProtocolServer (the subunit wire protocol parser).
+ """
+
+ def __init__(self, stream, passthrough=None, forward=None):
+ """Create a ProtocolTestCase reading from stream.
+
+ :param stream: A filelike object which a subunit stream can be read
+ from.
+        :param passthrough: A stream to pass non subunit input on to. If not
+ supplied, the TestProtocolServer default is used.
+ :param forward: A stream to pass subunit input on to. If not supplied
+ subunit input is not forwarded.
+ """
+ stream = make_stream_binary(stream)
+ self._stream = stream
+ self._passthrough = passthrough
+ if forward is not None:
+ forward = make_stream_binary(forward)
+ self._forward = forward
+
+ def __call__(self, result=None):
+ return self.run(result)
+
+ def run(self, result=None):
+ if result is None:
+ result = self.defaultTestResult()
+ protocol = TestProtocolServer(result, self._passthrough, self._forward)
+ line = self._stream.readline()
+ while line:
+ protocol.lineReceived(line)
+ line = self._stream.readline()
+ protocol.lostConnection()
+
+
+class TestResultStats(testresult.TestResult):
+ """A pyunit TestResult interface implementation for making statistics.
+
+ :ivar total_tests: The total tests seen.
+ :ivar passed_tests: The tests that passed.
+ :ivar failed_tests: The tests that failed.
+ :ivar seen_tags: The tags seen across all tests.
+ """
+
+ def __init__(self, stream):
+ """Create a TestResultStats which outputs to stream."""
+ testresult.TestResult.__init__(self)
+ self._stream = stream
+ self.failed_tests = 0
+ self.skipped_tests = 0
+ self.seen_tags = set()
+
+ @property
+ def total_tests(self):
+ return self.testsRun
+
+ def addError(self, test, err, details=None):
+ self.failed_tests += 1
+
+ def addFailure(self, test, err, details=None):
+ self.failed_tests += 1
+
+ def addSkip(self, test, reason, details=None):
+ self.skipped_tests += 1
+
+ def formatStats(self):
+ self._stream.write("Total tests: %5d\n" % self.total_tests)
+ self._stream.write("Passed tests: %5d\n" % self.passed_tests)
+ self._stream.write("Failed tests: %5d\n" % self.failed_tests)
+ self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
+ tags = sorted(self.seen_tags)
+ self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
+
+ @property
+ def passed_tests(self):
+ return self.total_tests - self.failed_tests - self.skipped_tests
+
+ def tags(self, new_tags, gone_tags):
+ """Accumulate the seen tags."""
+ self.seen_tags.update(new_tags)
+
+ def wasSuccessful(self):
+ """Tells whether or not this result was a success"""
+ return self.failed_tests == 0
+
+
+def get_default_formatter():
+ """Obtain the default formatter to write to.
+
+ :return: A file-like object.
+ """
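+    # SUBUNIT_FORMATTER, if set, is treated as a shell command to pipe output
+    # through; e.g. (illustrative) SUBUNIT_FORMATTER='subunit2pyunit'.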
+ formatter = os.getenv("SUBUNIT_FORMATTER")
+ if formatter:
+ return os.popen(formatter, "w")
+ else:
+ stream = sys.stdout
+ if sys.version_info > (3, 0):
+ if safe_hasattr(stream, 'buffer'):
+ stream = stream.buffer
+ return stream
+
+
+def read_test_list(path):
+ """Read a list of test ids from a file on disk.
+
+ :param path: Path to the file
+ :return: Sequence of test ids
+ """
+ f = open(path, 'rb')
+ try:
+ return [l.rstrip("\n") for l in f.readlines()]
+ finally:
+ f.close()
+
+
+def make_stream_binary(stream):
+ """Ensure that a stream will be binary safe. See _make_binary_on_windows.
+
+ :return: A binary version of the same stream (some streams cannot be
+ 'fixed' but can be unwrapped).
+ """
+ try:
+ fileno = stream.fileno()
+ except (_UnsupportedOperation, AttributeError):
+ pass
+ else:
+ _make_binary_on_windows(fileno)
+ return _unwrap_text(stream)
+
+
+def _make_binary_on_windows(fileno):
+ """Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
+ if sys.platform == "win32":
+ import msvcrt
+ msvcrt.setmode(fileno, os.O_BINARY)
+
+
+def _unwrap_text(stream):
+ """Unwrap stream if it is a text stream to get the original buffer."""
+ if sys.version_info > (3, 0):
+ unicode_type = str
+ else:
+ unicode_type = unicode
+ try:
+ # Read streams
+ if type(stream.read(0)) is unicode_type:
+ return stream.buffer
+ except (_UnsupportedOperation, IOError):
+ # Cannot read from the stream: try via writes
+ try:
+ stream.write(_b(''))
+ except TypeError:
+ return stream.buffer
+ return stream
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py
new file mode 100644
index 00000000000..b9921291ea2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py
@@ -0,0 +1,185 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Encoder/decoder for http style chunked encoding."""
+
+from testtools.compat import _b
+
+empty = _b('')
+
+class Decoder(object):
+ """Decode chunked content to a byte stream."""
+
+ def __init__(self, output, strict=True):
+ """Create a decoder decoding to output.
+
+ :param output: A file-like object. Bytes written to the Decoder are
+ decoded to strip off the chunking and written to the output.
+ Up to a full write worth of data or a single control line may be
+ buffered (whichever is larger). The close method should be called
+ when no more data is available, to detect short streams; the
+            write method will return non-None when the end of a stream is
+ detected. The output object must accept bytes objects.
+
+ :param strict: If True (the default), the decoder will not knowingly
+ accept input that is not conformant to the HTTP specification.
+ (This does not imply that it will catch every nonconformance.)
+ If False, it will accept incorrect input that is still
+ unambiguous.
+ """
+ self.output = output
+ self.buffered_bytes = []
+ self.state = self._read_length
+ self.body_length = 0
+ self.strict = strict
+ self._match_chars = _b("0123456789abcdefABCDEF\r\n")
+ self._slash_n = _b('\n')
+ self._slash_r = _b('\r')
+ self._slash_rn = _b('\r\n')
+ self._slash_nr = _b('\n\r')
+
+ def close(self):
+ """Close the decoder.
+
+ :raises ValueError: If the stream is incomplete ValueError is raised.
+ """
+ if self.state != self._finished:
+ raise ValueError("incomplete stream")
+
+ def _finished(self):
+ """Finished reading, return any remaining bytes."""
+ if self.buffered_bytes:
+ buffered_bytes = self.buffered_bytes
+ self.buffered_bytes = []
+ return empty.join(buffered_bytes)
+ else:
+ raise ValueError("stream is finished")
+
+ def _read_body(self):
+ """Pass body bytes to the output."""
+ while self.body_length and self.buffered_bytes:
+ if self.body_length >= len(self.buffered_bytes[0]):
+ self.output.write(self.buffered_bytes[0])
+ self.body_length -= len(self.buffered_bytes[0])
+ del self.buffered_bytes[0]
+ # No more data available.
+ if not self.body_length:
+ self.state = self._read_length
+ else:
+ self.output.write(self.buffered_bytes[0][:self.body_length])
+ self.buffered_bytes[0] = \
+ self.buffered_bytes[0][self.body_length:]
+ self.body_length = 0
+ self.state = self._read_length
+ return self.state()
+
+ def _read_length(self):
+ """Try to decode a length from the bytes."""
+ count_chars = []
+ for bytes in self.buffered_bytes:
+ for pos in range(len(bytes)):
+ byte = bytes[pos:pos+1]
+ if byte not in self._match_chars:
+ break
+ count_chars.append(byte)
+ if byte == self._slash_n:
+ break
+ if not count_chars:
+ return
+ if count_chars[-1] != self._slash_n:
+ return
+ count_str = empty.join(count_chars)
+ if self.strict:
+ if count_str[-2:] != self._slash_rn:
+ raise ValueError("chunk header invalid: %r" % count_str)
+ if self._slash_r in count_str[:-2]:
+ raise ValueError("too many CRs in chunk header %r" % count_str)
+ self.body_length = int(count_str.rstrip(self._slash_nr), 16)
+ excess_bytes = len(count_str)
+ while excess_bytes:
+ if excess_bytes >= len(self.buffered_bytes[0]):
+ excess_bytes -= len(self.buffered_bytes[0])
+ del self.buffered_bytes[0]
+ else:
+ self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
+ excess_bytes = 0
+ if not self.body_length:
+ self.state = self._finished
+ if not self.buffered_bytes:
+ # May not call into self._finished with no buffered data.
+ return empty
+ else:
+ self.state = self._read_body
+ return self.state()
+
+ def write(self, bytes):
+ """Decode bytes to the output stream.
+
+ :raises ValueError: If the stream has already seen the end of file
+ marker.
+ :returns: None, or the excess bytes beyond the end of file marker.
+ """
+ if bytes:
+ self.buffered_bytes.append(bytes)
+ return self.state()
+
+
+class Encoder(object):
+ """Encode content to a stream using HTTP Chunked coding."""
+
+ def __init__(self, output):
+ """Create an encoder encoding to output.
+
+ :param output: A file-like object. Bytes written to the Encoder
+ will be encoded using HTTP chunking. Small writes may be buffered
+ and the ``close`` method must be called to finish the stream.
+ """
+ self.output = output
+ self.buffered_bytes = []
+ self.buffer_size = 0
+
+ def flush(self, extra_len=0):
+ """Flush the encoder to the output stream.
+
+ :param extra_len: Increase the size of the chunk by this many bytes
+ to allow for a subsequent write.
+ """
+ if not self.buffer_size and not extra_len:
+ return
+ buffered_bytes = self.buffered_bytes
+ buffer_size = self.buffer_size
+ self.buffered_bytes = []
+ self.buffer_size = 0
+ self.output.write(_b("%X\r\n" % (buffer_size + extra_len)))
+ if buffer_size:
+ self.output.write(empty.join(buffered_bytes))
+ return True
+
+ def write(self, bytes):
+ """Encode bytes to the output stream."""
+ bytes_len = len(bytes)
+ if self.buffer_size + bytes_len >= 65536:
+ self.flush(bytes_len)
+ self.output.write(bytes)
+ else:
+ self.buffered_bytes.append(bytes)
+ self.buffer_size += bytes_len
+
+ def close(self):
+ """Finish the stream. This does not close the output stream."""
+ self.flush()
+ self.output.write(_b("0\r\n"))
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py
new file mode 100644
index 00000000000..9e5e005864c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py
@@ -0,0 +1,119 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Handlers for outcome details."""
+
+from testtools import content, content_type
+from testtools.compat import _b, BytesIO
+
+from subunit import chunked
+
+end_marker = _b("]\n")
+quoted_marker = _b(" ]")
+empty = _b('')
+
+
+class DetailsParser(object):
+ """Base class/API reference for details parsing."""
+
+
+class SimpleDetailsParser(DetailsParser):
+ """Parser for single-part [] delimited details."""
+
+ def __init__(self, state):
+ self._message = _b("")
+ self._state = state
+
+ def lineReceived(self, line):
+ if line == end_marker:
+ self._state.endDetails()
+ return
+ if line[0:2] == quoted_marker:
+ # quoted ] start
+ self._message += line[1:]
+ else:
+ self._message += line
+
+ def get_details(self, style=None):
+ result = {}
+ if not style:
+ # We know that subunit/testtools serialise [] formatted
+ # tracebacks as utf8, but perhaps we need a ReplacingContent
+ # or something like that.
+ result['traceback'] = content.Content(
+ content_type.ContentType("text", "x-traceback",
+ {"charset": "utf8"}),
+ lambda:[self._message])
+ else:
+ if style == 'skip':
+ name = 'reason'
+ else:
+ name = 'message'
+ result[name] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[self._message])
+ return result
+
+ def get_message(self):
+ return self._message
+
+
+class MultipartDetailsParser(DetailsParser):
+ """Parser for multi-part [] surrounded MIME typed chunked details."""
+
+ def __init__(self, state):
+ self._state = state
+ self._details = {}
+ self._parse_state = self._look_for_content
+
+ def _look_for_content(self, line):
+ if line == end_marker:
+ self._state.endDetails()
+ return
+ # TODO error handling
+ field, value = line[:-1].decode('utf8').split(' ', 1)
+ try:
+ main, sub = value.split('/')
+ except ValueError:
+ raise ValueError("Invalid MIME type %r" % value)
+ self._content_type = content_type.ContentType(main, sub)
+ self._parse_state = self._get_name
+
+ def _get_name(self, line):
+ self._name = line[:-1].decode('utf8')
+ self._body = BytesIO()
+ self._chunk_parser = chunked.Decoder(self._body)
+ self._parse_state = self._feed_chunks
+
+ def _feed_chunks(self, line):
+ residue = self._chunk_parser.write(line)
+ if residue is not None:
+ # Line based use always ends on no residue.
+ assert residue == empty, 'residue: %r' % (residue,)
+ body = self._body
+ self._details[self._name] = content.Content(
+ self._content_type, lambda:[body.getvalue()])
+ self._chunk_parser.close()
+ self._parse_state = self._look_for_content
+
+ def get_details(self, for_skip=False):
+ return self._details
+
+ def get_message(self):
+ return None
+
+ def lineReceived(self, line):
+ self._parse_state(line)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py
new file mode 100644
index 00000000000..0a0a185c3e3
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py
@@ -0,0 +1,206 @@
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+
+from optparse import OptionParser
+import sys
+
+from extras import safe_hasattr
+from testtools import CopyStreamResult, StreamResult, StreamResultRouter
+
+from subunit import (
+ DiscardStream, ProtocolTestCase, ByteStreamToStreamResult,
+ StreamResultToBytes,
+ )
+from subunit.test_results import CatFiles
+
+
+def make_options(description):
+ parser = OptionParser(description=description)
+ parser.add_option(
+ "--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False,
+ dest="no_passthrough")
+ parser.add_option(
+ "-o", "--output-to",
+ help="Send the output to this path rather than stdout.")
+ parser.add_option(
+ "-f", "--forward", action="store_true", default=False,
+ help="Forward subunit stream on stdout. When set, received "
+ "non-subunit output will be encapsulated in subunit.")
+ return parser
+
+
+def run_tests_from_stream(input_stream, result, passthrough_stream=None,
+ forward_stream=None, protocol_version=1, passthrough_subunit=True):
+ """Run tests from a subunit input stream through 'result'.
+
+    Non-test events - top level file attachments - are expected to be
+    dropped by v2 StreamResults at the present time (as all the analysis code
+    lives in the ExtendedTestResult APIs), so to implement passthrough_stream
+    they are diverted and copied directly when that stream is set.
+
+ :param input_stream: A stream containing subunit input.
+ :param result: A TestResult that will receive the test events.
+ NB: This should be an ExtendedTestResult for v1 and a StreamResult for
+ v2.
+ :param passthrough_stream: All non-subunit input received will be
+ sent to this stream. If not provided, uses the ``TestProtocolServer``
+ default, which is ``sys.stdout``.
+ :param forward_stream: All subunit input received will be forwarded
+ to this stream. If not provided, uses the ``TestProtocolServer``
+ default, which is to not forward any input. Do not set this when
+ transforming the stream - items would be double-reported.
+ :param protocol_version: What version of the subunit protocol to expect.
+    :param passthrough_subunit: If True, pass non-subunit input through as
+        subunit; otherwise unwrap it for display. Only has an effect when
+        forward_stream is None (when forwarding, non-subunit input is always
+        turned into subunit).
+ """
+    if protocol_version == 1:
+ test = ProtocolTestCase(
+ input_stream, passthrough=passthrough_stream,
+ forward=forward_stream)
+    elif protocol_version == 2:
+ # In all cases we encapsulate unknown inputs.
+ if forward_stream is not None:
+ # Send events to forward_stream as subunit.
+ forward_result = StreamResultToBytes(forward_stream)
+ # If we're passing non-subunit through, copy:
+ if passthrough_stream is None:
+ # Not passing non-test events - split them off to nothing.
+ router = StreamResultRouter(forward_result)
+ router.add_rule(StreamResult(), 'test_id', test_id=None)
+ result = CopyStreamResult([router, result])
+ else:
+ # otherwise, copy all events to forward_result
+ result = CopyStreamResult([forward_result, result])
+ elif passthrough_stream is not None:
+ if not passthrough_subunit:
+ # Route non-test events to passthrough_stream, unwrapping them for
+ # display.
+ passthrough_result = CatFiles(passthrough_stream)
+ else:
+ passthrough_result = StreamResultToBytes(passthrough_stream)
+ result = StreamResultRouter(result)
+ result.add_rule(passthrough_result, 'test_id', test_id=None)
+ test = ByteStreamToStreamResult(input_stream,
+ non_subunit_name='stdout')
+ else:
+ raise Exception("Unknown protocol version.")
+ result.startTestRun()
+ test.run(result)
+ result.stopTestRun()
+
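+# Minimal usage sketch (an assumption, not part of the upstream API docs): feed
+# a v2 subunit byte stream into a testtools StreamSummary. 'results.subunit' is
+# a hypothetical file name.
+#
+#   from testtools import StreamSummary
+#   summary = StreamSummary()
+#   with open('results.subunit', 'rb') as stream:
+#       run_tests_from_stream(stream, summary, protocol_version=2)
+#   print(summary.wasSuccessful())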
+
+def filter_by_result(result_factory, output_path, passthrough, forward,
+ input_stream=sys.stdin, protocol_version=1,
+ passthrough_subunit=True):
+ """Filter an input stream using a test result.
+
+ :param result_factory: A callable that when passed an output stream
+ returns a TestResult. It is expected that this result will output
+ to the given stream.
+    :param output_path: A path to send output to. If None, output will go
+        to ``sys.stdout``.
+ :param passthrough: If True, all non-subunit input will be sent to
+ ``sys.stdout``. If False, that input will be discarded.
+ :param forward: If True, all subunit input will be forwarded directly to
+ ``sys.stdout`` as well as to the ``TestResult``.
+ :param input_stream: The source of subunit input. Defaults to
+ ``sys.stdin``.
+ :param protocol_version: The subunit protocol version to expect.
+ :param passthrough_subunit: If True, passthrough should be as subunit.
+ :return: A test result with the results of the run.
+ """
+ if passthrough:
+ passthrough_stream = sys.stdout
+ else:
+        if protocol_version == 1:
+ passthrough_stream = DiscardStream()
+ else:
+ passthrough_stream = None
+
+ if forward:
+ forward_stream = sys.stdout
+    elif protocol_version == 1:
+ forward_stream = DiscardStream()
+ else:
+ forward_stream = None
+
+ if output_path is None:
+ output_to = sys.stdout
+ else:
+        output_to = open(output_path, 'wb')  # file() is Python 2 only; open() works on both.
+
+ try:
+ result = result_factory(output_to)
+ run_tests_from_stream(
+ input_stream, result, passthrough_stream, forward_stream,
+ protocol_version=protocol_version,
+ passthrough_subunit=passthrough_subunit)
+ finally:
+ if output_path:
+ output_to.close()
+ return result
+
+
+def run_filter_script(result_factory, description, post_run_hook=None,
+ protocol_version=1, passthrough_subunit=True):
+ """Main function for simple subunit filter scripts.
+
+ Many subunit filter scripts take a stream of subunit input and use a
+ TestResult to handle the events generated by that stream. This function
+ wraps a lot of the boiler-plate around that by making a script with
+ options for handling passthrough information and stream forwarding, and
+ that will exit with a successful return code (i.e. 0) if the input stream
+ represents a successful test run.
+
+ :param result_factory: A callable that takes an output stream and returns
+ a test result that outputs to that stream.
+    :param description: A description of the filter script.
+    :param post_run_hook: An optional callable invoked with the result after
+        the run completes, before the exit code is determined.
+    :param protocol_version: What protocol version to consume/emit.
+ :param passthrough_subunit: If True, passthrough should be as subunit.
+ """
+ parser = make_options(description)
+ (options, args) = parser.parse_args()
+ result = filter_by_result(
+ result_factory, options.output_to, not options.no_passthrough,
+ options.forward, protocol_version=protocol_version,
+ passthrough_subunit=passthrough_subunit,
+ input_stream=find_stream(sys.stdin, args))
+ if post_run_hook:
+ post_run_hook(result)
+ if not safe_hasattr(result, 'wasSuccessful'):
+ result = result.decorated
+ if result.wasSuccessful():
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+
+def find_stream(stdin, argv):
+ """Find a stream to use as input for filters.
+
+ :param stdin: Standard in - used if no files are named in argv.
+ :param argv: Command line arguments after option parsing. If one file
+ is named, that is opened in read only binary mode and returned.
+ A missing file will raise an exception, as will multiple file names.
+ """
+ assert len(argv) < 2, "Too many filenames."
+ if argv:
+ return open(argv[0], 'rb')
+ else:
+ return stdin
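+
+
+# Sketch of a minimal filter script built on run_filter_script (a hedged
+# example, not shipped with subunit): print test ids from a subunit v2 stream,
+# in the spirit of the bundled subunit-ls filter.
+#
+#   #!/usr/bin/env python
+#   from subunit.filters import run_filter_script
+#   from subunit.test_results import TestIdPrintingResult
+#   run_filter_script(
+#       lambda output: TestIdPrintingResult(output, show_times=True),
+#       'Print the ids of tests in a subunit stream.', protocol_version=2)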
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py
new file mode 100644
index 00000000000..07855d0975c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2007 Michael Twomey
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""ISO 8601 date time string parsing
+
+Basic usage:
+>>> import iso8601
+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+>>>
+
+"""
+
+from datetime import datetime, timedelta, tzinfo
+import re
+import sys
+
+__all__ = ["parse_date", "ParseError"]
+
+# Adapted from http://delete.me.uk/2005/03/iso8601.html
+ISO8601_REGEX_PATTERN = (r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
+ r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
+ r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
+)
+TIMEZONE_REGEX_PATTERN = "(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})"
+ISO8601_REGEX = re.compile(ISO8601_REGEX_PATTERN.encode('utf8'))
+TIMEZONE_REGEX = re.compile(TIMEZONE_REGEX_PATTERN.encode('utf8'))
+
+zulu = "Z".encode('latin-1')
+minus = "-".encode('latin-1')
+
+if sys.version_info < (3, 0):
+ bytes = str
+
+
+class ParseError(Exception):
+ """Raised when there is a problem parsing a date string"""
+
+# Yoinked from python docs
+ZERO = timedelta(0)
+class Utc(tzinfo):
+ """UTC
+
+ """
+ def utcoffset(self, dt):
+ return ZERO
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return ZERO
+UTC = Utc()
+
+class FixedOffset(tzinfo):
+ """Fixed offset in hours and minutes from UTC
+
+ """
+ def __init__(self, offset_hours, offset_minutes, name):
+ self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return ZERO
+
+ def __repr__(self):
+ return "<FixedOffset %r>" % self.__name
+
+def parse_timezone(tzstring, default_timezone=UTC):
+ """Parses ISO 8601 time zone specs into tzinfo offsets
+
+ """
+ if tzstring == zulu:
+ return default_timezone
+ # This isn't strictly correct, but it's common to encounter dates without
+ # timezones so I'll assume the default (which defaults to UTC).
+ # Addresses issue 4.
+ if tzstring is None:
+ return default_timezone
+ m = TIMEZONE_REGEX.match(tzstring)
+ prefix, hours, minutes = m.groups()
+ hours, minutes = int(hours), int(minutes)
+ if prefix == minus:
+ hours = -hours
+ minutes = -minutes
+ return FixedOffset(hours, minutes, tzstring)
+
+def parse_date(datestring, default_timezone=UTC):
+ """Parses ISO 8601 dates into datetime objects
+
+ The timezone is parsed from the date string. However it is quite common to
+ have dates without a timezone (not strictly correct). In this case the
+ default timezone specified in default_timezone is used. This is UTC by
+ default.
+ """
+ if not isinstance(datestring, bytes):
+ raise ParseError("Expecting bytes %r" % datestring)
+ m = ISO8601_REGEX.match(datestring)
+ if not m:
+ raise ParseError("Unable to parse date string %r" % datestring)
+ groups = m.groupdict()
+ tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
+ if groups["fraction"] is None:
+ groups["fraction"] = 0
+ else:
+ groups["fraction"] = int(float("0.%s" % groups["fraction"].decode()) * 1e6)
+ return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
+ int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
+ int(groups["fraction"]), tz)
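+
+
+# Hedged examples (not part of the original module). Note that parse_date
+# requires a byte string, because the regexes above are compiled against bytes:
+#
+#   parse_date(b"2007-01-25T12:00:00Z")       # tz-aware datetime in UTC
+#   parse_date(b"2007-01-25T12:00:00+05:30")  # tzinfo is FixedOffset(5, 30, ...)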
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py
new file mode 100644
index 00000000000..3a6af89a33b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py
@@ -0,0 +1,106 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Support for dealing with progress state."""
+
+class ProgressModel(object):
+ """A model of progress indicators as subunit defines it.
+
+ Instances of this class represent a single logical operation that is
+ progressing. The operation may have many steps, and some of those steps may
+ supply their own progress information. ProgressModel uses a nested concept
+ where the overall state can be pushed, creating new starting state, and
+ later pushed to return to the prior state. Many user interfaces will want
+ to display an overall summary though, and accordingly the pos() and width()
+ methods return overall summary information rather than information on the
+ current subtask.
+
+ The default state is 0/0 - indicating that the overall progress is unknown.
+    Any time the denominator of pos/width is 0, rendering of a ProgressModel
+    should take this into consideration.
+
+    :ivar _tasks: This private attribute stores the subtasks. Each is a list:
+ pos, width, overall_numerator, overall_denominator. The overall fields
+ store the calculated overall numerator and denominator for the state
+ that was pushed.
+ """
+
+ def __init__(self):
+ """Create a ProgressModel.
+
+ The new model has no progress data at all - it will claim a summary
+ width of zero and position of 0.
+ """
+ self._tasks = []
+ self.push()
+
+ def adjust_width(self, offset):
+ """Adjust the with of the current subtask."""
+ self._tasks[-1][1] += offset
+
+ def advance(self):
+ """Advance the current subtask."""
+ self._tasks[-1][0] += 1
+
+ def pop(self):
+ """Pop a subtask off the ProgressModel.
+
+ See push for a description of how push and pop work.
+ """
+ self._tasks.pop()
+
+ def pos(self):
+ """Return how far through the operation has progressed."""
+ if not self._tasks:
+ return 0
+ task = self._tasks[-1]
+ if len(self._tasks) > 1:
+ # scale up the overall pos by the current task or preserve it if
+ # no current width is known.
+ offset = task[2] * (task[1] or 1)
+ else:
+ offset = 0
+ return offset + task[0]
+
+ def push(self):
+ """Push a new subtask.
+
+        After pushing a new subtask, the overall progress hasn't changed. Calls
+        to adjust_width, advance and set_width only alter progress within the
+        range that a single call to 'advance' would have covered before - the
+        subtask represents progressing one step in the earlier task.
+
+ Call pop() to restore the progress model to the state before push was
+ called.
+ """
+ self._tasks.append([0, 0, self.pos(), self.width()])
+
+ def set_width(self, width):
+ """Set the width of the current subtask."""
+ self._tasks[-1][1] = width
+
+ def width(self):
+ """Return the total width of the operation."""
+ if not self._tasks:
+ return 0
+ task = self._tasks[-1]
+ if len(self._tasks) > 1:
+ # scale up the overall width by the current task or preserve it if
+ # no current width is known.
+ return task[3] * (task[1] or 1)
+ else:
+ return task[1]
+
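+
+# Hedged walkthrough (not part of the module) of the nested push/pop behaviour,
+# following the arithmetic in pos() and width():
+#
+#   p = ProgressModel()
+#   p.set_width(3)   # three top-level steps
+#   p.advance()      # pos() == 1, width() == 3
+#   p.push()         # the second step reports its own progress
+#   p.set_width(2)
+#   p.advance()      # overall: pos() == 1*2 + 1 == 3, width() == 3*2 == 6
+#   p.pop()          # back to pos() == 1, width() == 3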
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py
new file mode 100755
index 00000000000..7e4d783bded
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# Simple subunit testrunner for python
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Run a unittest testcase reporting results as Subunit.
+
+ $ python -m subunit.run mylib.tests.test_suite
+"""
+
+import io
+import os
+import sys
+
+from testtools import ExtendedToStreamDecorator
+from testtools.testsuite import iterate_tests
+
+from subunit import StreamResultToBytes, get_default_formatter
+from subunit.test_results import AutoTimingTestResultDecorator
+from testtools.run import (
+ BUFFEROUTPUT,
+ CATCHBREAK,
+ FAILFAST,
+ list_test,
+ TestProgram,
+ USAGE_AS_MAIN,
+ )
+
+
+class SubunitTestRunner(object):
+ def __init__(self, verbosity=None, failfast=None, buffer=None, stream=None):
+ """Create a TestToolsTestRunner.
+
+ :param verbosity: Ignored.
+ :param failfast: Stop running tests at the first failure.
+        :param buffer: Ignored.
+        :param stream: The stream to write subunit output to; defaults to
+            ``sys.stdout``.
+        """
+ self.failfast = failfast
+ self.stream = stream or sys.stdout
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result, _ = self._list(test)
+ result = ExtendedToStreamDecorator(result)
+ result = AutoTimingTestResultDecorator(result)
+ if self.failfast is not None:
+ result.failfast = self.failfast
+ result.startTestRun()
+ try:
+ test(result)
+ finally:
+ result.stopTestRun()
+ return result
+
+ def list(self, test):
+ "List the test."
+ result, errors = self._list(test)
+ if errors:
+ failed_descr = '\n'.join(errors).encode('utf8')
+ result.status(file_name="import errors", runnable=False,
+ file_bytes=failed_descr, mime_type="text/plain;charset=utf8")
+ sys.exit(2)
+
+ def _list(self, test):
+ test_ids, errors = list_test(test)
+ try:
+ fileno = self.stream.fileno()
+ except:
+ fileno = None
+ if fileno is not None:
+ stream = os.fdopen(fileno, 'wb', 0)
+ else:
+ stream = self.stream
+ result = StreamResultToBytes(stream)
+ for test_id in test_ids:
+ result.status(test_id=test_id, test_status='exists')
+ return result, errors
+
+
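+# Programmatic usage sketch (an assumption, not part of the module): run a
+# suite through SubunitTestRunner, emitting subunit to a binary stream.
+# 'mypkg.tests' is a hypothetical module name.
+#
+#   import unittest
+#   suite = unittest.defaultTestLoader.loadTestsFromName('mypkg.tests')
+#   runner = SubunitTestRunner(stream=getattr(sys.stdout, 'buffer', sys.stdout))
+#   runner.run(suite)
+
+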
+class SubunitTestProgram(TestProgram):
+
+ USAGE = USAGE_AS_MAIN
+
+ def usageExit(self, msg=None):
+ if msg:
+            print(msg)
+ usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+ 'buffer': ''}
+ if self.failfast != False:
+ usage['failfast'] = FAILFAST
+ if self.catchbreak != False:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer != False:
+ usage['buffer'] = BUFFEROUTPUT
+ usage_text = self.USAGE % usage
+ usage_lines = usage_text.split('\n')
+ usage_lines.insert(2, "Run a test suite with a subunit reporter.")
+ usage_lines.insert(3, "")
+ print('\n'.join(usage_lines))
+ sys.exit(2)
+
+
+def main():
+ # Disable the default buffering, for Python 2.x where pdb doesn't do it
+ # on non-ttys.
+ stream = get_default_formatter()
+ runner = SubunitTestRunner
+ # Patch stdout to be unbuffered, so that pdb works well on 2.6/2.7.
+ binstdout = io.open(sys.stdout.fileno(), 'wb', 0)
+ if sys.version_info[0] > 2:
+ sys.stdout = io.TextIOWrapper(binstdout, encoding=sys.stdout.encoding)
+ else:
+ sys.stdout = binstdout
+ SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
+ stdout=sys.stdout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py
new file mode 100644
index 00000000000..8c89d9b5605
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py
@@ -0,0 +1,729 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""TestResult helper classes used to by subunit."""
+
+import csv
+import datetime
+
+import testtools
+from testtools.compat import all
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
+from testtools import StreamResult
+
+from subunit import iso8601
+import subunit
+
+
+# NOT a TestResult, because we are implementing the interface, not inheriting
+# it.
+class TestResultDecorator(object):
+ """General pass-through decorator.
+
+ This provides a base that other TestResults can inherit from to
+ gain basic forwarding functionality. It also takes care of
+ handling the case where the target doesn't support newer methods
+ or features by degrading them.
+ """
+
+ # XXX: Since lp:testtools r250, this is in testtools. Once it's released,
+ # we should gut this and just use that.
+
+ def __init__(self, decorated):
+ """Create a TestResultDecorator forwarding to decorated."""
+ # Make every decorator degrade gracefully.
+ self.decorated = testtools.ExtendedToOriginalDecorator(decorated)
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ return self.decorated.startTestRun()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ return self.decorated.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ return self.decorated.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self.decorated.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self.decorated.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self.decorated.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self.decorated.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self.decorated.addUnexpectedSuccess(test, details=details)
+
+ def _get_failfast(self):
+ return getattr(self.decorated, 'failfast', False)
+
+ def _set_failfast(self, value):
+ self.decorated.failfast = value
+ failfast = property(_get_failfast, _set_failfast)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def stop(self):
+ return self.decorated.stop()
+
+ @property
+ def testsRun(self):
+ return self.decorated.testsRun
+
+ def tags(self, new_tags, gone_tags):
+ return self.decorated.tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self.decorated.time(a_datetime)
+
+
+class HookedTestResultDecorator(TestResultDecorator):
+ """A TestResult which calls a hook on every event."""
+
+ def __init__(self, decorated):
+ self.super = super(HookedTestResultDecorator, self)
+ self.super.__init__(decorated)
+
+ def startTest(self, test):
+ self._before_event()
+ return self.super.startTest(test)
+
+ def startTestRun(self):
+ self._before_event()
+ return self.super.startTestRun()
+
+ def stopTest(self, test):
+ self._before_event()
+ return self.super.stopTest(test)
+
+ def stopTestRun(self):
+ self._before_event()
+ return self.super.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ self._before_event()
+ return self.super.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._before_event()
+ return self.super.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._before_event()
+ return self.super.addUnexpectedSuccess(test, details=details)
+
+ def progress(self, offset, whence):
+ self._before_event()
+ return self.super.progress(offset, whence)
+
+ def wasSuccessful(self):
+ self._before_event()
+ return self.super.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ self._before_event()
+ return self.super.shouldStop
+
+ def stop(self):
+ self._before_event()
+ return self.super.stop()
+
+ def time(self, a_datetime):
+ self._before_event()
+ return self.super.time(a_datetime)
+
+
+class AutoTimingTestResultDecorator(HookedTestResultDecorator):
+ """Decorate a TestResult to add time events to a test run.
+
+ By default this will cause a time event before every test event,
+ but if explicit time data is being provided by the test run, then
+ this decorator will turn itself off to prevent causing confusion.
+ """
+
+ def __init__(self, decorated):
+ self._time = None
+ super(AutoTimingTestResultDecorator, self).__init__(decorated)
+
+ def _before_event(self):
+ time = self._time
+ if time is not None:
+ return
+ time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
+ self.decorated.time(time)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def time(self, a_datetime):
+ """Provide a timestamp for the current test activity.
+
+ :param a_datetime: If None, automatically add timestamps before every
+ event (this is the default behaviour if time() is not called at
+ all). If not None, pass the provided time onto the decorated
+ result object and disable automatic timestamps.
+ """
+ self._time = a_datetime
+ return self.decorated.time(a_datetime)
+
+
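+# Hedged sketch (not from the original source) of how this decorator is
+# typically stacked, mirroring SubunitTestRunner.run in subunit/run.py;
+# 'output' is assumed to be a binary, writable stream:
+#
+#   from testtools import ExtendedToStreamDecorator
+#   from subunit import StreamResultToBytes
+#   result = AutoTimingTestResultDecorator(
+#       ExtendedToStreamDecorator(StreamResultToBytes(output)))
+
+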
+class TagsMixin(object):
+
+ def __init__(self):
+ self._clear_tags()
+
+ def _clear_tags(self):
+ self._global_tags = set(), set()
+ self._test_tags = None
+
+ def _get_active_tags(self):
+ global_new, global_gone = self._global_tags
+ if self._test_tags is None:
+ return set(global_new)
+ test_new, test_gone = self._test_tags
+ return global_new.difference(test_gone).union(test_new)
+
+ def _get_current_scope(self):
+ if self._test_tags:
+ return self._test_tags
+ return self._global_tags
+
+ def _flush_current_scope(self, tag_receiver):
+ new_tags, gone_tags = self._get_current_scope()
+ if new_tags or gone_tags:
+ tag_receiver.tags(new_tags, gone_tags)
+ if self._test_tags:
+ self._test_tags = set(), set()
+ else:
+ self._global_tags = set(), set()
+
+ def startTestRun(self):
+ self._clear_tags()
+
+ def startTest(self, test):
+ self._test_tags = set(), set()
+
+ def stopTest(self, test):
+ self._test_tags = None
+
+ def tags(self, new_tags, gone_tags):
+ """Handle tag instructions.
+
+        Adds and removes tags as appropriate. If a test is currently running,
+        the tags apply only to that test and do not affect subsequent tests.
+
+        :param new_tags: Tags to add.
+        :param gone_tags: Tags to remove.
+ """
+ current_new_tags, current_gone_tags = self._get_current_scope()
+ current_new_tags.update(new_tags)
+ current_new_tags.difference_update(gone_tags)
+ current_gone_tags.update(gone_tags)
+ current_gone_tags.difference_update(new_tags)
+
+
+class TagCollapsingDecorator(HookedTestResultDecorator, TagsMixin):
+ """Collapses many 'tags' calls into one where possible."""
+
+ def __init__(self, result):
+ super(TagCollapsingDecorator, self).__init__(result)
+ self._clear_tags()
+
+ def _before_event(self):
+ self._flush_current_scope(self.decorated)
+
+ def tags(self, new_tags, gone_tags):
+ TagsMixin.tags(self, new_tags, gone_tags)
+
+
+class TimeCollapsingDecorator(HookedTestResultDecorator):
+ """Only pass on the first and last of a consecutive sequence of times."""
+
+ def __init__(self, decorated):
+ super(TimeCollapsingDecorator, self).__init__(decorated)
+ self._last_received_time = None
+ self._last_sent_time = None
+
+ def _before_event(self):
+ if self._last_received_time is None:
+ return
+ if self._last_received_time != self._last_sent_time:
+ self.decorated.time(self._last_received_time)
+ self._last_sent_time = self._last_received_time
+ self._last_received_time = None
+
+ def time(self, a_time):
+ # Don't upcall, because we don't want to call _before_event, it's only
+ # for non-time events.
+ if self._last_received_time is None:
+ self.decorated.time(a_time)
+ self._last_sent_time = a_time
+ self._last_received_time = a_time
+
+
+def and_predicates(predicates):
+ """Return a predicate that is true iff all predicates are true."""
+ # XXX: Should probably be in testtools to be better used by matchers. jml
+ return lambda *args, **kwargs: all(p(*args, **kwargs) for p in predicates)
+
+
+def make_tag_filter(with_tags, without_tags):
+ """Make a callback that checks tests against tags."""
+
+ with_tags = with_tags and set(with_tags) or None
+ without_tags = without_tags and set(without_tags) or None
+
+ def check_tags(test, outcome, err, details, tags):
+ if with_tags and not with_tags <= tags:
+ return False
+ if without_tags and bool(without_tags & tags):
+ return False
+ return True
+
+ return check_tags
+
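+
+# Hedged example (not part of the module): combine make_tag_filter with
+# TestResultFilter (defined below), much as the subunit-filter script does, to
+# keep tests tagged 'smoke' and drop tests tagged 'slow'. 'target' stands in
+# for the TestResult that receives the surviving events.
+#
+#   predicate = make_tag_filter(['smoke'], ['slow'])
+#   filtered = TestResultFilter(target, filter_success=False,
+#                               filter_predicate=predicate)
+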
+
+class _PredicateFilter(TestResultDecorator, TagsMixin):
+
+ def __init__(self, result, predicate):
+ super(_PredicateFilter, self).__init__(result)
+ self._clear_tags()
+ self.decorated = TimeCollapsingDecorator(
+ TagCollapsingDecorator(self.decorated))
+ self._predicate = predicate
+ # The current test (for filtering tags)
+ self._current_test = None
+ # Has the current test been filtered (for outputting test tags)
+ self._current_test_filtered = None
+ # Calls to this result that we don't know whether to forward on yet.
+ self._buffered_calls = []
+
+ def filter_predicate(self, test, outcome, error, details):
+ return self._predicate(
+ test, outcome, error, details, self._get_active_tags())
+
+ def addError(self, test, err=None, details=None):
+ if (self.filter_predicate(test, 'error', err, details)):
+ self._buffered_calls.append(
+ ('addError', [test, err], {'details': details}))
+ else:
+ self._filtered()
+
+ def addFailure(self, test, err=None, details=None):
+ if (self.filter_predicate(test, 'failure', err, details)):
+ self._buffered_calls.append(
+ ('addFailure', [test, err], {'details': details}))
+ else:
+ self._filtered()
+
+ def addSkip(self, test, reason=None, details=None):
+ if (self.filter_predicate(test, 'skip', reason, details)):
+ self._buffered_calls.append(
+ ('addSkip', [test, reason], {'details': details}))
+ else:
+ self._filtered()
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ if self.filter_predicate(test, 'expectedfailure', err, details):
+ self._buffered_calls.append(
+ ('addExpectedFailure', [test, err], {'details': details}))
+ else:
+ self._filtered()
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._buffered_calls.append(
+ ('addUnexpectedSuccess', [test], {'details': details}))
+
+ def addSuccess(self, test, details=None):
+ if (self.filter_predicate(test, 'success', None, details)):
+ self._buffered_calls.append(
+ ('addSuccess', [test], {'details': details}))
+ else:
+ self._filtered()
+
+ def _filtered(self):
+ self._current_test_filtered = True
+
+ def startTest(self, test):
+ """Start a test.
+
+ Not directly passed to the client, but used for handling of tags
+ correctly.
+ """
+ TagsMixin.startTest(self, test)
+ self._current_test = test
+ self._current_test_filtered = False
+ self._buffered_calls.append(('startTest', [test], {}))
+
+ def stopTest(self, test):
+ """Stop a test.
+
+ Not directly passed to the client, but used for handling of tags
+ correctly.
+ """
+ if not self._current_test_filtered:
+ for method, args, kwargs in self._buffered_calls:
+ getattr(self.decorated, method)(*args, **kwargs)
+ self.decorated.stopTest(test)
+ self._current_test = None
+ self._current_test_filtered = None
+ self._buffered_calls = []
+ TagsMixin.stopTest(self, test)
+
+ def tags(self, new_tags, gone_tags):
+ TagsMixin.tags(self, new_tags, gone_tags)
+ if self._current_test is not None:
+ self._buffered_calls.append(('tags', [new_tags, gone_tags], {}))
+ else:
+ return super(_PredicateFilter, self).tags(new_tags, gone_tags)
+
+ def time(self, a_time):
+ return self.decorated.time(a_time)
+
+ def id_to_orig_id(self, id):
+ if id.startswith("subunit.RemotedTestCase."):
+ return id[len("subunit.RemotedTestCase."):]
+ return id
+
+
+class TestResultFilter(TestResultDecorator):
+ """A pyunit TestResult interface implementation which filters tests.
+
+ Tests that pass the filter are handed on to another TestResult instance
+ for further processing/reporting. To obtain the filtered results,
+ the other instance must be interrogated.
+
+ :ivar result: The result that tests are passed to after filtering.
+ :ivar filter_predicate: The callback run to decide whether to pass
+ a result.
+ """
+
+ def __init__(self, result, filter_error=False, filter_failure=False,
+ filter_success=True, filter_skip=False, filter_xfail=False,
+ filter_predicate=None, fixup_expected_failures=None):
+ """Create a FilterResult object filtering to result.
+
+ :param filter_error: Filter out errors.
+ :param filter_failure: Filter out failures.
+ :param filter_success: Filter out successful tests.
+ :param filter_skip: Filter out skipped tests.
+ :param filter_xfail: Filter out expected failure tests.
+ :param filter_predicate: A callable taking (test, outcome, err,
+ details, tags) and returning True if the result should be passed
+            through. err and details may be None if no error or extra
+ metadata is available. outcome is the name of the outcome such
+ as 'success' or 'failure'. tags is new in 0.0.8; 0.0.7 filters
+ are still supported but should be updated to accept the tags
+ parameter for efficiency.
+ :param fixup_expected_failures: Set of test ids to consider known
+ failing.
+ """
+ predicates = []
+ if filter_error:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'error')
+ if filter_failure:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'failure')
+ if filter_success:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'success')
+ if filter_skip:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'skip')
+ if filter_xfail:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'expectedfailure')
+ if filter_predicate is not None:
+ def compat(test, outcome, error, details, tags):
+ # 0.0.7 and earlier did not support the 'tags' parameter.
+ try:
+ return filter_predicate(
+ test, outcome, error, details, tags)
+ except TypeError:
+ return filter_predicate(test, outcome, error, details)
+ predicates.append(compat)
+ predicate = and_predicates(predicates)
+ super(TestResultFilter, self).__init__(
+ _PredicateFilter(result, predicate))
+ if fixup_expected_failures is None:
+ self._fixup_expected_failures = frozenset()
+ else:
+ self._fixup_expected_failures = fixup_expected_failures
+
+ def addError(self, test, err=None, details=None):
+ if self._failure_expected(test):
+ self.addExpectedFailure(test, err=err, details=details)
+ else:
+ super(TestResultFilter, self).addError(
+ test, err=err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ if self._failure_expected(test):
+ self.addExpectedFailure(test, err=err, details=details)
+ else:
+ super(TestResultFilter, self).addFailure(
+ test, err=err, details=details)
+
+ def addSuccess(self, test, details=None):
+ if self._failure_expected(test):
+ self.addUnexpectedSuccess(test, details=details)
+ else:
+ super(TestResultFilter, self).addSuccess(test, details=details)
+
+ def _failure_expected(self, test):
+ return (test.id() in self._fixup_expected_failures)
+
+
+class TestIdPrintingResult(testtools.TestResult):
+ """Print test ids to a stream.
+
+ Implements both TestResult and StreamResult, for compatibility.
+ """
+
+ def __init__(self, stream, show_times=False, show_exists=False):
+ """Create a FilterResult object outputting to stream."""
+ super(TestIdPrintingResult, self).__init__()
+ self._stream = stream
+ self.show_exists = show_exists
+ self.show_times = show_times
+
+ def startTestRun(self):
+ self.failed_tests = 0
+ self.__time = None
+ self._test = None
+ self._test_duration = 0
+ self._active_tests = {}
+
+ def addError(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addFailure(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addSuccess(self, test):
+ self._test = test
+
+ def addSkip(self, test, reason=None, details=None):
+ self._test = test
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self.failed_tests += 1
+ self._test = test
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._test = test
+
+ def reportTest(self, test_id, duration):
+ if self.show_times:
+ seconds = duration.seconds
+ seconds += duration.days * 3600 * 24
+ seconds += duration.microseconds / 1000000.0
+ self._stream.write(test_id + ' %0.3f\n' % seconds)
+ else:
+ self._stream.write(test_id + '\n')
+
+ def startTest(self, test):
+ self._start_time = self._time()
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ if not test_id:
+ return
+ if timestamp is not None:
+ self.time(timestamp)
+ if test_status=='exists':
+ if self.show_exists:
+ self.reportTest(test_id, 0)
+ elif test_status in ('inprogress', None):
+ self._active_tests[test_id] = self._time()
+ else:
+ self._end_test(test_id)
+
+ def _end_test(self, test_id):
+ test_start = self._active_tests.pop(test_id, None)
+ if not test_start:
+ test_duration = 0
+ else:
+ test_duration = self._time() - test_start
+ self.reportTest(test_id, test_duration)
+
+ def stopTest(self, test):
+ test_duration = self._time() - self._start_time
+ self.reportTest(self._test.id(), test_duration)
+
+ def time(self, time):
+ self.__time = time
+
+ def _time(self):
+ return self.__time
+
+ def wasSuccessful(self):
+ "Tells whether or not this result was a success"
+ return self.failed_tests == 0
+
+ def stopTestRun(self):
+ for test_id in list(self._active_tests.keys()):
+ self._end_test(test_id)
+
+
+class TestByTestResult(testtools.TestResult):
+ """Call something every time a test completes."""
+
+# XXX: In testtools since lp:testtools r249. Once that's released, just
+# import that.
+
+ def __init__(self, on_test):
+ """Construct a ``TestByTestResult``.
+
+        :param on_test: A callable that takes a test case, a status (one of
+ "success", "failure", "error", "skip", or "xfail"), a start time
+ (a ``datetime`` with timezone), a stop time, an iterable of tags,
+ and a details dict. Is called at the end of each test (i.e. on
+ ``stopTest``) with the accumulated values for that test.
+ """
+ super(TestByTestResult, self).__init__()
+ self._on_test = on_test
+
+ def startTest(self, test):
+ super(TestByTestResult, self).startTest(test)
+ self._start_time = self._now()
+ # There's no supported (i.e. tested) behaviour that relies on these
+ # being set, but it makes me more comfortable all the same. -- jml
+ self._status = None
+ self._details = None
+ self._stop_time = None
+
+ def stopTest(self, test):
+ self._stop_time = self._now()
+ super(TestByTestResult, self).stopTest(test)
+ self._on_test(
+ test=test,
+ status=self._status,
+ start_time=self._start_time,
+ stop_time=self._stop_time,
+ # current_tags is new in testtools 0.9.13.
+ tags=getattr(self, 'current_tags', None),
+ details=self._details)
+
+ def _err_to_details(self, test, err, details):
+ if details:
+ return details
+ return {'traceback': TracebackContent(err, test)}
+
+ def addSuccess(self, test, details=None):
+ super(TestByTestResult, self).addSuccess(test)
+ self._status = 'success'
+ self._details = details
+
+ def addFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addFailure(test, err, details)
+ self._status = 'failure'
+ self._details = self._err_to_details(test, err, details)
+
+ def addError(self, test, err=None, details=None):
+ super(TestByTestResult, self).addError(test, err, details)
+ self._status = 'error'
+ self._details = self._err_to_details(test, err, details)
+
+ def addSkip(self, test, reason=None, details=None):
+ super(TestByTestResult, self).addSkip(test, reason, details)
+ self._status = 'skip'
+ if details is None:
+ details = {'reason': text_content(reason)}
+ elif reason:
+ # XXX: What if details already has 'reason' key?
+ details['reason'] = text_content(reason)
+ self._details = details
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addExpectedFailure(test, err, details)
+ self._status = 'xfail'
+ self._details = self._err_to_details(test, err, details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ super(TestByTestResult, self).addUnexpectedSuccess(test, details)
+ self._status = 'success'
+ self._details = details
+
+
+class CsvResult(TestByTestResult):
+
+ def __init__(self, stream):
+ super(CsvResult, self).__init__(self._on_test)
+ self._write_row = csv.writer(stream).writerow
+
+ def _on_test(self, test, status, start_time, stop_time, tags, details):
+ self._write_row([test.id(), status, start_time, stop_time])
+
+ def startTestRun(self):
+ super(CsvResult, self).startTestRun()
+ self._write_row(['test', 'status', 'start_time', 'stop_time'])
+
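+
+# Usage sketch (an assumption, not part of the module): write one CSV row per
+# completed test; 'results.csv' and 'suite' are hypothetical.
+#
+#   with open('results.csv', 'w') as f:
+#       result = CsvResult(f)
+#       result.startTestRun()
+#       suite.run(result)
+#       result.stopTestRun()
+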
+
+class CatFiles(StreamResult):
+ """Cat file attachments received to a stream."""
+
+ def __init__(self, byte_stream):
+ self.stream = subunit.make_stream_binary(byte_stream)
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ if file_name is not None:
+ self.stream.write(file_bytes)
+ self.stream.flush()
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py
new file mode 100644
index 00000000000..b45d7f94569
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py
@@ -0,0 +1,63 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import sys
+from unittest import TestLoader
+
+
+# Before the test module imports to avoid circularity.
+# For testing: different pythons have different str() implementations.
+if sys.version_info > (3, 0):
+ _remote_exception_repr = "testtools.testresult.real._StringException"
+ _remote_exception_str = "Traceback (most recent call last):\ntesttools.testresult.real._StringException"
+ _remote_exception_str_chunked = "57\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+else:
+ _remote_exception_repr = "_StringException"
+ _remote_exception_str = "Traceback (most recent call last):\n_StringException"
+ _remote_exception_str_chunked = "3D\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+
+
+from subunit.tests import (
+ test_chunked,
+ test_details,
+ test_filters,
+ test_progress_model,
+ test_run,
+ test_subunit_filter,
+ test_subunit_stats,
+ test_subunit_tags,
+ test_tap2subunit,
+ test_test_protocol,
+ test_test_protocol2,
+ test_test_results,
+ )
+
+
+def test_suite():
+ loader = TestLoader()
+ result = loader.loadTestsFromModule(test_chunked)
+ result.addTest(loader.loadTestsFromModule(test_details))
+ result.addTest(loader.loadTestsFromModule(test_filters))
+ result.addTest(loader.loadTestsFromModule(test_progress_model))
+ result.addTest(loader.loadTestsFromModule(test_test_results))
+ result.addTest(loader.loadTestsFromModule(test_test_protocol))
+ result.addTest(loader.loadTestsFromModule(test_test_protocol2))
+ result.addTest(loader.loadTestsFromModule(test_tap2subunit))
+ result.addTest(loader.loadTestsFromModule(test_subunit_filter))
+ result.addTest(loader.loadTestsFromModule(test_subunit_tags))
+ result.addTest(loader.loadTestsFromModule(test_subunit_stats))
+ result.addTest(loader.loadTestsFromModule(test_run))
+ return result
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py
new file mode 100755
index 00000000000..91838f6d6fb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+import sys
+if sys.platform == "win32":
+ import msvcrt, os
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+if len(sys.argv) == 2:
+ # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args
+ # uses this code path to be sure that the arguments were passed to
+ # sample-script.py
+ print("test fail")
+ print("error fail")
+ sys.exit(0)
+print("test old mcdonald")
+print("success old mcdonald")
+print("test bing crosby")
+print("failure bing crosby [")
+print("foo.c:53:ERROR invalid state")
+print("]")
+print("test an error")
+print("error an error")
+sys.exit(0)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py
new file mode 100755
index 00000000000..fc73dfc409d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+import sys
+print("test old mcdonald")
+print("success old mcdonald")
+print("test bing crosby")
+print("success bing crosby")
+sys.exit(0)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py
new file mode 100644
index 00000000000..5100b323892
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py
@@ -0,0 +1,146 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+from testtools.compat import _b, BytesIO
+
+import subunit.chunked
+
+
+class TestDecode(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output = BytesIO()
+ self.decoder = subunit.chunked.Decoder(self.output)
+
+ def test_close_read_length_short_errors(self):
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_body_short_errors(self):
+ self.assertEqual(None, self.decoder.write(_b('2\r\na')))
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_body_buffered_data_errors(self):
+ self.assertEqual(None, self.decoder.write(_b('2\r')))
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_after_finished_stream_safe(self):
+ self.assertEqual(None, self.decoder.write(_b('2\r\nab')))
+ self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+ self.decoder.close()
+
+ def test_decode_nothing(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+ self.assertEqual(_b(''), self.output.getvalue())
+
+ def test_decode_serialised_form(self):
+ self.assertEqual(None, self.decoder.write(_b("F\r\n")))
+ self.assertEqual(None, self.decoder.write(_b("serialised\n")))
+ self.assertEqual(_b(''), self.decoder.write(_b("form0\r\n")))
+
+ def test_decode_short(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('3\r\nabc0\r\n')))
+ self.assertEqual(_b('abc'), self.output.getvalue())
+
+ def test_decode_combines_short(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('6\r\nabcdef0\r\n')))
+ self.assertEqual(_b('abcdef'), self.output.getvalue())
+
+ def test_decode_excess_bytes_from_write(self):
+ self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
+ self.assertEqual(_b('abc'), self.output.getvalue())
+
+ def test_decode_write_after_finished_errors(self):
+ self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
+ self.assertRaises(ValueError, self.decoder.write, _b(''))
+
+ def test_decode_hex(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('A\r\n12345678900\r\n')))
+ self.assertEqual(_b('1234567890'), self.output.getvalue())
+
+ def test_decode_long_ranges(self):
+ self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
+ self.assertEqual(None, self.decoder.write(_b('1' * 65536)))
+ self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
+ self.assertEqual(None, self.decoder.write(_b('2' * 65536)))
+ self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+ self.assertEqual(_b('1' * 65536 + '2' * 65536), self.output.getvalue())
+
+ def test_decode_newline_nonstrict(self):
+ """Tolerate chunk markers with no CR character."""
+ # From <http://pad.lv/505078>
+ self.decoder = subunit.chunked.Decoder(self.output, strict=False)
+ self.assertEqual(None, self.decoder.write(_b('a\n')))
+ self.assertEqual(None, self.decoder.write(_b('abcdeabcde')))
+ self.assertEqual(_b(''), self.decoder.write(_b('0\n')))
+ self.assertEqual(_b('abcdeabcde'), self.output.getvalue())
+
+ def test_decode_strict_newline_only(self):
+ """Reject chunk markers with no CR character in strict mode."""
+ # From <http://pad.lv/505078>
+ self.assertRaises(ValueError,
+ self.decoder.write, _b('a\n'))
+
+ def test_decode_strict_multiple_crs(self):
+ self.assertRaises(ValueError,
+ self.decoder.write, _b('a\r\r\n'))
+
+ def test_decode_short_header(self):
+ self.assertRaises(ValueError,
+ self.decoder.write, _b('\n'))
+
+
+class TestEncode(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output = BytesIO()
+ self.encoder = subunit.chunked.Encoder(self.output)
+
+ def test_encode_nothing(self):
+ self.encoder.close()
+ self.assertEqual(_b('0\r\n'), self.output.getvalue())
+
+ def test_encode_empty(self):
+ self.encoder.write(_b(''))
+ self.encoder.close()
+ self.assertEqual(_b('0\r\n'), self.output.getvalue())
+
+ def test_encode_short(self):
+ self.encoder.write(_b('abc'))
+ self.encoder.close()
+ self.assertEqual(_b('3\r\nabc0\r\n'), self.output.getvalue())
+
+ def test_encode_combines_short(self):
+ self.encoder.write(_b('abc'))
+ self.encoder.write(_b('def'))
+ self.encoder.close()
+ self.assertEqual(_b('6\r\nabcdef0\r\n'), self.output.getvalue())
+
+ def test_encode_over_9_is_in_hex(self):
+ self.encoder.write(_b('1234567890'))
+ self.encoder.close()
+ self.assertEqual(_b('A\r\n12345678900\r\n'), self.output.getvalue())
+
+ def test_encode_long_ranges_not_combined(self):
+ self.encoder.write(_b('1' * 65536))
+ self.encoder.write(_b('2' * 65536))
+ self.encoder.close()
+ self.assertEqual(_b('10000\r\n' + '1' * 65536 + '10000\r\n' +
+ '2' * 65536 + '0\r\n'), self.output.getvalue())
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py
new file mode 100644
index 00000000000..8605c5ac951
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py
@@ -0,0 +1,106 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+from testtools.compat import _b, StringIO
+
+import subunit.tests
+from subunit import content, content_type, details
+
+
+class TestSimpleDetails(unittest.TestCase):
+
+ def test_lineReceived(self):
+ parser = details.SimpleDetailsParser(None)
+ parser.lineReceived(_b("foo\n"))
+ parser.lineReceived(_b("bar\n"))
+ self.assertEqual(_b("foo\nbar\n"), parser._message)
+
+ def test_lineReceived_escaped_bracket(self):
+ parser = details.SimpleDetailsParser(None)
+ parser.lineReceived(_b("foo\n"))
+ parser.lineReceived(_b(" ]are\n"))
+ parser.lineReceived(_b("bar\n"))
+ self.assertEqual(_b("foo\n]are\nbar\n"), parser._message)
+
+ def test_get_message(self):
+ parser = details.SimpleDetailsParser(None)
+ self.assertEqual(_b(""), parser.get_message())
+
+ def test_get_details(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['traceback'] = content.Content(
+ content_type.ContentType("text", "x-traceback",
+ {'charset': 'utf8'}),
+ lambda:[_b("")])
+ found = parser.get_details()
+ self.assertEqual(expected.keys(), found.keys())
+ self.assertEqual(expected['traceback'].content_type,
+ found['traceback'].content_type)
+ self.assertEqual(_b('').join(expected['traceback'].iter_bytes()),
+ _b('').join(found['traceback'].iter_bytes()))
+
+ def test_get_details_skip(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['reason'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[_b("")])
+ found = parser.get_details("skip")
+ self.assertEqual(expected, found)
+
+ def test_get_details_success(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['message'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[_b("")])
+ found = parser.get_details("success")
+ self.assertEqual(expected, found)
+
+
+class TestMultipartDetails(unittest.TestCase):
+
+ def test_get_message_is_None(self):
+ parser = details.MultipartDetailsParser(None)
+ self.assertEqual(None, parser.get_message())
+
+ def test_get_details(self):
+ parser = details.MultipartDetailsParser(None)
+ self.assertEqual({}, parser.get_details())
+
+ def test_parts(self):
+ parser = details.MultipartDetailsParser(None)
+ parser.lineReceived(_b("Content-Type: text/plain\n"))
+ parser.lineReceived(_b("something\n"))
+ parser.lineReceived(_b("F\r\n"))
+ parser.lineReceived(_b("serialised\n"))
+ parser.lineReceived(_b("form0\r\n"))
+ expected = {}
+ expected['something'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[_b("serialised\nform")])
+ found = parser.get_details()
+ self.assertEqual(expected.keys(), found.keys())
+ self.assertEqual(expected['something'].content_type,
+ found['something'].content_type)
+ self.assertEqual(_b('').join(expected['something'].iter_bytes()),
+ _b('').join(found['something'].iter_bytes()))
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py
new file mode 100644
index 00000000000..0a5e7c74b71
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py
@@ -0,0 +1,35 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import sys
+from tempfile import NamedTemporaryFile
+
+from testtools import TestCase
+
+from subunit.filters import find_stream
+
+
+class TestFindStream(TestCase):
+
+ def test_no_argv(self):
+ self.assertEqual('foo', find_stream('foo', []))
+
+ def test_opens_file(self):
+ f = NamedTemporaryFile()
+ f.write(b'foo')
+ f.flush()
+ stream = find_stream('bar', [f.name])
+ self.assertEqual(b'foo', stream.read())
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py
new file mode 100644
index 00000000000..2ca08888285
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py
@@ -0,0 +1,112 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+import subunit
+from subunit.progress_model import ProgressModel
+
+
+class TestProgressModel(unittest.TestCase):
+
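+    # Reading of the "substack" tests below, inferred from their expected
+    # values rather than any ProgressModel documentation: a pushed
+    # sub-progress scales into its parent, so the reported width is
+    # parent_width * child_width and the position is
+    # parent_pos * child_width + child_pos.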
+ def assertProgressSummary(self, pos, total, progress):
+ """Assert that a progress model has reached a particular point."""
+ self.assertEqual(pos, progress.pos())
+ self.assertEqual(total, progress.width())
+
+ def test_new_progress_0_0(self):
+ progress = ProgressModel()
+ self.assertProgressSummary(0, 0, progress)
+
+ def test_advance_0_0(self):
+ progress = ProgressModel()
+ progress.advance()
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_advance_1_0(self):
+ progress = ProgressModel()
+ progress.advance()
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_set_width_absolute(self):
+ progress = ProgressModel()
+ progress.set_width(10)
+ self.assertProgressSummary(0, 10, progress)
+
+ def test_set_width_absolute_preserves_pos(self):
+ progress = ProgressModel()
+ progress.advance()
+ progress.set_width(2)
+ self.assertProgressSummary(1, 2, progress)
+
+ def test_adjust_width(self):
+ progress = ProgressModel()
+ progress.adjust_width(10)
+ self.assertProgressSummary(0, 10, progress)
+ progress.adjust_width(-10)
+ self.assertProgressSummary(0, 0, progress)
+
+ def test_adjust_width_preserves_pos(self):
+ progress = ProgressModel()
+ progress.advance()
+ progress.adjust_width(10)
+ self.assertProgressSummary(1, 10, progress)
+ progress.adjust_width(-10)
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_push_preserves_progress(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ self.assertProgressSummary(1, 3, progress)
+
+ def test_advance_advances_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(1)
+ progress.advance()
+ self.assertProgressSummary(2, 3, progress)
+
+ def test_adjust_width_adjusts_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(2)
+ progress.advance()
+ self.assertProgressSummary(3, 6, progress)
+
+ def test_set_width_adjusts_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.set_width(2)
+ progress.advance()
+ self.assertProgressSummary(3, 6, progress)
+
+ def test_pop_restores_progress(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(1)
+ progress.advance()
+ progress.pop()
+ self.assertProgressSummary(1, 3, progress)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py
new file mode 100644
index 00000000000..6ac84e15d63
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py
@@ -0,0 +1,64 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2011 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+from testtools.compat import BytesIO
+import unittest
+
+from testtools import PlaceHolder, TestCase
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+from subunit import run
+from subunit.run import SubunitTestRunner
+
+
+class TestSubunitTestRunner(TestCase):
+
+ def test_includes_timing_output(self):
+ io = BytesIO()
+ runner = SubunitTestRunner(stream=io)
+ test = PlaceHolder('name')
+ runner.run(test)
+ io.seek(0)
+ eventstream = StreamResult()
+ subunit.ByteStreamToStreamResult(io).run(eventstream)
+        timestamps = [event[-1] for event in eventstream._events
+            if event[-1] is not None]
+ self.assertNotEqual([], timestamps)
+
+ def test_enumerates_tests_before_run(self):
+ io = BytesIO()
+ runner = SubunitTestRunner(stream=io)
+ test1 = PlaceHolder('name1')
+ test2 = PlaceHolder('name2')
+ case = unittest.TestSuite([test1, test2])
+ runner.run(case)
+ io.seek(0)
+ eventstream = StreamResult()
+ subunit.ByteStreamToStreamResult(io).run(eventstream)
+ self.assertEqual([
+ ('status', 'name1', 'exists'),
+ ('status', 'name2', 'exists'),
+ ], [event[:3] for event in eventstream._events[:2]])
+
+ def test_list_errors_if_errors_from_list_test(self):
+ io = BytesIO()
+ runner = SubunitTestRunner(stream=io)
+ def list_test(test):
+ return [], ['failed import']
+ self.patch(run, 'list_test', list_test)
+ exc = self.assertRaises(SystemExit, runner.list, None)
+ self.assertEqual((2,), exc.args)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py
new file mode 100644
index 00000000000..5f34b3bc75d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py
@@ -0,0 +1,346 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.TestResultFilter."""
+
+from datetime import datetime
+import os
+import subprocess
+import sys
+from subunit import iso8601
+import unittest
+
+from testtools import TestCase
+from testtools.compat import _b, BytesIO
+from testtools.testresult.doubles import ExtendedTestResult, StreamResult
+
+import subunit
+from subunit.test_results import make_tag_filter, TestResultFilter
+from subunit import ByteStreamToStreamResult, StreamResultToBytes
+
+
+class TestTestResultFilter(TestCase):
+ """Test for TestResultFilter, a TestResult object which filters tests."""
+
+    # While TestResultFilter works on Python objects, using a subunit stream
+    # is an easy, pithy way of getting a series of test objects to call into
+    # the TestResult, and, as TestResultFilter is intended for use with
+    # subunit, it also has the benefit of detecting any interface skew issues.
+ example_subunit_stream = _b("""\
+tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error [
+error details
+]
+test skipped
+skip skipped
+test todo
+xfail todo
+""")
+
+ def run_tests(self, result_filter, input_stream=None):
+ """Run tests through the given filter.
+
+ :param result_filter: A filtering TestResult object.
+ :param input_stream: Bytes of subunit stream data. If not provided,
+ uses TestTestResultFilter.example_subunit_stream.
+ """
+ if input_stream is None:
+ input_stream = self.example_subunit_stream
+ test = subunit.ProtocolTestCase(BytesIO(input_stream))
+ test.run(result_filter)
+
+ def test_default(self):
+ """The default is to exclude success and include everything else."""
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result)
+ self.run_tests(result_filter)
+ # skips are seen as success by default python TestResult.
+ self.assertEqual(['error'],
+ [error[0].id() for error in filtered_result.errors])
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(4, filtered_result.testsRun)
+
+ def test_tag_filter(self):
+ tag_filter = make_tag_filter(['global'], ['local'])
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(
+ result, filter_success=False, filter_predicate=tag_filter)
+ self.run_tests(result_filter)
+ tests_included = [
+ event[1] for event in result._events if event[0] == 'startTest']
+ tests_expected = list(map(
+ subunit.RemotedTestCase,
+ ['passed', 'error', 'skipped', 'todo']))
+ self.assertEquals(tests_expected, tests_included)
+
+ def test_tags_tracked_correctly(self):
+ tag_filter = make_tag_filter(['a'], [])
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(
+ result, filter_success=False, filter_predicate=tag_filter)
+ input_stream = _b(
+ "test: foo\n"
+ "tags: a\n"
+ "successful: foo\n"
+ "test: bar\n"
+ "successful: bar\n")
+ self.run_tests(result_filter, input_stream)
+ foo = subunit.RemotedTestCase('foo')
+ self.assertEquals(
+ [('startTest', foo),
+ ('tags', set(['a']), set()),
+ ('addSuccess', foo),
+ ('stopTest', foo),
+ ],
+ result._events)
+
+ def test_exclude_errors(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result, filter_error=True)
+ self.run_tests(result_filter)
+ # skips are seen as errors by default python TestResult.
+ self.assertEqual([], filtered_result.errors)
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(3, filtered_result.testsRun)
+
+ def test_fixup_expected_failures(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result,
+ fixup_expected_failures=set(["failed"]))
+ self.run_tests(result_filter)
+ self.assertEqual(['failed', 'todo'],
+ [failure[0].id() for failure in filtered_result.expectedFailures])
+ self.assertEqual([], filtered_result.failures)
+ self.assertEqual(4, filtered_result.testsRun)
+
+ def test_fixup_expected_errors(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result,
+ fixup_expected_failures=set(["error"]))
+ self.run_tests(result_filter)
+ self.assertEqual(['error', 'todo'],
+ [failure[0].id() for failure in filtered_result.expectedFailures])
+ self.assertEqual([], filtered_result.errors)
+ self.assertEqual(4, filtered_result.testsRun)
+
+ def test_fixup_unexpected_success(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result, filter_success=False,
+ fixup_expected_failures=set(["passed"]))
+ self.run_tests(result_filter)
+ self.assertEqual(['passed'],
+ [passed.id() for passed in filtered_result.unexpectedSuccesses])
+ self.assertEqual(5, filtered_result.testsRun)
+
+ def test_exclude_failure(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result, filter_failure=True)
+ self.run_tests(result_filter)
+ self.assertEqual(['error'],
+ [error[0].id() for error in filtered_result.errors])
+ self.assertEqual([],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(3, filtered_result.testsRun)
+
+ def test_exclude_skips(self):
+ filtered_result = subunit.TestResultStats(None)
+ result_filter = TestResultFilter(filtered_result, filter_skip=True)
+ self.run_tests(result_filter)
+ self.assertEqual(0, filtered_result.skipped_tests)
+ self.assertEqual(2, filtered_result.failed_tests)
+ self.assertEqual(3, filtered_result.testsRun)
+
+ def test_include_success(self):
+ """Successes can be included if requested."""
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result,
+ filter_success=False)
+ self.run_tests(result_filter)
+ self.assertEqual(['error'],
+ [error[0].id() for error in filtered_result.errors])
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(5, filtered_result.testsRun)
+
+ def test_filter_predicate(self):
+ """You can filter by predicate callbacks"""
+ # 0.0.7 and earlier did not support the 'tags' parameter, so we need
+ # to test that we still support behaviour without it.
+ filtered_result = unittest.TestResult()
+ def filter_cb(test, outcome, err, details):
+ return outcome == 'success'
+ result_filter = TestResultFilter(filtered_result,
+ filter_predicate=filter_cb,
+ filter_success=False)
+ self.run_tests(result_filter)
+ # Only success should pass
+ self.assertEqual(1, filtered_result.testsRun)
+
+ def test_filter_predicate_with_tags(self):
+ """You can filter by predicate callbacks that accept tags"""
+ filtered_result = unittest.TestResult()
+ def filter_cb(test, outcome, err, details, tags):
+ return outcome == 'success'
+ result_filter = TestResultFilter(filtered_result,
+ filter_predicate=filter_cb,
+ filter_success=False)
+ self.run_tests(result_filter)
+ # Only success should pass
+ self.assertEqual(1, filtered_result.testsRun)
+
+ def test_time_ordering_preserved(self):
+ # Passing a subunit stream through TestResultFilter preserves the
+ # relative ordering of 'time' directives and any other subunit
+ # directives that are still included.
+ date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
+ date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
+ date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
+ subunit_stream = _b('\n'.join([
+ "time: %s",
+ "test: foo",
+ "time: %s",
+ "error: foo",
+ "time: %s",
+ ""]) % (date_a, date_b, date_c))
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(result)
+ self.run_tests(result_filter, subunit_stream)
+ foo = subunit.RemotedTestCase('foo')
+ self.maxDiff = None
+ self.assertEqual(
+ [('time', date_a),
+ ('time', date_b),
+ ('startTest', foo),
+ ('addError', foo, {}),
+ ('stopTest', foo),
+ ('time', date_c)], result._events)
+
+ def test_time_passes_through_filtered_tests(self):
+ # Passing a subunit stream through TestResultFilter preserves 'time'
+ # directives even if a specific test is filtered out.
+ date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
+ date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
+ date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
+ subunit_stream = _b('\n'.join([
+ "time: %s",
+ "test: foo",
+ "time: %s",
+ "success: foo",
+ "time: %s",
+ ""]) % (date_a, date_b, date_c))
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(result)
+ result_filter.startTestRun()
+ self.run_tests(result_filter, subunit_stream)
+ result_filter.stopTestRun()
+ foo = subunit.RemotedTestCase('foo')
+ self.maxDiff = None
+ self.assertEqual(
+ [('startTestRun',),
+ ('time', date_a),
+ ('time', date_c),
+ ('stopTestRun',),], result._events)
+
+ def test_skip_preserved(self):
+ subunit_stream = _b('\n'.join([
+ "test: foo",
+ "skip: foo",
+ ""]))
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(result)
+ self.run_tests(result_filter, subunit_stream)
+ foo = subunit.RemotedTestCase('foo')
+ self.assertEquals(
+ [('startTest', foo),
+ ('addSkip', foo, {}),
+ ('stopTest', foo), ], result._events)
+
+ if sys.version_info < (2, 7):
+ # These tests require Python >=2.7.
+ del test_fixup_expected_failures, test_fixup_expected_errors, test_fixup_unexpected_success
+
+
+class TestFilterCommand(TestCase):
+
+ def run_command(self, args, stream):
+ root = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+ script_path = os.path.join(root, 'filters', 'subunit-filter')
+ command = [sys.executable, script_path] + list(args)
+ ps = subprocess.Popen(
+ command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = ps.communicate(stream)
+ if ps.returncode != 0:
+ raise RuntimeError("%s failed: %s" % (command, err))
+ return out
+
+ def test_default(self):
+ byte_stream = BytesIO()
+ stream = StreamResultToBytes(byte_stream)
+ stream.status(test_id="foo", test_status="inprogress")
+ stream.status(test_id="foo", test_status="skip")
+ output = self.run_command([], byte_stream.getvalue())
+ events = StreamResult()
+ ByteStreamToStreamResult(BytesIO(output)).run(events)
+ ids = set(event[1] for event in events._events)
+ self.assertEqual([
+ ('status', 'foo', 'inprogress'),
+ ('status', 'foo', 'skip'),
+ ], [event[:3] for event in events._events])
+
+ def test_tags(self):
+ byte_stream = BytesIO()
+ stream = StreamResultToBytes(byte_stream)
+ stream.status(
+ test_id="foo", test_status="inprogress", test_tags=set(["a"]))
+ stream.status(
+ test_id="foo", test_status="success", test_tags=set(["a"]))
+ stream.status(test_id="bar", test_status="inprogress")
+ stream.status(test_id="bar", test_status="inprogress")
+ stream.status(
+ test_id="baz", test_status="inprogress", test_tags=set(["a"]))
+ stream.status(
+ test_id="baz", test_status="success", test_tags=set(["a"]))
+ output = self.run_command(
+ ['-s', '--with-tag', 'a'], byte_stream.getvalue())
+ events = StreamResult()
+ ByteStreamToStreamResult(BytesIO(output)).run(events)
+ ids = set(event[1] for event in events._events)
+ self.assertEqual(set(['foo', 'baz']), ids)
+
+ def test_no_passthrough(self):
+ output = self.run_command(['--no-passthrough'], b'hi thar')
+ self.assertEqual(b'', output)
+
+ def test_passthrough(self):
+ output = self.run_command([], b'hi thar')
+ byte_stream = BytesIO()
+ stream = StreamResultToBytes(byte_stream)
+ stream.status(file_name="stdout", file_bytes=b'hi thar')
+ self.assertEqual(byte_stream.getvalue(), output)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py
new file mode 100644
index 00000000000..7c5e42dff82
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py
@@ -0,0 +1,78 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.TestResultStats."""
+
+import unittest
+
+from testtools.compat import _b, BytesIO, StringIO
+
+import subunit
+
+
+class TestTestResultStats(unittest.TestCase):
+ """Test for TestResultStats, a TestResult object that generates stats."""
+
+ def setUp(self):
+ self.output = StringIO()
+ self.result = subunit.TestResultStats(self.output)
+ self.input_stream = BytesIO()
+ self.test = subunit.ProtocolTestCase(self.input_stream)
+
+ def test_stats_empty(self):
+ self.test.run(self.result)
+ self.assertEqual(0, self.result.total_tests)
+ self.assertEqual(0, self.result.passed_tests)
+ self.assertEqual(0, self.result.failed_tests)
+ self.assertEqual(set(), self.result.seen_tags)
+
+ def setUpUsedStream(self):
+ self.input_stream.write(_b("""tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error
+test skipped
+skip skipped
+test todo
+xfail todo
+"""))
+ self.input_stream.seek(0)
+ self.test.run(self.result)
+
+ def test_stats_smoke_everything(self):
+ # Statistics are calculated usefully.
+ self.setUpUsedStream()
+ self.assertEqual(5, self.result.total_tests)
+ self.assertEqual(2, self.result.passed_tests)
+ self.assertEqual(2, self.result.failed_tests)
+ self.assertEqual(1, self.result.skipped_tests)
+ self.assertEqual(set(["global", "local"]), self.result.seen_tags)
+
+ def test_stat_formatting(self):
+ expected = ("""
+Total tests: 5
+Passed tests: 2
+Failed tests: 2
+Skipped tests: 1
+Seen tags: global, local
+""")[1:]
+ self.setUpUsedStream()
+ self.result.formatStats()
+ self.assertEqual(expected, self.output.getvalue())
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py
new file mode 100644
index 00000000000..a16edc11591
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py
@@ -0,0 +1,85 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.tag_stream."""
+
+from io import BytesIO
+
+import testtools
+from testtools.matchers import Contains
+
+import subunit
+import subunit.test_results
+
+
+class TestSubUnitTags(testtools.TestCase):
+
+ def setUp(self):
+ super(TestSubUnitTags, self).setUp()
+ self.original = BytesIO()
+ self.filtered = BytesIO()
+
+ def test_add_tag(self):
+        # Literal values are used to avoid set sort-order dependencies. The
+        # commented-out Python code below shows the derivation.
+ # reference = BytesIO()
+ # stream = subunit.StreamResultToBytes(reference)
+ # stream.status(
+ # test_id='test', test_status='inprogress', test_tags=set(['quux', 'foo']))
+ # stream.status(
+ # test_id='test', test_status='success', test_tags=set(['bar', 'quux', 'foo']))
+ reference = [
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x03bar\x04quux\x03fooqn\xab)',
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x04quux\x03foo\x03bar\xaf\xbd\x9d\xd6',
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x04quux\x03bar\x03foo\x03\x04b\r',
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
+ b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+ b'\x83\x1b\x04test\x03\x03foo\x04quux\x03bar\x08\xc2X\x83',
+ b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+ b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
+ b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+ b'\x83\x1b\x04test\x03\x03foo\x03bar\x04quux:\x05e\x80',
+ ]
+ stream = subunit.StreamResultToBytes(self.original)
+ stream.status(
+ test_id='test', test_status='inprogress', test_tags=set(['foo']))
+ stream.status(
+ test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
+ self.original.seek(0)
+ self.assertEqual(
+ 0, subunit.tag_stream(self.original, self.filtered, ["quux"]))
+ self.assertThat(reference, Contains(self.filtered.getvalue()))
+
+ def test_remove_tag(self):
+ reference = BytesIO()
+ stream = subunit.StreamResultToBytes(reference)
+ stream.status(
+ test_id='test', test_status='inprogress', test_tags=set(['foo']))
+ stream.status(
+ test_id='test', test_status='success', test_tags=set(['foo']))
+ stream = subunit.StreamResultToBytes(self.original)
+ stream.status(
+ test_id='test', test_status='inprogress', test_tags=set(['foo']))
+ stream.status(
+ test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
+ self.original.seek(0)
+ self.assertEqual(
+ 0, subunit.tag_stream(self.original, self.filtered, ["-bar"]))
+ self.assertEqual(reference.getvalue(), self.filtered.getvalue())
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py
new file mode 100644
index 00000000000..5b7c07a2eb3
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py
@@ -0,0 +1,387 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for TAP2SubUnit."""
+
+from io import BytesIO, StringIO
+import unittest
+
+from testtools import TestCase
+from testtools.compat import _u
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+
+UTF8_TEXT = 'text/plain; charset=UTF8'
+
+
+class TestTAP2SubUnit(TestCase):
+ """Tests for TAP2SubUnit.
+
+ These tests test TAP string data in, and subunit string data out.
+ This is ok because the subunit protocol is intended to be stable,
+ but it might be easier/pithier to write tests against TAP string in,
+    parsed subunit objects out (by hooking the subunit stream to a subunit
+    protocol server).
+ """
+
+ def setUp(self):
+ super(TestTAP2SubUnit, self).setUp()
+ self.tap = StringIO()
+ self.subunit = BytesIO()
+
+ def test_skip_entire_file(self):
+ # A file
+        # 1..0 # Skipped: comment
+ # results in a single skipped test.
+ self.tap.write(_u("1..0 # Skipped: entire file skipped\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'file skip', 'skip', None, True,
+ 'tap comment', b'Skipped: entire file skipped', True, None, None,
+ None)])
+
+ def test_ok_test_pass(self):
+ # A file
+ # ok
+ # results in a passed test with name 'test 1' (a synthetic name as tap
+ # does not require named fixtures - it is the first test in the tap
+ # stream).
+ self.tap.write(_u("ok\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'success', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_test_number_pass(self):
+ # A file
+ # ok 1
+ # results in a passed test with name 'test 1'
+ self.tap.write(_u("ok 1\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'success', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_test_number_description_pass(self):
+ # A file
+ # ok 1 - There is a description
+ # results in a passed test with name 'test 1 - There is a description'
+ self.tap.write(_u("ok 1 - There is a description\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1 - There is a description',
+ 'success', None, False, None, None, True, None, None, None)])
+
+ def test_ok_test_description_pass(self):
+ # A file
+ # ok There is a description
+ # results in a passed test with name 'test 1 There is a description'
+ self.tap.write(_u("ok There is a description\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1 There is a description',
+ 'success', None, False, None, None, True, None, None, None)])
+
+ def test_ok_SKIP_skip(self):
+ # A file
+ # ok # SKIP
+        # results in a skip test with name 'test 1'
+ self.tap.write(_u("ok # SKIP\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'skip', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_skip_number_comment_lowercase(self):
+ self.tap.write(_u("ok 1 # skip no samba environment available, skipping compilation\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'skip', None, False, 'tap comment',
+ b'no samba environment available, skipping compilation', True,
+ 'text/plain; charset=UTF8', None, None)])
+
+ def test_ok_number_description_SKIP_skip_comment(self):
+ # A file
+ # ok 1 foo # SKIP Not done yet
+ # results in a skip test with name 'test 1 foo' and a log of
+ # Not done yet
+ self.tap.write(_u("ok 1 foo # SKIP Not done yet\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1 foo', 'skip', None, False,
+ 'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_ok_SKIP_skip_comment(self):
+ # A file
+ # ok # SKIP Not done yet
+ # results in a skip test with name 'test 1' and a log of Not done yet
+ self.tap.write(_u("ok # SKIP Not done yet\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'skip', None, False,
+ 'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_ok_TODO_xfail(self):
+ # A file
+ # ok # TODO
+ # results in a xfail test with name 'test 1'
+ self.tap.write(_u("ok # TODO\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'xfail', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_TODO_xfail_comment(self):
+ # A file
+ # ok # TODO Not done yet
+ # results in a xfail test with name 'test 1' and a log of Not done yet
+ self.tap.write(_u("ok # TODO Not done yet\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'xfail', None, False,
+ 'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_bail_out_errors(self):
+ # A file with line in it
+ # Bail out! COMMENT
+ # is treated as an error
+ self.tap.write(_u("ok 1 foo\n"))
+ self.tap.write(_u("Bail out! Lifejacket engaged\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 foo', 'success', None, False, None, None, True,
+ None, None, None),
+ ('status', 'Bail out! Lifejacket engaged', 'fail', None, False,
+ None, None, True, None, None, None)])
+
+ def test_missing_test_at_end_with_plan_adds_error(self):
+ # A file
+ # 1..3
+ # ok first test
+        # not ok second test
+ # results in three tests, with the third being created
+ self.tap.write(_u('1..3\n'))
+ self.tap.write(_u('ok first test\n'))
+ self.tap.write(_u('not ok second test\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 first test', 'success', None, False, None,
+ None, True, None, None, None),
+ ('status', 'test 2 second test', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3', 'fail', None, False, 'tap meta',
+ b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_missing_test_with_plan_adds_error(self):
+ # A file
+ # 1..3
+ # ok first test
+ # not ok 3 third test
+ # results in three tests, with the second being created
+ self.tap.write(_u('1..3\n'))
+ self.tap.write(_u('ok first test\n'))
+ self.tap.write(_u('not ok 3 third test\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 first test', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 2', 'fail', None, False, 'tap meta',
+ b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+ None, None),
+ ('status', 'test 3 third test', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_missing_test_no_plan_adds_error(self):
+ # A file
+ # ok first test
+ # not ok 3 third test
+ # results in three tests, with the second being created
+ self.tap.write(_u('ok first test\n'))
+ self.tap.write(_u('not ok 3 third test\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 first test', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 2', 'fail', None, False, 'tap meta',
+ b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+ None, None),
+ ('status', 'test 3 third test', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_four_tests_in_a_row_trailing_plan(self):
+ # A file
+        # ok 1 - first test in a script with trailing plan
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # 1..4
+ # results in four tests numbered and named
+ self.tap.write(_u('ok 1 - first test in a script with trailing plan\n'))
+ self.tap.write(_u('not ok 2 - second\n'))
+ self.tap.write(_u('ok 3 - third\n'))
+ self.tap.write(_u('not ok 4 - fourth\n'))
+ self.tap.write(_u('1..4\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 - first test in a script with trailing plan',
+ 'success', None, False, None, None, True, None, None, None),
+ ('status', 'test 2 - second', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3 - third', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_four_tests_in_a_row_with_plan(self):
+ # A file
+ # 1..4
+        # ok 1 - first test in a script with a plan
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # results in four tests numbered and named
+ self.tap.write(_u('1..4\n'))
+ self.tap.write(_u('ok 1 - first test in a script with a plan\n'))
+ self.tap.write(_u('not ok 2 - second\n'))
+ self.tap.write(_u('ok 3 - third\n'))
+ self.tap.write(_u('not ok 4 - fourth\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 - first test in a script with a plan',
+ 'success', None, False, None, None, True, None, None, None),
+ ('status', 'test 2 - second', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3 - third', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_four_tests_in_a_row_no_plan(self):
+ # A file
+ # ok 1 - first test in a script with no plan at all
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # results in four tests numbered and named
+ self.tap.write(_u('ok 1 - first test in a script with no plan at all\n'))
+ self.tap.write(_u('not ok 2 - second\n'))
+ self.tap.write(_u('ok 3 - third\n'))
+ self.tap.write(_u('not ok 4 - fourth\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 - first test in a script with no plan at all',
+ 'success', None, False, None, None, True, None, None, None),
+ ('status', 'test 2 - second', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3 - third', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_todo_and_skip(self):
+ # A file
+ # not ok 1 - a fail but # TODO but is TODO
+ # not ok 2 - another fail # SKIP instead
+ # results in two tests, numbered and commented.
+ self.tap.write(_u("not ok 1 - a fail but # TODO but is TODO\n"))
+ self.tap.write(_u("not ok 2 - another fail # SKIP instead\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.subunit.seek(0)
+ events = StreamResult()
+ subunit.ByteStreamToStreamResult(self.subunit).run(events)
+ self.check_events([
+ ('status', 'test 1 - a fail but', 'xfail', None, False,
+ 'tap comment', b'but is TODO', True, 'text/plain; charset=UTF8',
+ None, None),
+ ('status', 'test 2 - another fail', 'skip', None, False,
+ 'tap comment', b'instead', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_leading_comments_add_to_next_test_log(self):
+ # A file
+ # # comment
+ # ok
+ # ok
+        # results in two tests, with the comment included
+        # in the first test's log and not the second.
+ self.tap.write(_u("# comment\n"))
+ self.tap.write(_u("ok\n"))
+ self.tap.write(_u("ok\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1', 'success', None, False, 'tap comment',
+ b'# comment', True, 'text/plain; charset=UTF8', None, None),
+ ('status', 'test 2', 'success', None, False, None, None, True,
+ None, None, None)])
+
+ def test_trailing_comments_are_included_in_last_test_log(self):
+ # A file
+        # ok
+        # ok
+        # # comment
+        # results in two tests, with the second having the comment
+ # attached to its log.
+ self.tap.write(_u("ok\n"))
+ self.tap.write(_u("ok\n"))
+ self.tap.write(_u("# comment\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1', 'success', None, False, None, None, True,
+ None, None, None),
+ ('status', 'test 2', 'success', None, False, 'tap comment',
+ b'# comment', True, 'text/plain; charset=UTF8', None, None)])
+
+ def check_events(self, events):
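+        # The expected tuples are 'status' events; their positional layout is
+        # assumed to mirror StreamResult.status() keyword order: (event name,
+        # test_id, test_status, test_tags, runnable, file_name, file_bytes,
+        # eof, mime_type, route_code, timestamp).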
+ self.subunit.seek(0)
+ eventstream = StreamResult()
+ subunit.ByteStreamToStreamResult(self.subunit).run(eventstream)
+ self.assertEqual(events, eventstream._events)
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py
new file mode 100644
index 00000000000..c6008f42eb2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py
@@ -0,0 +1,1362 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import datetime
+import unittest
+import os
+
+from testtools import PlaceHolder, skipIf, TestCase, TestResult
+from testtools.compat import _b, _u, BytesIO
+from testtools.content import Content, TracebackContent, text_content
+from testtools.content_type import ContentType
+try:
+ from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+except ImportError:
+ from testtools.tests.helpers import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+from testtools.matchers import Contains
+
+import subunit
+from subunit.tests import (
+ _remote_exception_repr,
+ _remote_exception_str,
+ _remote_exception_str_chunked,
+ )
+import subunit.iso8601 as iso8601
+
+
+def details_to_str(details):
+ return TestResult()._err_details_to_string(None, details=details)
+
+
+class TestTestImports(unittest.TestCase):
+
+ def test_imports(self):
+ from subunit import DiscardStream
+ from subunit import TestProtocolServer
+ from subunit import RemotedTestCase
+ from subunit import RemoteError
+ from subunit import ExecTestCase
+ from subunit import IsolatedTestCase
+ from subunit import TestProtocolClient
+ from subunit import ProtocolTestCase
+
+
+class TestDiscardStream(unittest.TestCase):
+
+ def test_write(self):
+ subunit.DiscardStream().write("content")
+
+
+class TestProtocolServerForward(unittest.TestCase):
+
+ def test_story(self):
+ client = unittest.TestResult()
+ out = BytesIO()
+ protocol = subunit.TestProtocolServer(client, forward_stream=out)
+ pipe = BytesIO(_b("test old mcdonald\n"
+ "success old mcdonald\n"))
+ protocol.readFrom(pipe)
+ self.assertEqual(client.testsRun, 1)
+ self.assertEqual(pipe.getvalue(), out.getvalue())
+
+ def test_not_command(self):
+ client = unittest.TestResult()
+ out = BytesIO()
+ protocol = subunit.TestProtocolServer(client,
+ stream=subunit.DiscardStream(), forward_stream=out)
+ pipe = BytesIO(_b("success old mcdonald\n"))
+ protocol.readFrom(pipe)
+ self.assertEqual(client.testsRun, 0)
+ self.assertEqual(_b(""), out.getvalue())
+
+
+class TestTestProtocolServerPipe(unittest.TestCase):
+
+ def test_story(self):
+ client = unittest.TestResult()
+ protocol = subunit.TestProtocolServer(client)
+ traceback = "foo.c:53:ERROR invalid state\n"
+ pipe = BytesIO(_b("test old mcdonald\n"
+ "success old mcdonald\n"
+ "test bing crosby\n"
+ "failure bing crosby [\n"
+ + traceback +
+ "]\n"
+ "test an error\n"
+ "error an error\n"))
+ protocol.readFrom(pipe)
+ bing = subunit.RemotedTestCase("bing crosby")
+ an_error = subunit.RemotedTestCase("an error")
+ self.assertEqual(client.errors,
+ [(an_error, _remote_exception_repr + '\n')])
+ self.assertEqual(
+ client.failures,
+ [(bing, _remote_exception_repr + ": "
+ + details_to_str({'traceback': text_content(traceback)}) + "\n")])
+ self.assertEqual(client.testsRun, 3)
+
+ def test_non_test_characters_forwarded_immediately(self):
+ pass
+
+
+class TestTestProtocolServerStartTest(unittest.TestCase):
+
+ def setUp(self):
+ self.client = Python26TestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.client, self.stream)
+
+ def test_start_test(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_start_testing(self):
+ self.protocol.lineReceived(_b("testing old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_start_test_colon(self):
+ self.protocol.lineReceived(_b("test: old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_indented_test_colon_ignored(self):
+ ignored_line = _b(" test: old mcdonald\n")
+ self.protocol.lineReceived(ignored_line)
+ self.assertEqual([], self.client._events)
+ self.assertEqual(self.stream.getvalue(), ignored_line)
+
+ def test_start_testing_colon(self):
+ self.protocol.lineReceived(_b("testing: old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+
+class TestTestProtocolServerPassThrough(unittest.TestCase):
+
+ def setUp(self):
+ self.stdout = BytesIO()
+ self.test = subunit.RemotedTestCase("old mcdonald")
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client, self.stdout)
+
+ def keywords_before_test(self):
+ self.protocol.lineReceived(_b("failure a\n"))
+ self.protocol.lineReceived(_b("failure: a\n"))
+ self.protocol.lineReceived(_b("error a\n"))
+ self.protocol.lineReceived(_b("error: a\n"))
+ self.protocol.lineReceived(_b("success a\n"))
+ self.protocol.lineReceived(_b("success: a\n"))
+ self.protocol.lineReceived(_b("successful a\n"))
+ self.protocol.lineReceived(_b("successful: a\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertEqual(self.stdout.getvalue(), _b("failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n"))
+
+ def test_keywords_before_test(self):
+ self.keywords_before_test()
+ self.assertEqual(self.client._events, [])
+
+ def test_keywords_after_error(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("error old mcdonald\n"))
+ self.keywords_before_test()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, {}),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_keywords_after_failure(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure old mcdonald\n"))
+ self.keywords_before_test()
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, {}),
+ ('stopTest', self.test),
+ ])
+
+ def test_keywords_after_success(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("success old mcdonald\n"))
+ self.keywords_before_test()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_keywords_after_test(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure a\n"))
+ self.protocol.lineReceived(_b("failure: a\n"))
+ self.protocol.lineReceived(_b("error a\n"))
+ self.protocol.lineReceived(_b("error: a\n"))
+ self.protocol.lineReceived(_b("success a\n"))
+ self.protocol.lineReceived(_b("success: a\n"))
+ self.protocol.lineReceived(_b("successful a\n"))
+ self.protocol.lineReceived(_b("successful: a\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.protocol.lineReceived(_b("failure old mcdonald\n"))
+ self.assertEqual(self.stdout.getvalue(), _b("test old mcdonald\n"
+ "failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n"))
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, {}),
+ ('stopTest', self.test),
+ ])
+
+ def test_keywords_during_failure(self):
+ # A smoke test to make sure that the details parsers have control
+ # appropriately.
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure: old mcdonald [\n"))
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure a\n"))
+ self.protocol.lineReceived(_b("failure: a\n"))
+ self.protocol.lineReceived(_b("error a\n"))
+ self.protocol.lineReceived(_b("error: a\n"))
+ self.protocol.lineReceived(_b("success a\n"))
+ self.protocol.lineReceived(_b("success: a\n"))
+ self.protocol.lineReceived(_b("successful a\n"))
+ self.protocol.lineReceived(_b("successful: a\n"))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertEqual(self.stdout.getvalue(), _b(""))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}),
+ lambda:[_b(
+ "test old mcdonald\n"
+ "failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n")])
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, details),
+ ('stopTest', self.test),
+ ])
+
+ def test_stdout_passthrough(self):
+ """Lines received which cannot be interpreted as any protocol action
+ should be passed through to sys.stdout.
+ """
+ bytes = _b("randombytes\n")
+ self.protocol.lineReceived(bytes)
+ self.assertEqual(self.stdout.getvalue(), bytes)
+
+
+class TestTestProtocolServerLostConnection(unittest.TestCase):
+
+ def setUp(self):
+ self.client = Python26TestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.test = subunit.RemotedTestCase("old mcdonald")
+
+ def test_lost_connection_no_input(self):
+ self.protocol.lostConnection()
+ self.assertEqual([], self.client._events)
+
+ def test_lost_connection_after_start(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lostConnection()
+ failure = subunit.RemoteError(
+ _u("lost connection during test 'old mcdonald'"))
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, failure),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connected_after_error(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("error old mcdonald\n"))
+ self.protocol.lostConnection()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, subunit.RemoteError(_u(""))),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def do_connection_lost(self, outcome, opening):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("%s old mcdonald %s" % (outcome, opening)))
+ self.protocol.lostConnection()
+ failure = subunit.RemoteError(
+ _u("lost connection during %s report of test 'old mcdonald'") %
+ outcome)
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, failure),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_error(self):
+ self.do_connection_lost("error", "[\n")
+
+ def test_lost_connection_during_error_details(self):
+ self.do_connection_lost("error", "[ multipart\n")
+
+ def test_lost_connected_after_failure(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure old mcdonald\n"))
+ self.protocol.lostConnection()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test, subunit.RemoteError(_u(""))),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_failure(self):
+ self.do_connection_lost("failure", "[\n")
+
+ def test_lost_connection_during_failure_details(self):
+ self.do_connection_lost("failure", "[ multipart\n")
+
+ def test_lost_connection_after_success(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("success old mcdonald\n"))
+ self.protocol.lostConnection()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_success(self):
+ self.do_connection_lost("success", "[\n")
+
+ def test_lost_connection_during_success_details(self):
+ self.do_connection_lost("success", "[ multipart\n")
+
+ def test_lost_connection_during_skip(self):
+ self.do_connection_lost("skip", "[\n")
+
+ def test_lost_connection_during_skip_details(self):
+ self.do_connection_lost("skip", "[ multipart\n")
+
+ def test_lost_connection_during_xfail(self):
+ self.do_connection_lost("xfail", "[\n")
+
+ def test_lost_connection_during_xfail_details(self):
+ self.do_connection_lost("xfail", "[ multipart\n")
+
+ def test_lost_connection_during_uxsuccess(self):
+ self.do_connection_lost("uxsuccess", "[\n")
+
+ def test_lost_connection_during_uxsuccess_details(self):
+ self.do_connection_lost("uxsuccess", "[ multipart\n")
+
+
+class TestInTestMultipart(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase(_u("mcdonalds farm"))
+
+ def test__outcome_sets_details_parser(self):
+ self.protocol._reading_success_details.details_parser = None
+ self.protocol._state._outcome(0, _b("mcdonalds farm [ multipart\n"),
+ None, self.protocol._reading_success_details)
+ parser = self.protocol._reading_success_details.details_parser
+ self.assertNotEqual(None, parser)
+ self.assertTrue(isinstance(parser,
+ subunit.details.MultipartDetailsParser))
+
+
+class TestTestProtocolServerAddError(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def simple_error_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ details = {}
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_error(self):
+ self.simple_error_keyword("error")
+
+ def test_simple_error_colon(self):
+ self.simple_error_keyword("error:")
+
+ def test_error_empty_message(self):
+ self.protocol.lineReceived(_b("error mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("")])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def error_quoted_bracket(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("]\n")])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_error_quoted_bracket(self):
+ self.error_quoted_bracket("error")
+
+ def test_error_colon_quoted_bracket(self):
+ self.error_quoted_bracket("error:")
+
+
+class TestTestProtocolServerAddFailure(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def assertFailure(self, details):
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def simple_failure_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ details = {}
+ self.assertFailure(details)
+
+ def test_simple_failure(self):
+ self.simple_failure_keyword("failure")
+
+ def test_simple_failure_colon(self):
+ self.simple_failure_keyword("failure:")
+
+ def test_failure_empty_message(self):
+ self.protocol.lineReceived(_b("failure mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("")])
+ self.assertFailure(details)
+
+ def failure_quoted_bracket(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("]\n")])
+ self.assertFailure(details)
+
+ def test_failure_quoted_bracket(self):
+ self.failure_quoted_bracket("failure")
+
+ def test_failure_colon_quoted_bracket(self):
+ self.failure_quoted_bracket("failure:")
+
+
+class TestTestProtocolServerAddxFail(unittest.TestCase):
+ """Tests for the xfail keyword.
+
+ In Python this can thunk through to Success due to stdlib limitations (see
+ README).
+ """
+
+ def capture_expected_failure(self, test, err):
+ self._events.append((test, err))
+
+ def setup_python26(self):
+ """Setup a test object ready to be xfailed and thunk to success."""
+ self.client = Python26TestResult()
+ self.setup_protocol()
+
+ def setup_python27(self):
+ """Setup a test object ready to be xfailed."""
+ self.client = Python27TestResult()
+ self.setup_protocol()
+
+ def setup_python_ex(self):
+ """Setup a test object ready to be xfailed with details."""
+ self.client = ExtendedTestResult()
+ self.setup_protocol()
+
+ def setup_protocol(self):
+ """Setup the protocol based on self.client."""
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = self.client._events[-1][-1]
+
+ def simple_xfail_keyword(self, keyword, as_success):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.check_success_or_xfail(as_success)
+
+ def check_success_or_xfail(self, as_success, error_message=None):
+ if as_success:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+ else:
+ details = {}
+ if error_message is not None:
+ details['traceback'] = Content(
+ ContentType("text", "x-traceback", {'charset': 'utf8'}),
+ lambda:[_b(error_message)])
+ if isinstance(self.client, ExtendedTestResult):
+ value = details
+ else:
+ if error_message is not None:
+ value = subunit.RemoteError(details_to_str(details))
+ else:
+ value = subunit.RemoteError()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addExpectedFailure', self.test, value),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_xfail(self):
+ self.setup_python26()
+ self.simple_xfail_keyword("xfail", True)
+ self.setup_python27()
+ self.simple_xfail_keyword("xfail", False)
+ self.setup_python_ex()
+ self.simple_xfail_keyword("xfail", False)
+
+ def test_simple_xfail_colon(self):
+ self.setup_python26()
+ self.simple_xfail_keyword("xfail:", True)
+ self.setup_python27()
+ self.simple_xfail_keyword("xfail:", False)
+ self.setup_python_ex()
+ self.simple_xfail_keyword("xfail:", False)
+
+ def test_xfail_empty_message(self):
+ self.setup_python26()
+ self.empty_message(True)
+ self.setup_python27()
+ self.empty_message(False)
+ self.setup_python_ex()
+ self.empty_message(False, error_message="")
+
+ def empty_message(self, as_success, error_message="\n"):
+ self.protocol.lineReceived(_b("xfail mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_success_or_xfail(as_success, error_message)
+
+ def xfail_quoted_bracket(self, keyword, as_success):
+        # This tests that the bracketed form is accepted; it cannot yet verify
+        # that the content is used, because there is no way to expose it in
+        # Python so far.
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_success_or_xfail(as_success, "]\n")
+
+ def test_xfail_quoted_bracket(self):
+ self.setup_python26()
+ self.xfail_quoted_bracket("xfail", True)
+ self.setup_python27()
+ self.xfail_quoted_bracket("xfail", False)
+ self.setup_python_ex()
+ self.xfail_quoted_bracket("xfail", False)
+
+ def test_xfail_colon_quoted_bracket(self):
+ self.setup_python26()
+ self.xfail_quoted_bracket("xfail:", True)
+ self.setup_python27()
+ self.xfail_quoted_bracket("xfail:", False)
+ self.setup_python_ex()
+ self.xfail_quoted_bracket("xfail:", False)
+
+
+class TestTestProtocolServerAddunexpectedSuccess(TestCase):
+ """Tests for the uxsuccess keyword."""
+
+ def capture_expected_failure(self, test, err):
+ self._events.append((test, err))
+
+ def setup_python26(self):
+ """Setup a test object ready to be xfailed and thunk to success."""
+ self.client = Python26TestResult()
+ self.setup_protocol()
+
+ def setup_python27(self):
+ """Setup a test object ready to be xfailed."""
+ self.client = Python27TestResult()
+ self.setup_protocol()
+
+ def setup_python_ex(self):
+ """Setup a test object ready to be xfailed with details."""
+ self.client = ExtendedTestResult()
+ self.setup_protocol()
+
+ def setup_protocol(self):
+ """Setup the protocol based on self.client."""
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = self.client._events[-1][-1]
+
+ def simple_uxsuccess_keyword(self, keyword, as_fail):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.check_fail_or_uxsuccess(as_fail)
+
+ def check_fail_or_uxsuccess(self, as_fail, error_message=None):
+ details = {}
+ if error_message is not None:
+ details['traceback'] = Content(
+ ContentType("text", "x-traceback", {'charset': 'utf8'}),
+ lambda:[_b(error_message)])
+ if isinstance(self.client, ExtendedTestResult):
+ value = details
+ else:
+ value = None
+ if as_fail:
+ self.client._events[1] = self.client._events[1][:2]
+            # The value is generated within the extended-to-original decorator;
+            # TODO: use a testtools matcher to check this.
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+ elif value:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addUnexpectedSuccess', self.test, value),
+ ('stopTest', self.test),
+ ], self.client._events)
+ else:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addUnexpectedSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_uxsuccess(self):
+ self.setup_python26()
+ self.simple_uxsuccess_keyword("uxsuccess", True)
+ self.setup_python27()
+ self.simple_uxsuccess_keyword("uxsuccess", False)
+ self.setup_python_ex()
+ self.simple_uxsuccess_keyword("uxsuccess", False)
+
+ def test_simple_uxsuccess_colon(self):
+ self.setup_python26()
+ self.simple_uxsuccess_keyword("uxsuccess:", True)
+ self.setup_python27()
+ self.simple_uxsuccess_keyword("uxsuccess:", False)
+ self.setup_python_ex()
+ self.simple_uxsuccess_keyword("uxsuccess:", False)
+
+ def test_uxsuccess_empty_message(self):
+ self.setup_python26()
+ self.empty_message(True)
+ self.setup_python27()
+ self.empty_message(False)
+ self.setup_python_ex()
+ self.empty_message(False, error_message="")
+
+ def empty_message(self, as_fail, error_message="\n"):
+ self.protocol.lineReceived(_b("uxsuccess mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_fail_or_uxsuccess(as_fail, error_message)
+
+ def uxsuccess_quoted_bracket(self, keyword, as_fail):
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_fail_or_uxsuccess(as_fail, "]\n")
+
+ def test_uxsuccess_quoted_bracket(self):
+ self.setup_python26()
+ self.uxsuccess_quoted_bracket("uxsuccess", True)
+ self.setup_python27()
+ self.uxsuccess_quoted_bracket("uxsuccess", False)
+ self.setup_python_ex()
+ self.uxsuccess_quoted_bracket("uxsuccess", False)
+
+ def test_uxsuccess_colon_quoted_bracket(self):
+ self.setup_python26()
+ self.uxsuccess_quoted_bracket("uxsuccess:", True)
+ self.setup_python27()
+ self.uxsuccess_quoted_bracket("uxsuccess:", False)
+ self.setup_python_ex()
+ self.uxsuccess_quoted_bracket("uxsuccess:", False)
+
+
+class TestTestProtocolServerAddSkip(unittest.TestCase):
+ """Tests for the skip keyword.
+
+ In Python this meets the testtools extended TestResult contract.
+ (See https://launchpad.net/testtools).
+ """
+
+ def setUp(self):
+ """Setup a test object ready to be skipped."""
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = self.client._events[-1][-1]
+
+ def assertSkip(self, reason):
+ details = {}
+ if reason is not None:
+ details['reason'] = Content(
+ ContentType("text", "plain"), lambda:[reason])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSkip', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def simple_skip_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.assertSkip(None)
+
+ def test_simple_skip(self):
+ self.simple_skip_keyword("skip")
+
+ def test_simple_skip_colon(self):
+ self.simple_skip_keyword("skip:")
+
+ def test_skip_empty_message(self):
+ self.protocol.lineReceived(_b("skip mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertSkip(_b(""))
+
+ def skip_quoted_bracket(self, keyword):
+        # This tests that the bracketed form is accepted; it cannot yet verify
+        # that the content is used, because there is no way to expose it in
+        # Python so far.
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertSkip(_b("]\n"))
+
+ def test_skip_quoted_bracket(self):
+ self.skip_quoted_bracket("skip")
+
+ def test_skip_colon_quoted_bracket(self):
+ self.skip_quoted_bracket("skip:")
+
+
+class TestTestProtocolServerAddSuccess(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def simple_success_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_success(self):
+ self.simple_success_keyword("successful")
+
+ def test_simple_success_colon(self):
+ self.simple_success_keyword("successful:")
+
+ def assertSuccess(self, details):
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_success_empty_message(self):
+ self.protocol.lineReceived(_b("success mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['message'] = Content(ContentType("text", "plain"),
+ lambda:[_b("")])
+ self.assertSuccess(details)
+
+ def success_quoted_bracket(self, keyword):
+        # This tests that the bracketed form is accepted; it cannot yet verify
+        # that the content is used, because there is no way to expose it in
+        # Python so far.
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['message'] = Content(ContentType("text", "plain"),
+ lambda:[_b("]\n")])
+ self.assertSuccess(details)
+
+ def test_success_quoted_bracket(self):
+ self.success_quoted_bracket("success")
+
+ def test_success_colon_quoted_bracket(self):
+ self.success_quoted_bracket("success:")
+
+
+class TestTestProtocolServerProgress(unittest.TestCase):
+ """Test receipt of progress: directives."""
+
+ def test_progress_accepted_stdlib(self):
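+        # A plain stdlib TestResult has no progress support; the directives
+        # are accepted and silently dropped, and nothing is echoed to the
+        # stream.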
+ self.result = Python26TestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("progress: 23"))
+ self.protocol.lineReceived(_b("progress: -2"))
+ self.protocol.lineReceived(_b("progress: +4"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+
+ def test_progress_accepted_extended(self):
+ # With a progress capable TestResult, progress events are emitted.
+ self.result = ExtendedTestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("progress: 23"))
+ self.protocol.lineReceived(_b("progress: push"))
+ self.protocol.lineReceived(_b("progress: -2"))
+ self.protocol.lineReceived(_b("progress: pop"))
+ self.protocol.lineReceived(_b("progress: +4"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+ self.assertEqual([
+ ('progress', 23, subunit.PROGRESS_SET),
+ ('progress', None, subunit.PROGRESS_PUSH),
+ ('progress', -2, subunit.PROGRESS_CUR),
+ ('progress', None, subunit.PROGRESS_POP),
+ ('progress', 4, subunit.PROGRESS_CUR),
+ ], self.result._events)
+
+
+class TestTestProtocolServerStreamTags(unittest.TestCase):
+ """Test managing tags on the protocol level."""
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+
+ def test_initial_tags(self):
+ self.protocol.lineReceived(_b("tags: foo bar:baz quux\n"))
+ self.assertEqual([
+ ('tags', set(["foo", "bar:baz", "quux"]), set()),
+ ], self.client._events)
+
+ def test_minus_removes_tags(self):
+ self.protocol.lineReceived(_b("tags: -bar quux\n"))
+ self.assertEqual([
+ ('tags', set(["quux"]), set(["bar"])),
+ ], self.client._events)
+
+ def test_tags_do_not_get_set_on_test(self):
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ test = self.client._events[0][-1]
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+ def test_tags_do_not_get_set_on_global_tags(self):
+ self.protocol.lineReceived(_b("tags: foo bar\n"))
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ test = self.client._events[-1][-1]
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+ def test_tags_get_set_on_test_tags(self):
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ test = self.client._events[-1][-1]
+ self.protocol.lineReceived(_b("tags: foo bar\n"))
+ self.protocol.lineReceived(_b("success mcdonalds farm\n"))
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+
+class TestTestProtocolServerStreamTime(unittest.TestCase):
+ """Test managing time information at the protocol level."""
+
+ def test_time_accepted_stdlib(self):
+ self.result = Python26TestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+
+ def test_time_accepted_extended(self):
+ self.result = ExtendedTestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+ self.assertEqual([
+ ('time', datetime.datetime(2001, 12, 12, 12, 59, 59, 0,
+ iso8601.Utc()))
+ ], self.result._events)
+
+
+class TestRemotedTestCase(unittest.TestCase):
+
+ def test_simple(self):
+ test = subunit.RemotedTestCase("A test description")
+ self.assertRaises(NotImplementedError, test.setUp)
+ self.assertRaises(NotImplementedError, test.tearDown)
+ self.assertEqual("A test description",
+ test.shortDescription())
+ self.assertEqual("A test description",
+ test.id())
+ self.assertEqual("A test description (subunit.RemotedTestCase)", "%s" % test)
+ self.assertEqual("<subunit.RemotedTestCase description="
+ "'A test description'>", "%r" % test)
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertEqual([(test, _remote_exception_repr + ": "
+ "Cannot run RemotedTestCases.\n\n")],
+ result.errors)
+ self.assertEqual(1, result.testsRun)
+ another_test = subunit.RemotedTestCase("A test description")
+ self.assertEqual(test, another_test)
+ different_test = subunit.RemotedTestCase("ofo")
+ self.assertNotEqual(test, different_test)
+ self.assertNotEqual(another_test, different_test)
+
+
+class TestRemoteError(unittest.TestCase):
+
+ def test_eq(self):
+ error = subunit.RemoteError(_u("Something went wrong"))
+ another_error = subunit.RemoteError(_u("Something went wrong"))
+ different_error = subunit.RemoteError(_u("boo!"))
+ self.assertEqual(error, another_error)
+ self.assertNotEqual(error, different_error)
+ self.assertNotEqual(different_error, another_error)
+
+ def test_empty_constructor(self):
+ self.assertEqual(subunit.RemoteError(), subunit.RemoteError(_u("")))
+
+
+class TestExecTestCase(unittest.TestCase):
+
+ class SampleExecTestCase(subunit.ExecTestCase):
+
+ def test_sample_method(self):
+ """sample-script.py"""
+ # the sample script runs three tests, one each
+ # that fails, errors and succeeds
+
+ def test_sample_method_args(self):
+ """sample-script.py foo"""
+ # sample that will run just one test.
+
+ def test_construct(self):
+ test = self.SampleExecTestCase("test_sample_method")
+ self.assertEqual(test.script,
+ subunit.join_dir(__file__, 'sample-script.py'))
+
+ def test_args(self):
+ result = unittest.TestResult()
+ test = self.SampleExecTestCase("test_sample_method_args")
+ test.run(result)
+ self.assertEqual(1, result.testsRun)
+
+ def test_run(self):
+ result = ExtendedTestResult()
+ test = self.SampleExecTestCase("test_sample_method")
+ test.run(result)
+ mcdonald = subunit.RemotedTestCase("old mcdonald")
+ bing = subunit.RemotedTestCase("bing crosby")
+ bing_details = {}
+ bing_details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("foo.c:53:ERROR invalid state\n")])
+ an_error = subunit.RemotedTestCase("an error")
+ error_details = {}
+ self.assertEqual([
+ ('startTest', mcdonald),
+ ('addSuccess', mcdonald),
+ ('stopTest', mcdonald),
+ ('startTest', bing),
+ ('addFailure', bing, bing_details),
+ ('stopTest', bing),
+ ('startTest', an_error),
+ ('addError', an_error, error_details),
+ ('stopTest', an_error),
+ ], result._events)
+
+ def test_debug(self):
+ test = self.SampleExecTestCase("test_sample_method")
+ test.debug()
+
+ def test_count_test_cases(self):
+ """TODO run the child process and count responses to determine the count."""
+
+ def test_join_dir(self):
+ sibling = subunit.join_dir(__file__, 'foo')
+ filedir = os.path.abspath(os.path.dirname(__file__))
+ expected = os.path.join(filedir, 'foo')
+ self.assertEqual(sibling, expected)
+
+
+class DoExecTestCase(subunit.ExecTestCase):
+
+ def test_working_script(self):
+ """sample-two-script.py"""
+
+
+class TestIsolatedTestCase(TestCase):
+
+ class SampleIsolatedTestCase(subunit.IsolatedTestCase):
+
+ SETUP = False
+ TEARDOWN = False
+ TEST = False
+
+ def setUp(self):
+ TestIsolatedTestCase.SampleIsolatedTestCase.SETUP = True
+
+ def tearDown(self):
+ TestIsolatedTestCase.SampleIsolatedTestCase.TEARDOWN = True
+
+ def test_sets_global_state(self):
+ TestIsolatedTestCase.SampleIsolatedTestCase.TEST = True
+
+
+ def test_construct(self):
+ self.SampleIsolatedTestCase("test_sets_global_state")
+
+ @skipIf(os.name != "posix", "Need a posix system for forking tests")
+ def test_run(self):
+ result = unittest.TestResult()
+ test = self.SampleIsolatedTestCase("test_sets_global_state")
+ test.run(result)
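+        # The isolated case presumably runs in a forked child (hence the posix
+        # requirement above), so the class-level flags in this parent process
+        # are expected to stay False.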
+ self.assertEqual(result.testsRun, 1)
+ self.assertEqual(self.SampleIsolatedTestCase.SETUP, False)
+ self.assertEqual(self.SampleIsolatedTestCase.TEARDOWN, False)
+ self.assertEqual(self.SampleIsolatedTestCase.TEST, False)
+
+ def test_debug(self):
+ pass
+ #test = self.SampleExecTestCase("test_sample_method")
+ #test.debug()
+
+
+class TestIsolatedTestSuite(TestCase):
+
+ class SampleTestToIsolate(unittest.TestCase):
+
+ SETUP = False
+ TEARDOWN = False
+ TEST = False
+
+ def setUp(self):
+ TestIsolatedTestSuite.SampleTestToIsolate.SETUP = True
+
+ def tearDown(self):
+ TestIsolatedTestSuite.SampleTestToIsolate.TEARDOWN = True
+
+ def test_sets_global_state(self):
+ TestIsolatedTestSuite.SampleTestToIsolate.TEST = True
+
+
+ def test_construct(self):
+ subunit.IsolatedTestSuite()
+
+ @skipIf(os.name != "posix", "Need a posix system for forking tests")
+ def test_run(self):
+ result = unittest.TestResult()
+ suite = subunit.IsolatedTestSuite()
+ sub_suite = unittest.TestSuite()
+ sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+ sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+ suite.addTest(sub_suite)
+ suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+ suite.run(result)
+ self.assertEqual(result.testsRun, 3)
+ self.assertEqual(self.SampleTestToIsolate.SETUP, False)
+ self.assertEqual(self.SampleTestToIsolate.TEARDOWN, False)
+ self.assertEqual(self.SampleTestToIsolate.TEST, False)
+
+
+class TestTestProtocolClient(TestCase):
+
+ def setUp(self):
+ super(TestTestProtocolClient, self).setUp()
+ self.io = BytesIO()
+ self.protocol = subunit.TestProtocolClient(self.io)
+ self.unicode_test = PlaceHolder(_u('\u2603'))
+ self.test = TestTestProtocolClient("test_start_test")
+ self.sample_details = {'something':Content(
+ ContentType('text', 'plain'), lambda:[_b('serialised\nform')])}
+ self.sample_tb_details = dict(self.sample_details)
+ self.sample_tb_details['traceback'] = TracebackContent(
+ subunit.RemoteError(_u("boo qux")), self.test)
+
+ def test_start_test(self):
+ """Test startTest on a TestProtocolClient."""
+ self.protocol.startTest(self.test)
+ self.assertEqual(self.io.getvalue(), _b("test: %s\n" % self.test.id()))
+
+ def test_start_test_unicode_id(self):
+ """Test startTest on a TestProtocolClient."""
+ self.protocol.startTest(self.unicode_test)
+ expected = _b("test: ") + _u('\u2603').encode('utf8') + _b("\n")
+ self.assertEqual(expected, self.io.getvalue())
+
+ def test_stop_test(self):
+ # stopTest doesn't output anything.
+ self.protocol.stopTest(self.test)
+ self.assertEqual(self.io.getvalue(), _b(""))
+
+ def test_add_success(self):
+ """Test addSuccess on a TestProtocolClient."""
+ self.protocol.addSuccess(self.test)
+ self.assertEqual(
+ self.io.getvalue(), _b("successful: %s\n" % self.test.id()))
+
+ def test_add_outcome_unicode_id(self):
+ """Test addSuccess on a TestProtocolClient."""
+ self.protocol.addSuccess(self.unicode_test)
+ expected = _b("successful: ") + _u('\u2603').encode('utf8') + _b("\n")
+ self.assertEqual(expected, self.io.getvalue())
+
+ def test_add_success_details(self):
+ """Test addSuccess on a TestProtocolClient with details."""
+ self.protocol.addSuccess(self.test, details=self.sample_details)
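+        # Expected wire form: details are emitted as a multipart block; each
+        # part is a Content-Type line, the part name, and the bytes in
+        # chunked form (hex length, CRLF, data, then a terminating "0\r\n"),
+        # with "]" closing the block. Here 0xF == len("serialised\nform").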
+ self.assertEqual(
+ self.io.getvalue(), _b("successful: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
+
+ def test_add_failure(self):
+ """Test addFailure on a TestProtocolClient."""
+ self.protocol.addFailure(
+ self.test, subunit.RemoteError(_u("boo qux")))
+ self.assertEqual(
+ self.io.getvalue(),
+ _b(('failure: %s [\n' + _remote_exception_str + ': boo qux\n]\n')
+ % self.test.id()))
+
+ def test_add_failure_details(self):
+ """Test addFailure on a TestProtocolClient with details."""
+ self.protocol.addFailure(
+ self.test, details=self.sample_tb_details)
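+        # The traceback part's Content-Type parameters may serialise in either
+        # order, so either rendering below is acceptable.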
+ self.assertThat([
+ _b(("failure: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ _b(("failure: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ ],
+            Contains(self.io.getvalue()))
+
+ def test_add_error(self):
+ """Test stopTest on a TestProtocolClient."""
+ self.protocol.addError(
+ self.test, subunit.RemoteError(_u("phwoar crikey")))
+ self.assertEqual(
+ self.io.getvalue(),
+ _b(('error: %s [\n' +
+ _remote_exception_str + ": phwoar crikey\n"
+ "]\n") % self.test.id()))
+
+ def test_add_error_details(self):
+ """Test stopTest on a TestProtocolClient with details."""
+ self.protocol.addError(
+ self.test, details=self.sample_tb_details)
+ self.assertThat([
+ _b(("error: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ _b(("error: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ ],
+            Contains(self.io.getvalue()))
+
+ def test_add_expected_failure(self):
+ """Test addExpectedFailure on a TestProtocolClient."""
+ self.protocol.addExpectedFailure(
+ self.test, subunit.RemoteError(_u("phwoar crikey")))
+ self.assertEqual(
+ self.io.getvalue(),
+ _b(('xfail: %s [\n' +
+ _remote_exception_str + ": phwoar crikey\n"
+ "]\n") % self.test.id()))
+
+ def test_add_expected_failure_details(self):
+ """Test addExpectedFailure on a TestProtocolClient with details."""
+ self.protocol.addExpectedFailure(
+ self.test, details=self.sample_tb_details)
+ self.assertThat([
+ _b(("xfail: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ _b(("xfail: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ ],
+            Contains(self.io.getvalue()))
+
+ def test_add_skip(self):
+ """Test addSkip on a TestProtocolClient."""
+ self.protocol.addSkip(
+ self.test, "Has it really?")
+ self.assertEqual(
+ self.io.getvalue(),
+ _b('skip: %s [\nHas it really?\n]\n' % self.test.id()))
+
+ def test_add_skip_details(self):
+ """Test addSkip on a TestProtocolClient with details."""
+ details = {'reason':Content(
+ ContentType('text', 'plain'), lambda:[_b('Has it really?')])}
+ self.protocol.addSkip(self.test, details=details)
+ self.assertEqual(
+ self.io.getvalue(),
+ _b("skip: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "reason\n"
+ "E\r\nHas it really?0\r\n"
+ "]\n" % self.test.id()))
+
+ def test_progress_set(self):
+ self.protocol.progress(23, subunit.PROGRESS_SET)
+ self.assertEqual(self.io.getvalue(), _b('progress: 23\n'))
+
+ def test_progress_neg_cur(self):
+ self.protocol.progress(-23, subunit.PROGRESS_CUR)
+ self.assertEqual(self.io.getvalue(), _b('progress: -23\n'))
+
+ def test_progress_pos_cur(self):
+ self.protocol.progress(23, subunit.PROGRESS_CUR)
+ self.assertEqual(self.io.getvalue(), _b('progress: +23\n'))
+
+ def test_progress_pop(self):
+ self.protocol.progress(1234, subunit.PROGRESS_POP)
+ self.assertEqual(self.io.getvalue(), _b('progress: pop\n'))
+
+ def test_progress_push(self):
+ self.protocol.progress(1234, subunit.PROGRESS_PUSH)
+ self.assertEqual(self.io.getvalue(), _b('progress: push\n'))
+
+ def test_time(self):
+ # Calling time() outputs a time signal immediately.
+ self.protocol.time(
+ datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc()))
+ self.assertEqual(
+ _b("time: 2009-10-11 12:13:14.000015Z\n"),
+ self.io.getvalue())
+
+ def test_add_unexpected_success(self):
+ """Test addUnexpectedSuccess on a TestProtocolClient."""
+ self.protocol.addUnexpectedSuccess(self.test)
+ self.assertEqual(
+ self.io.getvalue(), _b("uxsuccess: %s\n" % self.test.id()))
+
+ def test_add_unexpected_success_details(self):
+ """Test addUnexpectedSuccess on a TestProtocolClient with details."""
+ self.protocol.addUnexpectedSuccess(self.test, details=self.sample_details)
+ self.assertEqual(
+ self.io.getvalue(), _b("uxsuccess: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
+
+ def test_tags_empty(self):
+ self.protocol.tags(set(), set())
+ self.assertEqual(_b(""), self.io.getvalue())
+
+ def test_tags_add(self):
+ self.protocol.tags(set(['foo']), set())
+ self.assertEqual(_b("tags: foo\n"), self.io.getvalue())
+
+ def test_tags_both(self):
+ self.protocol.tags(set(['quux']), set(['bar']))
+ self.assertThat(
+ [b"tags: quux -bar\n", b"tags: -bar quux\n"],
+ Contains(self.io.getvalue()))
+
+ def test_tags_gone(self):
+ self.protocol.tags(set(), set(['bar']))
+ self.assertEqual(_b("tags: -bar\n"), self.io.getvalue())
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py
new file mode 100644
index 00000000000..c21392ceb9c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py
@@ -0,0 +1,436 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+from io import BytesIO
+import datetime
+
+from testtools import TestCase
+from testtools.matchers import Contains, HasLength
+from testtools.tests.test_testresult import TestStreamResultContract
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+import subunit.iso8601 as iso8601
+
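+# The CONSTANT_* values below are pre-encoded subunit v2 packets used as
+# expected encoder output and as parser input. Each begins with the 0xb3
+# signature byte and ends with a four-byte CRC-32 checksum; the bad-checksum
+# and parser-error tests later in this file rely on that layout.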
+CONSTANT_ENUM = b'\xb3)\x01\x0c\x03foo\x08U_\x1b'
+CONSTANT_INPROGRESS = b'\xb3)\x02\x0c\x03foo\x8e\xc1-\xb5'
+CONSTANT_SUCCESS = b'\xb3)\x03\x0c\x03fooE\x9d\xfe\x10'
+CONSTANT_UXSUCCESS = b'\xb3)\x04\x0c\x03fooX\x98\xce\xa8'
+CONSTANT_SKIP = b'\xb3)\x05\x0c\x03foo\x93\xc4\x1d\r'
+CONSTANT_FAIL = b'\xb3)\x06\x0c\x03foo\x15Po\xa3'
+CONSTANT_XFAIL = b'\xb3)\x07\x0c\x03foo\xde\x0c\xbc\x06'
+CONSTANT_EOF = b'\xb3!\x10\x08S\x15\x88\xdc'
+CONSTANT_FILE_CONTENT = b'\xb3!@\x13\x06barney\x03wooA5\xe3\x8c'
+CONSTANT_MIME = b'\xb3! #\x1aapplication/foo; charset=1x3Q\x15'
+CONSTANT_TIMESTAMP = b'\xb3+\x03\x13<\x17T\xcf\x80\xaf\xc8\x03barI\x96>-'
+CONSTANT_ROUTE_CODE = b'\xb3-\x03\x13\x03bar\x06source\x9cY9\x19'
+CONSTANT_RUNNABLE = b'\xb3(\x03\x0c\x03foo\xe3\xea\xf5\xa4'
+CONSTANT_TAGS = [
+ b'\xb3)\x80\x15\x03bar\x02\x03foo\x03barTHn\xb4',
+ b'\xb3)\x80\x15\x03bar\x02\x03bar\x03foo\xf8\xf1\x91o',
+ ]
+
+
+class TestStreamResultToBytesContract(TestCase, TestStreamResultContract):
+ """Check that StreamResult behaves as testtools expects."""
+
+ def _make_result(self):
+ return subunit.StreamResultToBytes(BytesIO())
+
+
+class TestStreamResultToBytes(TestCase):
+
+ def _make_result(self):
+ output = BytesIO()
+ return subunit.StreamResultToBytes(output), output
+
+ def test_numbers(self):
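+        # The number encoding exercised here appears to be a big-endian varint
+        # whose top two bits select the width: 00 -> 1 byte (up to 63),
+        # 01 -> 2 bytes (up to 16383), 10 -> 3 bytes (up to 4194303) and
+        # 11 -> 4 bytes (up to 2**30 - 1); values outside that range raise.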
+ result = subunit.StreamResultToBytes(BytesIO())
+ packet = []
+ self.assertRaises(Exception, result._write_number, -1, packet)
+ self.assertEqual([], packet)
+ result._write_number(0, packet)
+ self.assertEqual([b'\x00'], packet)
+ del packet[:]
+ result._write_number(63, packet)
+ self.assertEqual([b'\x3f'], packet)
+ del packet[:]
+ result._write_number(64, packet)
+ self.assertEqual([b'\x40\x40'], packet)
+ del packet[:]
+ result._write_number(16383, packet)
+ self.assertEqual([b'\x7f\xff'], packet)
+ del packet[:]
+ result._write_number(16384, packet)
+ self.assertEqual([b'\x80\x40', b'\x00'], packet)
+ del packet[:]
+ result._write_number(4194303, packet)
+ self.assertEqual([b'\xbf\xff', b'\xff'], packet)
+ del packet[:]
+ result._write_number(4194304, packet)
+ self.assertEqual([b'\xc0\x40\x00\x00'], packet)
+ del packet[:]
+ result._write_number(1073741823, packet)
+ self.assertEqual([b'\xff\xff\xff\xff'], packet)
+ del packet[:]
+ self.assertRaises(Exception, result._write_number, 1073741824, packet)
+ self.assertEqual([], packet)
+
+ def test_volatile_length(self):
+        # If the length of the packet data, before the length field itself is
+        # counted, sits right on the boundary of the length field's
+        # variable-length encoding, it is easy to get the length wrong by not
+        # accounting for the length field itself. That is, the encoder has to
+        # ensure that length == length_of_rest + length_of_length.
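+        # Worked example (matching the assertions below): with no file content
+        # the rest of the packet is 9 bytes, so a 1-byte length field yields a
+        # 10-byte packet (0x0a at offset 3); with 53 content bytes the total is
+        # 63 (0x3f), the largest 1-byte length; one more content byte no longer
+        # fits, so the length field grows to 2 bytes and the packet to 65
+        # bytes (0x40 0x41).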
+ result, output = self._make_result()
+ # 1 byte short:
+ result.status(file_name="", file_bytes=b'\xff'*0)
+ self.assertThat(output.getvalue(), HasLength(10))
+ self.assertEqual(b'\x0a', output.getvalue()[3:4])
+ output.seek(0)
+ output.truncate()
+ # 1 byte long:
+ result.status(file_name="", file_bytes=b'\xff'*53)
+ self.assertThat(output.getvalue(), HasLength(63))
+ self.assertEqual(b'\x3f', output.getvalue()[3:4])
+ output.seek(0)
+ output.truncate()
+ # 2 bytes short
+ result.status(file_name="", file_bytes=b'\xff'*54)
+ self.assertThat(output.getvalue(), HasLength(65))
+ self.assertEqual(b'\x40\x41', output.getvalue()[3:5])
+ output.seek(0)
+ output.truncate()
+ # 2 bytes long
+ result.status(file_name="", file_bytes=b'\xff'*16371)
+ self.assertThat(output.getvalue(), HasLength(16383))
+ self.assertEqual(b'\x7f\xff', output.getvalue()[3:5])
+ output.seek(0)
+ output.truncate()
+ # 3 bytes short
+ result.status(file_name="", file_bytes=b'\xff'*16372)
+ self.assertThat(output.getvalue(), HasLength(16385))
+ self.assertEqual(b'\x80\x40\x01', output.getvalue()[3:6])
+ output.seek(0)
+ output.truncate()
+ # 3 bytes long
+ result.status(file_name="", file_bytes=b'\xff'*4194289)
+ self.assertThat(output.getvalue(), HasLength(4194303))
+ self.assertEqual(b'\xbf\xff\xff', output.getvalue()[3:6])
+ output.seek(0)
+ output.truncate()
+ self.assertRaises(Exception, result.status, file_name="",
+ file_bytes=b'\xff'*4194290)
+
+ def test_trivial_enumeration(self):
+ result, output = self._make_result()
+ result.status("foo", 'exists')
+ self.assertEqual(CONSTANT_ENUM, output.getvalue())
+
+ def test_inprogress(self):
+ result, output = self._make_result()
+ result.status("foo", 'inprogress')
+ self.assertEqual(CONSTANT_INPROGRESS, output.getvalue())
+
+ def test_success(self):
+ result, output = self._make_result()
+ result.status("foo", 'success')
+ self.assertEqual(CONSTANT_SUCCESS, output.getvalue())
+
+ def test_uxsuccess(self):
+ result, output = self._make_result()
+ result.status("foo", 'uxsuccess')
+ self.assertEqual(CONSTANT_UXSUCCESS, output.getvalue())
+
+ def test_skip(self):
+ result, output = self._make_result()
+ result.status("foo", 'skip')
+ self.assertEqual(CONSTANT_SKIP, output.getvalue())
+
+ def test_fail(self):
+ result, output = self._make_result()
+ result.status("foo", 'fail')
+ self.assertEqual(CONSTANT_FAIL, output.getvalue())
+
+ def test_xfail(self):
+ result, output = self._make_result()
+ result.status("foo", 'xfail')
+ self.assertEqual(CONSTANT_XFAIL, output.getvalue())
+
+ def test_unknown_status(self):
+ result, output = self._make_result()
+ self.assertRaises(Exception, result.status, "foo", 'boo')
+ self.assertEqual(b'', output.getvalue())
+
+ def test_eof(self):
+ result, output = self._make_result()
+ result.status(eof=True)
+ self.assertEqual(CONSTANT_EOF, output.getvalue())
+
+ def test_file_content(self):
+ result, output = self._make_result()
+ result.status(file_name="barney", file_bytes=b"woo")
+ self.assertEqual(CONSTANT_FILE_CONTENT, output.getvalue())
+
+ def test_mime(self):
+ result, output = self._make_result()
+ result.status(mime_type="application/foo; charset=1")
+ self.assertEqual(CONSTANT_MIME, output.getvalue())
+
+ def test_route_code(self):
+ result, output = self._make_result()
+ result.status(test_id="bar", test_status='success',
+ route_code="source")
+ self.assertEqual(CONSTANT_ROUTE_CODE, output.getvalue())
+
+ def test_runnable(self):
+ result, output = self._make_result()
+ result.status("foo", 'success', runnable=False)
+ self.assertEqual(CONSTANT_RUNNABLE, output.getvalue())
+
+ def test_tags(self):
+ result, output = self._make_result()
+ result.status(test_id="bar", test_tags=set(['foo', 'bar']))
+ self.assertThat(CONSTANT_TAGS, Contains(output.getvalue()))
+
+ def test_timestamp(self):
+ timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
+ iso8601.Utc())
+ result, output = self._make_result()
+ result.status(test_id="bar", test_status='success', timestamp=timestamp)
+ self.assertEqual(CONSTANT_TIMESTAMP, output.getvalue())
+
+
+class TestByteStreamToStreamResult(TestCase):
+
+ def test_non_subunit_encapsulated(self):
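+        # Bytes that are not subunit packets are routed, one byte at a time,
+        # as file content on the stream named by non_subunit_name.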
+ source = BytesIO(b"foo\nbar\n")
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual([
+ ('status', None, None, None, True, 'stdout', b'f', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'b', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'a', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'r', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
+ ], result._events)
+ self.assertEqual(b'', source.read())
+
+ def test_signature_middle_utf8_char(self):
+ utf8_bytes = b'\xe3\xb3\x8a'
+ source = BytesIO(utf8_bytes)
+        # The second byte is the subunit signature byte (0xb3), but it sits in
+        # the middle of a single UTF8 character (u'\u3cca'); it must not start
+        # a packet, so the bytes are forwarded as non-subunit output.
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(
+ result)
+ self.assertEqual([
+ ('status', None, None, None, True, 'stdout', b'\xe3', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\xb3', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\x8a', False, None, None, None),
+ ], result._events)
+
+ def test_non_subunit_disabled_raises(self):
+ source = BytesIO(b"foo\nbar\n")
+ result = StreamResult()
+ case = subunit.ByteStreamToStreamResult(source)
+ e = self.assertRaises(Exception, case.run, result)
+ self.assertEqual(b'f', e.args[1])
+ self.assertEqual(b'oo\nbar\n', source.read())
+ self.assertEqual([], result._events)
+
+ def test_trivial_enumeration(self):
+ source = BytesIO(CONSTANT_ENUM)
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual(b'', source.read())
+ self.assertEqual([
+ ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+ ], result._events)
+
+ def test_multiple_events(self):
+ source = BytesIO(CONSTANT_ENUM + CONSTANT_ENUM)
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual(b'', source.read())
+ self.assertEqual([
+ ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+ ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+ ], result._events)
+
+ def test_inprogress(self):
+ self.check_event(CONSTANT_INPROGRESS, 'inprogress')
+
+ def test_success(self):
+ self.check_event(CONSTANT_SUCCESS, 'success')
+
+ def test_uxsuccess(self):
+ self.check_event(CONSTANT_UXSUCCESS, 'uxsuccess')
+
+ def test_skip(self):
+ self.check_event(CONSTANT_SKIP, 'skip')
+
+ def test_fail(self):
+ self.check_event(CONSTANT_FAIL, 'fail')
+
+ def test_xfail(self):
+ self.check_event(CONSTANT_XFAIL, 'xfail')
+
+ def check_events(self, source_bytes, events):
+ source = BytesIO(source_bytes)
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual(b'', source.read())
+ self.assertEqual(events, result._events)
+        # Any file attachments should be byte strings, as users assume that.
+ for event in result._events:
+ if event[5] is not None:
+ self.assertIsInstance(event[6], bytes)
+
+ def check_event(self, source_bytes, test_status=None, test_id="foo",
+ route_code=None, timestamp=None, tags=None, mime_type=None,
+ file_name=None, file_bytes=None, eof=False, runnable=True):
+ event = self._event(test_id=test_id, test_status=test_status,
+ tags=tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+ self.check_events(source_bytes, [event])
+
+ def _event(self, test_status=None, test_id=None, route_code=None,
+ timestamp=None, tags=None, mime_type=None, file_name=None,
+ file_bytes=None, eof=False, runnable=True):
+ return ('status', test_id, test_status, tags, runnable, file_name,
+ file_bytes, eof, mime_type, route_code, timestamp)
+
+ def test_eof(self):
+ self.check_event(CONSTANT_EOF, test_id=None, eof=True)
+
+ def test_file_content(self):
+ self.check_event(CONSTANT_FILE_CONTENT,
+ test_id=None, file_name="barney", file_bytes=b"woo")
+
+ def test_file_content_length_into_checksum(self):
+ # A bad file content length which creeps into the checksum.
+ bad_file_length_content = b'\xb3!@\x13\x06barney\x04woo\xdc\xe2\xdb\x35'
+ self.check_events(bad_file_length_content, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=bad_file_length_content,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b"File content extends past end of packet: claimed 4 bytes, 3 available",
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_packet_length_4_word_varint(self):
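+        # The length field is limited to a 3-byte varint; a 4-byte length
+        # prefix (0xc0) is reported as a parser error rather than honoured.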
+ packet_data = b'\xb3!@\xc0\x00\x11'
+ self.check_events(packet_data, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=packet_data,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b"3 byte maximum given but 4 byte value found.",
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_mime(self):
+ self.check_event(CONSTANT_MIME,
+ test_id=None, mime_type='application/foo; charset=1')
+
+ def test_route_code(self):
+ self.check_event(CONSTANT_ROUTE_CODE,
+ 'success', route_code="source", test_id="bar")
+
+ def test_runnable(self):
+ self.check_event(CONSTANT_RUNNABLE,
+ test_status='success', runnable=False)
+
+ def test_tags(self):
+ self.check_event(CONSTANT_TAGS[0],
+ None, tags=set(['foo', 'bar']), test_id="bar")
+
+ def test_timestamp(self):
+ timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
+ iso8601.Utc())
+ self.check_event(CONSTANT_TIMESTAMP,
+ 'success', test_id='bar', timestamp=timestamp)
+
+ def test_bad_crc_errors_via_status(self):
+ file_bytes = CONSTANT_MIME[:-1] + b'\x00'
+ self.check_events( file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'Bad checksum - calculated (0x78335115), '
+ b'stored (0x78335100)',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_not_utf8_in_string(self):
+ file_bytes = CONSTANT_ROUTE_CODE[:5] + b'\xb4' + CONSTANT_ROUTE_CODE[6:-4] + b'\xce\x56\xc6\x17'
+ self.check_events(file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'UTF8 string at offset 2 is not UTF8',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_NULL_in_string(self):
+ file_bytes = CONSTANT_ROUTE_CODE[:6] + b'\x00' + CONSTANT_ROUTE_CODE[7:-4] + b'\xd7\x41\xac\xfe'
+ self.check_events(file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'UTF8 string at offset 2 contains NUL byte',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_bad_utf8_stringlength(self):
+ file_bytes = CONSTANT_ROUTE_CODE[:4] + b'\x3f' + CONSTANT_ROUTE_CODE[5:-4] + b'\xbe\x29\xe0\xc2'
+ self.check_events(file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'UTF8 string at offset 2 extends past end of '
+ b'packet: claimed 63 bytes, 10 available',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_route_code_and_file_content(self):
+ content = BytesIO()
+ subunit.StreamResultToBytes(content).status(
+ route_code='0', mime_type='text/plain', file_name='bar',
+ file_bytes=b'foo')
+ self.check_event(content.getvalue(), test_id=None, file_name='bar',
+ route_code='0', mime_type='text/plain', file_bytes=b'foo')
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py
new file mode 100644
index 00000000000..44f95b34c97
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py
@@ -0,0 +1,566 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import csv
+import datetime
+import sys
+import unittest
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
+from testtools.testresult.doubles import ExtendedTestResult
+
+import subunit
+import subunit.iso8601 as iso8601
+import subunit.test_results
+
+import testtools
+
+
+class LoggingDecorator(subunit.test_results.HookedTestResultDecorator):
+
+ def __init__(self, decorated):
+ self._calls = 0
+ super(LoggingDecorator, self).__init__(decorated)
+
+ def _before_event(self):
+ self._calls += 1
+
+
+class AssertBeforeTestResult(LoggingDecorator):
+ """A TestResult for checking preconditions."""
+
+ def __init__(self, decorated, test):
+ self.test = test
+ super(AssertBeforeTestResult, self).__init__(decorated)
+
+ def _before_event(self):
+ self.test.assertEqual(1, self.earlier._calls)
+ super(AssertBeforeTestResult, self)._before_event()
+
+
+class TimeCapturingResult(unittest.TestResult):
+
+ def __init__(self):
+ super(TimeCapturingResult, self).__init__()
+ self._calls = []
+ self.failfast = False
+
+ def time(self, a_datetime):
+ self._calls.append(a_datetime)
+
+
+class TestHookedTestResultDecorator(unittest.TestCase):
+
+ def setUp(self):
+ # An end to the chain
+ terminal = unittest.TestResult()
+ # Asserts that the call was made to self.result before asserter was
+ # called.
+ asserter = AssertBeforeTestResult(terminal, self)
+        # The result object we call, which must increase its call count.
+ self.result = LoggingDecorator(asserter)
+ asserter.earlier = self.result
+ self.decorated = asserter
+
+ def tearDown(self):
+ # The hook in self.result must have been called
+ self.assertEqual(1, self.result._calls)
+ # The hook in asserter must have been called too, otherwise the
+ # assertion about ordering won't have completed.
+ self.assertEqual(1, self.decorated._calls)
+
+ def test_startTest(self):
+ self.result.startTest(self)
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+
+ def test_stopTest(self):
+ self.result.stopTest(self)
+
+ def test_stopTestRun(self):
+ self.result.stopTestRun()
+
+ def test_addError(self):
+ self.result.addError(self, subunit.RemoteError())
+
+ def test_addError_details(self):
+ self.result.addError(self, details={})
+
+ def test_addFailure(self):
+ self.result.addFailure(self, subunit.RemoteError())
+
+ def test_addFailure_details(self):
+ self.result.addFailure(self, details={})
+
+ def test_addSuccess(self):
+ self.result.addSuccess(self)
+
+ def test_addSuccess_details(self):
+ self.result.addSuccess(self, details={})
+
+ def test_addSkip(self):
+ self.result.addSkip(self, "foo")
+
+ def test_addSkip_details(self):
+ self.result.addSkip(self, details={})
+
+ def test_addExpectedFailure(self):
+ self.result.addExpectedFailure(self, subunit.RemoteError())
+
+ def test_addExpectedFailure_details(self):
+ self.result.addExpectedFailure(self, details={})
+
+ def test_addUnexpectedSuccess(self):
+ self.result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_details(self):
+ self.result.addUnexpectedSuccess(self, details={})
+
+ def test_progress(self):
+ self.result.progress(1, subunit.PROGRESS_SET)
+
+ def test_wasSuccessful(self):
+ self.result.wasSuccessful()
+
+ def test_shouldStop(self):
+ self.result.shouldStop
+
+ def test_stop(self):
+ self.result.stop()
+
+ def test_time(self):
+ self.result.time(None)
+
+
+class TestAutoTimingTestResultDecorator(unittest.TestCase):
+
+ def setUp(self):
+        # An end to the chain, which captures time events.
+ terminal = TimeCapturingResult()
+ # The result object under test.
+ self.result = subunit.test_results.AutoTimingTestResultDecorator(
+ terminal)
+ self.decorated = terminal
+
+ def test_without_time_calls_time_is_called_and_not_None(self):
+ self.result.startTest(self)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertNotEqual(None, self.decorated._calls[0])
+
+ def test_no_time_from_progress(self):
+ self.result.progress(1, subunit.PROGRESS_CUR)
+ self.assertEqual(0, len(self.decorated._calls))
+
+ def test_no_time_from_shouldStop(self):
+ self.decorated.stop()
+ self.result.shouldStop
+ self.assertEqual(0, len(self.decorated._calls))
+
+ def test_calling_time_inhibits_automatic_time(self):
+ # Calling time() outputs a time signal immediately and prevents
+ # automatically adding one when other methods are called.
+ time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+ self.result.time(time)
+ self.result.startTest(self)
+ self.result.stopTest(self)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertEqual(time, self.decorated._calls[0])
+
+ def test_calling_time_None_enables_automatic_time(self):
+ time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+ self.result.time(time)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertEqual(time, self.decorated._calls[0])
+ # Calling None passes the None through, in case other results care.
+ self.result.time(None)
+ self.assertEqual(2, len(self.decorated._calls))
+ self.assertEqual(None, self.decorated._calls[1])
+ # Calling other methods doesn't generate an automatic time event.
+ self.result.startTest(self)
+ self.assertEqual(3, len(self.decorated._calls))
+ self.assertNotEqual(None, self.decorated._calls[2])
+
+ def test_set_failfast_True(self):
+ self.assertFalse(self.decorated.failfast)
+ self.result.failfast = True
+ self.assertTrue(self.decorated.failfast)
+
+
+class TestTagCollapsingDecorator(TestCase):
+
+ def test_tags_collapsed_outside_of_tests(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.tags(set(['b']), set())
+ tag_collapser.startTest(self)
+ self.assertEquals(
+ [('tags', set(['a', 'b']), set([])),
+ ('startTest', self),
+ ], result._events)
+
+ def test_tags_collapsed_outside_of_tests_are_flushed(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ tag_collapser.startTestRun()
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.tags(set(['b']), set())
+ tag_collapser.startTest(self)
+ tag_collapser.addSuccess(self)
+ tag_collapser.stopTest(self)
+ tag_collapser.stopTestRun()
+ self.assertEquals(
+ [('startTestRun',),
+ ('tags', set(['a', 'b']), set([])),
+ ('startTest', self),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ('stopTestRun',),
+ ], result._events)
+
+ def test_tags_forwarded_after_tests(self):
+ test = subunit.RemotedTestCase('foo')
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ tag_collapser.startTestRun()
+ tag_collapser.startTest(test)
+ tag_collapser.addSuccess(test)
+ tag_collapser.stopTest(test)
+ tag_collapser.tags(set(['a']), set(['b']))
+ tag_collapser.stopTestRun()
+ self.assertEqual(
+ [('startTestRun',),
+ ('startTest', test),
+ ('addSuccess', test),
+ ('stopTest', test),
+ ('tags', set(['a']), set(['b'])),
+ ('stopTestRun',),
+ ],
+ result._events)
+
+ def test_tags_collapsed_inside_of_tests(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ test = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(test)
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.tags(set(['b']), set(['a']))
+ tag_collapser.tags(set(['c']), set())
+ tag_collapser.stopTest(test)
+ self.assertEquals(
+ [('startTest', test),
+ ('tags', set(['b', 'c']), set(['a'])),
+ ('stopTest', test)],
+ result._events)
+
+ def test_tags_collapsed_inside_of_tests_different_ordering(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ test = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(test)
+ tag_collapser.tags(set(), set(['a']))
+ tag_collapser.tags(set(['a', 'b']), set())
+ tag_collapser.tags(set(['c']), set())
+ tag_collapser.stopTest(test)
+ self.assertEquals(
+ [('startTest', test),
+ ('tags', set(['a', 'b', 'c']), set()),
+ ('stopTest', test)],
+ result._events)
+
+ def test_tags_sent_before_result(self):
+ # Because addSuccess and friends tend to send subunit output
+ # immediately, and because 'tags:' before a result line means
+ # something different to 'tags:' after a result line, we need to be
+ # sure that tags are emitted before 'addSuccess' (or whatever).
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ test = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(test)
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.addSuccess(test)
+ tag_collapser.stopTest(test)
+ self.assertEquals(
+ [('startTest', test),
+ ('tags', set(['a']), set()),
+ ('addSuccess', test),
+ ('stopTest', test)],
+ result._events)
+
+
+class TestTimeCollapsingDecorator(TestCase):
+
+ def make_time(self):
+ # Heh heh.
+ return datetime.datetime(
+ 2000, 1, self.getUniqueInteger(), tzinfo=iso8601.UTC)
+
+ def test_initial_time_forwarded(self):
+ # We always forward the first time event we see.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ tag_collapser.time(a_time)
+ self.assertEquals([('time', a_time)], result._events)
+
+ def test_time_collapsed_to_first_and_last(self):
+ # If there are many consecutive time events, only the first and last
+ # are sent through.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ times = [self.make_time() for i in range(5)]
+ for a_time in times:
+ tag_collapser.time(a_time)
+ tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+ self.assertEquals(
+ [('time', times[0]), ('time', times[-1])], result._events[:-1])
+
+ def test_only_one_time_sent(self):
+ # If we receive a single time event followed by a non-time event, we
+ # send exactly one time event.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ tag_collapser.time(a_time)
+ tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+ self.assertEquals([('time', a_time)], result._events[:-1])
+
+ def test_duplicate_times_not_sent(self):
+ # Many time events with the exact same time are collapsed into one
+ # time event.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ for i in range(5):
+ tag_collapser.time(a_time)
+ tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+ self.assertEquals([('time', a_time)], result._events[:-1])
+
+ def test_no_times_inserted(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ tag_collapser.time(a_time)
+ foo = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(foo)
+ tag_collapser.addSuccess(foo)
+ tag_collapser.stopTest(foo)
+ self.assertEquals(
+ [('time', a_time),
+ ('startTest', foo),
+ ('addSuccess', foo),
+ ('stopTest', foo)], result._events)
+
+
+class TestByTestResultTests(testtools.TestCase):
+
+ def setUp(self):
+ super(TestByTestResultTests, self).setUp()
+ self.log = []
+ self.result = subunit.test_results.TestByTestResult(self.on_test)
+ if sys.version_info >= (3, 0):
+ self.result._now = iter(range(5)).__next__
+ else:
+ self.result._now = iter(range(5)).next
+
+ def assertCalled(self, **kwargs):
+ defaults = {
+ 'test': self,
+ 'tags': set(),
+ 'details': None,
+ 'start_time': 0,
+ 'stop_time': 1,
+ }
+ defaults.update(kwargs)
+ self.assertEqual([defaults], self.log)
+
+ def on_test(self, **kwargs):
+ self.log.append(kwargs)
+
+ def test_no_tests_nothing_reported(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertEqual([], self.log)
+
+ def test_add_success(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success')
+
+ def test_add_success_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_tags(self):
+ if not getattr(self.result, 'tags', None):
+ self.skipTest("No tags in testtools")
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo']))
+
+ def test_add_error(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addError(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='error',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_error_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addError(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='error', details=details)
+
+ def test_add_failure(self):
+ self.result.startTest(self)
+ try:
+ self.fail("intentional failure")
+ except self.failureException:
+ failure = sys.exc_info()
+ self.result.addFailure(self, failure)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='failure',
+ details={'traceback': TracebackContent(failure, self)})
+
+ def test_add_failure_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='failure', details=details)
+
+ def test_add_xfail(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addExpectedFailure(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='xfail',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_xfail_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addExpectedFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='xfail', details=details)
+
+ def test_add_unexpected_success(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addUnexpectedSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_add_skip_reason(self):
+ self.result.startTest(self)
+ reason = self.getUniqueString()
+ self.result.addSkip(self, reason)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='skip', details={'reason': text_content(reason)})
+
+ def test_add_skip_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSkip(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='skip', details=details)
+
+ def test_twice(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self, details={'foo': 'bar'})
+ self.result.stopTest(self)
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertEqual(
+ [{'test': self,
+ 'status': 'success',
+ 'start_time': 0,
+ 'stop_time': 1,
+ 'tags': set(),
+ 'details': {'foo': 'bar'}},
+ {'test': self,
+ 'status': 'success',
+ 'start_time': 2,
+ 'stop_time': 3,
+ 'tags': set(),
+ 'details': None},
+ ],
+ self.log)
+
+
+class TestCsvResult(testtools.TestCase):
+
+ def parse_stream(self, stream):
+ stream.seek(0)
+ reader = csv.reader(stream)
+ return list(reader)
+
+ def test_csv_output(self):
+ stream = StringIO()
+ result = subunit.test_results.CsvResult(stream)
+ if sys.version_info >= (3, 0):
+ result._now = iter(range(5)).__next__
+ else:
+ result._now = iter(range(5)).next
+ result.startTestRun()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ result.stopTestRun()
+ self.assertEqual(
+ [['test', 'status', 'start_time', 'stop_time'],
+ [self.id(), 'success', '0', '1'],
+ ],
+ self.parse_stream(stream))
+
+ def test_just_header_when_no_tests(self):
+ stream = StringIO()
+ result = subunit.test_results.CsvResult(stream)
+ result.startTestRun()
+ result.stopTestRun()
+ self.assertEqual(
+ [['test', 'status', 'start_time', 'stop_time']],
+ self.parse_stream(stream))
+
+ def test_no_output_before_events(self):
+ stream = StringIO()
+ subunit.test_results.CsvResult(stream)
+ self.assertEqual([], self.parse_stream(stream))
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py
new file mode 100644
index 00000000000..057f65c3bdd
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py
@@ -0,0 +1,495 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import codecs
+utf_8_decode = codecs.utf_8_decode
+import datetime
+from io import UnsupportedOperation
+import os
+import select
+import struct
+import zlib
+
+from extras import safe_hasattr, try_imports
+builtins = try_imports(['__builtin__', 'builtins'])
+
+import subunit
+import subunit.iso8601 as iso8601
+
+__all__ = [
+ 'ByteStreamToStreamResult',
+ 'StreamResultToBytes',
+ ]
+
+SIGNATURE = b'\xb3'
+FMT_8 = '>B'
+FMT_16 = '>H'
+FMT_24 = '>HB'
+FMT_32 = '>I'
+FMT_TIMESTAMP = '>II'
+FLAG_TEST_ID = 0x0800
+FLAG_ROUTE_CODE = 0x0400
+FLAG_TIMESTAMP = 0x0200
+FLAG_RUNNABLE = 0x0100
+FLAG_TAGS = 0x0080
+FLAG_MIME_TYPE = 0x0020
+FLAG_EOF = 0x0010
+FLAG_FILE_CONTENT = 0x0040
+EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=iso8601.Utc())
+NUL_ELEMENT = b'\0'[0]
+# Contains True for types for which 'nul in thing' falsely returns false.
+_nul_test_broken = {}
+
+
+def has_nul(buffer_or_bytes):
+ """Return True if a null byte is present in buffer_or_bytes."""
+ # Simple "if NUL_ELEMENT in utf8_bytes:" fails on Python 3.1 and 3.2 with
+ # memoryviews. See https://bugs.launchpad.net/subunit/+bug/1216246
+ buffer_type = type(buffer_or_bytes)
+ broken = _nul_test_broken.get(buffer_type)
+ if broken is None:
+ reference = buffer_type(b'\0')
+ broken = not NUL_ELEMENT in reference
+ _nul_test_broken[buffer_type] = broken
+ if broken:
+ return b'\0' in buffer_or_bytes
+ else:
+ return NUL_ELEMENT in buffer_or_bytes
+
+
+class ParseError(Exception):
+ """Used to pass error messages within the parser."""
+
+
+class StreamResultToBytes(object):
+ """Convert StreamResult API calls to bytes.
+
+ The StreamResult API is defined by testtools.StreamResult.
+ """
+
+ status_mask = {
+ None: 0,
+ 'exists': 0x1,
+ 'inprogress': 0x2,
+ 'success': 0x3,
+ 'uxsuccess': 0x4,
+ 'skip': 0x5,
+ 'fail': 0x6,
+ 'xfail': 0x7,
+ }
+
+ zero_b = b'\0'[0]
+
+ def __init__(self, output_stream):
+ """Create a StreamResultToBytes with output written to output_stream.
+
+ :param output_stream: A file-like object. Must support write(bytes)
+ and flush() methods. Flush will be called after each write.
+ The stream will be passed through subunit.make_stream_binary,
+ to handle regular cases such as stdout.
+ """
+ self.output_stream = subunit.make_stream_binary(output_stream)
+
+ def startTestRun(self):
+ pass
+
+ def stopTestRun(self):
+ pass
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ self._write_packet(test_id=test_id, test_status=test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+
+ def _write_utf8(self, a_string, packet):
+ utf8 = a_string.encode('utf-8')
+ self._write_number(len(utf8), packet)
+ packet.append(utf8)
+
+ def _write_len16(self, length, packet):
+ assert length < 65536
+ packet.append(struct.pack(FMT_16, length))
+
+ def _write_number(self, value, packet):
+ packet.extend(self._encode_number(value))
+
+ def _encode_number(self, value):
+ assert value >= 0
+ if value < 64:
+ return [struct.pack(FMT_8, value)]
+ elif value < 16384:
+ value = value | 0x4000
+ return [struct.pack(FMT_16, value)]
+ elif value < 4194304:
+ value = value | 0x800000
+ return [struct.pack(FMT_16, value >> 8),
+ struct.pack(FMT_8, value & 0xff)]
+ elif value < 1073741824:
+ value = value | 0xc0000000
+ return [struct.pack(FMT_32, value)]
+ else:
+ raise ValueError('value too large to encode: %r' % (value,))
+
+ def _write_packet(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ packet = [SIGNATURE]
+ packet.append(b'FF') # placeholder for flags
+ # placeholder for length, but see below as length is variable.
+ packet.append(b'')
+ flags = 0x2000 # Version 0x2
+ if timestamp is not None:
+ flags = flags | FLAG_TIMESTAMP
+ since_epoch = timestamp - EPOCH
+ nanoseconds = since_epoch.microseconds * 1000
+ seconds = (since_epoch.seconds + since_epoch.days * 24 * 3600)
+ packet.append(struct.pack(FMT_32, seconds))
+ self._write_number(nanoseconds, packet)
+ if test_id is not None:
+ flags = flags | FLAG_TEST_ID
+ self._write_utf8(test_id, packet)
+ if test_tags:
+ flags = flags | FLAG_TAGS
+ self._write_number(len(test_tags), packet)
+ for tag in test_tags:
+ self._write_utf8(tag, packet)
+ if runnable:
+ flags = flags | FLAG_RUNNABLE
+ if mime_type:
+ flags = flags | FLAG_MIME_TYPE
+ self._write_utf8(mime_type, packet)
+ if file_name is not None:
+ flags = flags | FLAG_FILE_CONTENT
+ self._write_utf8(file_name, packet)
+ self._write_number(len(file_bytes), packet)
+ packet.append(file_bytes)
+ if eof:
+ flags = flags | FLAG_EOF
+ if route_code is not None:
+ flags = flags | FLAG_ROUTE_CODE
+ self._write_utf8(route_code, packet)
+ # 0x0008 - not used in v2.
+ flags = flags | self.status_mask[test_status]
+ packet[1] = struct.pack(FMT_16, flags)
+ base_length = sum(map(len, packet)) + 4
+ if base_length <= 62:
+ # one byte to encode length, 62+1 = 63
+ length_length = 1
+ elif base_length <= 16381:
+ # two bytes to encode length, 16381+2 = 16383
+ length_length = 2
+ elif base_length <= 4194300:
+            # three bytes to encode length, 4194300+3=4194303
+ length_length = 3
+ else:
+ # Longer than policy:
+ # TODO: chunk the packet automatically?
+ # - strip all but file data
+ # - do 4M chunks of that till done
+ # - include original data in final chunk.
+ raise ValueError("Length too long: %r" % base_length)
+ packet[2:3] = self._encode_number(base_length + length_length)
+ # We could either do a partial application of crc32 over each chunk
+ # or a single join to a temp variable then a final join
+ # or two writes (that python might then split).
+ # For now, simplest code: join, crc32, join, output
+ content = b''.join(packet)
+ self.output_stream.write(content + struct.pack(
+ FMT_32, zlib.crc32(content) & 0xffffffff))
+ self.output_stream.flush()
+
+
+class ByteStreamToStreamResult(object):
+ """Parse a subunit byte stream.
+
+    Mixed streams that contain non-subunit content are supported when a
+    non_subunit_name is passed to the constructor. The default is to raise an
+ error containing the non-subunit byte after it has been read from the
+ stream.
+
+ Typical use:
+
+ >>> case = ByteStreamToStreamResult(sys.stdin.buffer)
+ >>> result = StreamResult()
+ >>> result.startTestRun()
+ >>> case.run(result)
+ >>> result.stopTestRun()
+ """
+
+ status_lookup = {
+ 0x0: None,
+ 0x1: 'exists',
+ 0x2: 'inprogress',
+ 0x3: 'success',
+ 0x4: 'uxsuccess',
+ 0x5: 'skip',
+ 0x6: 'fail',
+ 0x7: 'xfail',
+ }
+
+ def __init__(self, source, non_subunit_name=None):
+ """Create a ByteStreamToStreamResult.
+
+ :param source: A file like object to read bytes from. Must support
+ read(<count>) and return bytes. The file is not closed by
+ ByteStreamToStreamResult. subunit.make_stream_binary() is
+ called on the stream to get it into bytes mode.
+        :param non_subunit_name: If set to non-None, non-subunit content
+ encountered in the stream will be converted into file packets
+ labelled with this name.
+ """
+ self.non_subunit_name = non_subunit_name
+ self.source = subunit.make_stream_binary(source)
+ self.codec = codecs.lookup('utf8').incrementaldecoder()
+
+ def run(self, result):
+ """Parse source and emit events to result.
+
+ This is a blocking call: it will run until EOF is detected on source.
+ """
+ self.codec.reset()
+ mid_character = False
+ while True:
+ # We're in blocking mode; read one char
+ content = self.source.read(1)
+ if not content:
+ # EOF
+ return
+ if not mid_character and content[0] == SIGNATURE[0]:
+ self._parse_packet(result)
+ continue
+ if self.non_subunit_name is None:
+ raise Exception("Non subunit content", content)
+ try:
+ if self.codec.decode(content):
+ # End of a character
+ mid_character = False
+ else:
+ mid_character = True
+ except UnicodeDecodeError:
+ # Bad unicode, not our concern.
+ mid_character = False
+ # Aggregate all content that is not subunit until either
+ # 1MiB is accumulated or 50ms has passed with no input.
+ # Both are arbitrary amounts intended to give a simple
+ # balance between efficiency (avoiding death by a thousand
+ # one-byte packets), buffering (avoiding overlarge state
+ # being hidden on intermediary nodes) and interactivity
+ # (when driving a debugger, slow response to typing is
+ # annoying).
+ buffered = [content]
+ while len(buffered[-1]):
+ try:
+ self.source.fileno()
+ except:
+ # Won't be able to select, fallback to
+ # one-byte-at-a-time.
+ break
+ # Note: this has a very low timeout because with stdin, the
+ # BufferedIO layer typically has all the content available
+ # from the stream when e.g. pdb is dropped into, leading to
+ # select always timing out when in fact we could have read
+ # (from the buffer layer) - we typically fail to aggregate
+ # any content on 3.x Pythons.
+ readable = select.select([self.source], [], [], 0.000001)[0]
+ if readable:
+ content = self.source.read(1)
+ if not len(content):
+ # EOF - break and emit buffered.
+ break
+ if not mid_character and content[0] == SIGNATURE[0]:
+ # New packet, break, emit buffered, then parse.
+ break
+ buffered.append(content)
+ # Feed into the codec.
+ try:
+ if self.codec.decode(content):
+ # End of a character
+ mid_character = False
+ else:
+ mid_character = True
+ except UnicodeDecodeError:
+ # Bad unicode, not our concern.
+ mid_character = False
+ if not readable or len(buffered) >= 1048576:
+ # timeout or too much data, emit what we have.
+ break
+ result.status(
+ file_name=self.non_subunit_name,
+ file_bytes=b''.join(buffered))
+ if mid_character or not len(content) or content[0] != SIGNATURE[0]:
+ continue
+ # Otherwise, parse a data packet.
+ self._parse_packet(result)
+
+ def _parse_packet(self, result):
+ try:
+ packet = [SIGNATURE]
+ self._parse(packet, result)
+ except ParseError as error:
+ result.status(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=b''.join(packet),
+ mime_type="application/octet-stream")
+ result.status(test_id="subunit.parser", test_status='fail',
+ eof=True, file_name="Parser Error",
+ file_bytes=(error.args[0]).encode('utf8'),
+ mime_type="text/plain;charset=utf8")
+
+ def _to_bytes(self, data, pos, length):
+ """Return a slice of data from pos for length as bytes."""
+ # memoryview in 2.7.3 and 3.2 isn't directly usable with struct :(.
+ # see https://bugs.launchpad.net/subunit/+bug/1216163
+ result = data[pos:pos+length]
+ if type(result) is not bytes:
+ return result.tobytes()
+ return result
+
+ def _parse_varint(self, data, pos, max_3_bytes=False):
+ # because the only incremental IO we do is at the start, and the 32 bit
+ # CRC means we can always safely read enough to cover any varint, we
+ # can be sure that there should be enough data - and if not it is an
+ # error not a normal situation.
+ data_0 = struct.unpack(FMT_8, self._to_bytes(data, pos, 1))[0]
+ typeenum = data_0 & 0xc0
+ value_0 = data_0 & 0x3f
+ if typeenum == 0x00:
+ return value_0, 1
+ elif typeenum == 0x40:
+ data_1 = struct.unpack(FMT_8, self._to_bytes(data, pos+1, 1))[0]
+ return (value_0 << 8) | data_1, 2
+ elif typeenum == 0x80:
+ data_1 = struct.unpack(FMT_16, self._to_bytes(data, pos+1, 2))[0]
+ return (value_0 << 16) | data_1, 3
+ else:
+ if max_3_bytes:
+ raise ParseError('3 byte maximum given but 4 byte value found.')
+ data_1, data_2 = struct.unpack(FMT_24, self._to_bytes(data, pos+1, 3))
+ result = (value_0 << 24) | data_1 << 8 | data_2
+ return result, 4
+
+ def _parse(self, packet, result):
+ # 2 bytes flags, at most 3 bytes length.
+ packet.append(self.source.read(5))
+ flags = struct.unpack(FMT_16, packet[-1][:2])[0]
+ length, consumed = self._parse_varint(
+ packet[-1], 2, max_3_bytes=True)
+ remainder = self.source.read(length - 6)
+ if len(remainder) != length - 6:
+ raise ParseError(
+ 'Short read - got %d bytes, wanted %d bytes' % (
+ len(remainder), length - 6))
+ if consumed != 3:
+ # Avoid having to parse torn values
+ packet[-1] += remainder
+ pos = 2 + consumed
+ else:
+ # Avoid copying potentially lots of data.
+ packet.append(remainder)
+ pos = 0
+ crc = zlib.crc32(packet[0])
+ for fragment in packet[1:-1]:
+ crc = zlib.crc32(fragment, crc)
+ crc = zlib.crc32(packet[-1][:-4], crc) & 0xffffffff
+ packet_crc = struct.unpack(FMT_32, packet[-1][-4:])[0]
+ if crc != packet_crc:
+ # Bad CRC, report it and stop parsing the packet.
+ raise ParseError(
+ 'Bad checksum - calculated (0x%x), stored (0x%x)'
+ % (crc, packet_crc))
+ if safe_hasattr(builtins, 'memoryview'):
+ body = memoryview(packet[-1])
+ else:
+ body = packet[-1]
+ # Discard CRC-32
+ body = body[:-4]
+ # One packet could have both file and status data; the Python API
+ # presents these separately (perhaps it shouldn't?)
+ if flags & FLAG_TIMESTAMP:
+ seconds = struct.unpack(FMT_32, self._to_bytes(body, pos, 4))[0]
+ nanoseconds, consumed = self._parse_varint(body, pos+4)
+ pos = pos + 4 + consumed
+ timestamp = EPOCH + datetime.timedelta(
+ seconds=seconds, microseconds=nanoseconds/1000)
+ else:
+ timestamp = None
+ if flags & FLAG_TEST_ID:
+ test_id, pos = self._read_utf8(body, pos)
+ else:
+ test_id = None
+ if flags & FLAG_TAGS:
+ tag_count, consumed = self._parse_varint(body, pos)
+ pos += consumed
+ test_tags = set()
+ for _ in range(tag_count):
+ tag, pos = self._read_utf8(body, pos)
+ test_tags.add(tag)
+ else:
+ test_tags = None
+ if flags & FLAG_MIME_TYPE:
+ mime_type, pos = self._read_utf8(body, pos)
+ else:
+ mime_type = None
+ if flags & FLAG_FILE_CONTENT:
+ file_name, pos = self._read_utf8(body, pos)
+ content_length, consumed = self._parse_varint(body, pos)
+ pos += consumed
+ file_bytes = self._to_bytes(body, pos, content_length)
+ if len(file_bytes) != content_length:
+ raise ParseError('File content extends past end of packet: '
+ 'claimed %d bytes, %d available' % (
+ content_length, len(file_bytes)))
+ pos += content_length
+ else:
+ file_name = None
+ file_bytes = None
+ if flags & FLAG_ROUTE_CODE:
+ route_code, pos = self._read_utf8(body, pos)
+ else:
+ route_code = None
+ runnable = bool(flags & FLAG_RUNNABLE)
+ eof = bool(flags & FLAG_EOF)
+ test_status = self.status_lookup[flags & 0x0007]
+ result.status(test_id=test_id, test_status=test_status,
+ test_tags=test_tags, runnable=runnable, mime_type=mime_type,
+ eof=eof, file_name=file_name, file_bytes=file_bytes,
+ route_code=route_code, timestamp=timestamp)
+ __call__ = run
+
+ def _read_utf8(self, buf, pos):
+ length, consumed = self._parse_varint(buf, pos)
+ pos += consumed
+ utf8_bytes = buf[pos:pos+length]
+ if length != len(utf8_bytes):
+ raise ParseError(
+ 'UTF8 string at offset %d extends past end of packet: '
+ 'claimed %d bytes, %d available' % (pos - 2, length,
+ len(utf8_bytes)))
+ if has_nul(utf8_bytes):
+ raise ParseError('UTF8 string at offset %d contains NUL byte' % (
+ pos-2,))
+ try:
+ utf8, decoded_bytes = utf_8_decode(utf8_bytes)
+ if decoded_bytes != length:
+ raise ParseError("Invalid (partially decodable) string at "
+ "offset %d, %d undecoded bytes" % (
+ pos-2, length - decoded_bytes))
+ return utf8, length+pos
+ except UnicodeDecodeError:
+ raise ParseError('UTF8 string at offset %d is not UTF8' % (pos-2,))
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO
new file mode 100644
index 00000000000..de79389b594
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO
@@ -0,0 +1,483 @@
+Metadata-Version: 1.0
+Name: python-subunit
+Version: 0.0.16
+Summary: Python implementation of subunit test streaming protocol
+Home-page: http://launchpad.net/subunit
+Author: Robert Collins
+Author-email: subunit-dev@lists.launchpad.net
+License: UNKNOWN
+Description:
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2013 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+ Subunit
+ -------
+
+ Subunit is a streaming protocol for test results.
+
+ There are two major revisions of the protocol. Version 1 was trivially human
+ readable but had significant defects as far as highly parallel testing was
+ concerned - it had no room for doing discovery and execution in parallel,
+ required substantial buffering when multiplexing and was fragile - a corrupt
+ byte could cause an entire stream to be misparsed. Version 1.1 added
+ encapsulation of binary streams which mitigated some of the issues but the
+ core remained.
+
+ Version 2 shares many of the good characteristics of Version 1 - it can be
+ embedded into a regular text stream (e.g. from a build system) and it still
+ models xUnit style test execution. It also fixes many of the issues with
+ Version 1 - Version 2 can be multiplexed without excessive buffering (in
+ time or space), and it has a well-defined recovery mechanism for dealing with
+ corrupted streams (e.g. where two processes write to the same stream
+ concurrently, or where the stream generator suffers a bug).
+
+ More details on both protocol versions can be found in the 'Protocol' section
+ of this document.
+
+ Subunit comes with command line filters to process a subunit stream and
+ language bindings for python, C, C++ and shell. Bindings are easy to write
+ for other languages.
+
+ A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole, and tests running on multiple machines
+ can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+ other can be run separately and then aggregated, rather than interfering
+ with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+ deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+ Subunit supplies the following filters:
+ * tap2subunit - convert Perl's Test Anything Protocol (TAP) to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+ Integration with other tools
+ ----------------------------
+
+ Subunit's language bindings provide integration with various test runners such
+ as 'check', 'cppunit' and Python's 'unittest'. Beyond that, a small amount of glue
+ (typically a few lines) will allow Subunit to be used in more sophisticated
+ ways.
+
+ Python
+ ======
+
+ Subunit has excellent Python support: most of the filters and tools are written
+ in Python, and there are facilities for using Subunit to increase test isolation
+ seamlessly within a test suite.
+
+ The most common way is to run an existing python test suite and have it output
+ subunit via the ``subunit.run`` module::
+
+ $ python -m subunit.run mypackage.tests.test_suite
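+
+ The stream produced this way can be piped straight into any of the filters
+ listed above; for example, to get familiar pyunit-style output back::
+
+     $ python -m subunit.run mypackage.tests.test_suite | subunit2pyunit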
+
+ For more information on the Python support Subunit offers, please see
+ ``pydoc subunit``, or the source in ``python/subunit/``.
+
+ C
+ =
+
+ Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+ has included subunit support in their project for some years now. See
+ 'c/README' for more details.
+
+ C++
+ ===
+
+ The C library is includable and usable directly from C++. A TestListener for
+ CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+ shell
+ =====
+
+ There are two sets of shell tools. There are filters, which accept a subunit
+ stream on stdin and output processed data (or a transformed stream) on stdout.
+
+ Then there are unittest facilities similar to those for C: shell bindings
+ consisting of simple functions to output protocol elements, and a patch for
+ adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+ details.
+
+ Filter recipes
+ --------------
+
+ To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
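+
+ To get a quick pass/fail summary of a previously recorded run (the file name
+ here is just an example)::
+
+     subunit-stats < testrun.subunit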
+
+
+ The xUnit test model
+ --------------------
+
+ Subunit implements a slightly modified xUnit test model. The stock standard
+ model is that there are tests, which have an id(), can be run, and when run
+ start, emit an outcome (like success or failure) and then finish.
+
+ Subunit extends this with the idea of test enumeration (find out about tests
+ a runner has without running them), tags (allow users to describe tests in
+ ways the test framework doesn't apply any semantic value to), file attachments
+ (allow arbitrary data to make analysing a failure easy) and timestamps.
+
+ The protocol
+ ------------
+
+ Version 2, or v2 is new and still under development, but is intended to
+ supersede version 1 in the very near future. Subunit's bundled tools accept
+ only version 2 and only emit version 2, but the new filters subunit-1to2 and
+ subunit-2to1 can be used to interoperate with older third party libraries.
+
+ Version 2
+ =========
+
+ Version 2 is a binary protocol consisting of independent packets that can be
+ embedded in the output from tools like make - as long as each packet has no
+ other bytes mixed in with it (which 'make -j N>1' has a tendency to do).
+ Version 2 is currently in draft form, and early adopters should be willing
+ to either discard stored results (if protocol changes are made), or bulk
+ convert them back to v1 and then to a newer edition of v2.
+
+ The protocol synchronises at the start of the stream, after a packet, or
+ after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+ directly after the end of the prior packet.
+
+ Subunit is intended to be transported over a reliable streaming protocol such
+ as TCP. As such it does not concern itself with out of order delivery of
+ packets. However, because of the possibility of corruption due to either
+ bugs in the sender, or due to mixed up data from concurrent writes to the same
+ fd when being embedded, subunit strives to recover reasonably gracefully from
+ damaged data.
+
+ A key design goal for Subunit version 2 is to allow processing and multiplexing
+ without forcing buffering for semantic correctness, as buffering tends to hide
+ hung or otherwise misbehaving tests. That said, limited time based buffering
+ for network efficiency is a good idea - this is ultimately the implementer's
+ choice. Line buffering is also discouraged for subunit streams, as dropping
+ into a debugger or other tool may require interactive traffic even if line
+ buffering would not otherwise be a problem.
+
+ In version two there are two conceptual events - a test status event and a file
+ attachment event. Events may have timestamps, and the path of multiplexers that
+ an event is routed through is recorded to permit sending actions back to the
+ source (such as new tests to run or stdin for driving debuggers and other
+ interactive input). Test status events are used to enumerate tests, to report
+ tests and test helpers as they run. Tests may have tags, used to allow
+ tunnelling extra meanings through subunit without requiring parsing of
+ arbitrary file attachments. Things that are not standalone tests get marked
+ as such by setting the 'Runnable' flag to false. (For instance, individual
+ assertions in TAP are not runnable tests, only the top level TAP test script
+ is runnable).
+
+ File attachments are used to provide rich detail about the nature of a failure.
+ File attachments can also be used to encapsulate stdout and stderr both during
+ and outside tests.
+
+ Most numbers are stored in network byte order - Most Significant Byte first
+ encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+ byte's top 2 high order bits encode the total number of octets in the number.
+ This encoding can encode values from 0 to 2**30-1, enough to encode a
+ nanosecond. Numbers that are not variable length encoded are still stored in
+ MSB order.
+
+ prefix octets max max
+ +-------+--------+---------+------------+
+ | 00 | 1 | 2**6-1 | 63 |
+ | 01 | 2 | 2**14-1 | 16383 |
+ | 10 | 3 | 2**22-1 | 4194303 |
+ | 11 | 4 | 2**30-1 | 1073741823 |
+ +-------+--------+---------+------------+
+
+ All variable length elements of the packet are stored with a length prefix
+ number allowing them to be skipped over for consumers that don't need to
+ interpret them.
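+
+ As a non-normative illustration, the number encoding above can be written in a
+ few lines of Python, mirroring the table (the function name is arbitrary)::
+
+     import struct
+
+     def encode_number(value):
+         # The top two bits of the first octet give the total octet count.
+         if value < 2**6:
+             return struct.pack('>B', value)                 # prefix 00
+         elif value < 2**14:
+             return struct.pack('>H', value | 0x4000)        # prefix 01
+         elif value < 2**22:
+             return struct.pack('>I', value | 0x800000)[1:]  # prefix 10, 3 octets
+         elif value < 2**30:
+             return struct.pack('>I', value | 0xc0000000)    # prefix 11
+         raise ValueError('too large for this encoding')
+
+ For instance, ``encode_number(12)`` yields the single byte 0x0c, which is the
+ length field of the example packet shown later in this document.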
+
+ UTF-8 strings are stored with no terminating NUL and should not have any embedded
+ NULs. Implementations SHOULD validate any such strings that they process and take
+ some remedial action (such as discarding the packet as corrupt).
+
+ In short the structure of a packet is:
+ PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+ FILECONTENT? ROUTING_CODE? CRC32
+
+ In more detail...
+
+ Packets are identified by a single byte signature - 0xB3, which is never legal
+ in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+ bit set and the second not, which is the UTF-8 signature for a continuation
+ byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+ the 1 and 0 for a continuation byte.
+
+ If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
+ a legal character, consider either recoding the text to UTF-8, or using
+ subunit's 'file' packets to embed the text stream in subunit, rather than the
+ other way around.
+
+ Following the signature byte comes a 16-bit flags field, which includes a
+ 4-bit version field - if the version is not 0x2 then the packet cannot be
+ read. It is recommended to signal an error at this point (e.g. by emitting
+ a synthetic error packet and returning to the top level loop to look for
+ new packets, or exiting with an error). If recovery is desired, treat the
+ packet signature as an opaque byte and scan for a new synchronisation point.
+ NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+ as they are an 8-bit safe container format, so recovery from this situation
+ may involve an arbitrary number of false positives until an actual packet
+ is encountered: and even then it may still be false, failing after passing
+ the version check due to coincidence.
+
+ Flags are stored in network byte order too.
+ +-------------------------+------------------------+
+ | High byte | Low byte |
+ | 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
+ | VERSION |feature bits| |
+ +------------+------------+------------------------+
+
+ Valid version values are:
+ 0x2 - version 2
+
+ Feature bits:
+ Bit 11 - mask 0x0800 - Test id present.
+ Bit 10 - mask 0x0400 - Routing code present.
+ Bit 9 - mask 0x0200 - Timestamp present.
+ Bit 8 - mask 0x0100 - Test is 'runnable'.
+ Bit 7 - mask 0x0080 - Tags are present.
+ Bit 6 - mask 0x0040 - File content is present.
+ Bit 5 - mask 0x0020 - File MIME type is present.
+ Bit 4 - mask 0x0010 - EOF marker.
+ Bit 3 - mask 0x0008 - Must be zero in version 2.
+
+ Test status gets three bits:
+ Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+ 000 - undefined / no test
+ 001 - Enumeration / existence
+ 002 - In progress
+ 003 - Success
+ 004 - Unexpected Success
+ 005 - Skipped
+ 006 - Failed
+ 007 - Expected failure
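+
+ To make the layout concrete, here is how the flags word 0x2901 (used in the
+ example packet later in this document) breaks down using the masks above::
+
+     flags = 0x2901
+     flags >> 12       # 0x2 - version 2
+     flags & 0x0800    # set - a test id is present
+     flags & 0x0100    # set - the test is runnable
+     flags & 0x0007    # 0x1 - status: enumeration / existence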
+
+ After the flags field is a number field giving the length in bytes for the
+ entire packet including the signature and the checksum. This length must
+ be less than 4MiB - 4194303 bytes. The encoding can obviously record a larger
+ number but one of the goals is to avoid requiring large buffers, or causing
+ large latency in the packet forward/processing pipeline. Larger file
+ attachments can be communicated in multiple packets, and the overhead in such a
+ 4MiB packet is approximately 0.2%.
+
+ The rest of the packet is a series of optional features as specified by the set
+ feature bits in the flags field. When absent they are entirely absent.
+
+ Forwarding and multiplexing of packets can be done without interpreting the
+ remainder of the packet until the routing code and checksum (which are both at
+ the end of the packet). Additionally, routers can often avoid copying or moving
+ the bulk of the packet, as long as the routing code size increase doesn't force
+ the length encoding to take up a new byte (which will only happen to packets
+ less than or equal to 16KiB in length) - large packets are very efficient to
+ route.
+
+ Timestamp when present is a 32-bit unsigned integer for seconds, and a variable
+ length number for nanoseconds, representing UTC time since Unix Epoch in
+ seconds and nanoseconds.
+
+ Test id when present is a UTF-8 string. The test id should uniquely identify
+ runnable tests such that they can be selected individually. For tests and other
+ actions which cannot be individually run (such as test
+ fixtures/layers/subtests) uniqueness is not required (though being human
+ meaningful is highly recommended).
+
+ Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+ There are no restrictions on tag content (other than the restrictions on UTF-8
+ strings in subunit in general). Tags have no ordering.
+
+ When a MIME type is present, it defines the MIME type for the file across all
+ packets for the same file (routing code + testid + name uniquely identifies a file,
+ reset when EOF is flagged). If a file never has a MIME type set, it should be
+ treated as application/octet-stream.
+
+ File content when present is a UTF-8 string for the name followed by the length
+ in bytes of the content, and then the content octets.
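+
+ Using the Python bindings in this package, a packet carrying such a file
+ attachment can be produced with ``StreamResultToBytes`` (the name and content
+ below are arbitrary examples)::
+
+     from io import BytesIO
+     from subunit.v2 import StreamResultToBytes
+
+     out = BytesIO()
+     StreamResultToBytes(out).status(
+         file_name='stdout', file_bytes=b'hello\n',
+         mime_type='text/plain;charset=utf8', eof=True)
+     packet = out.getvalue()  # one v2 packet carrying the file content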
+
+ If present, the routing code is a UTF-8 string. The routing code is used to
+ determine which test backend a test was running on when doing data analysis,
+ and to route stdin to the test process if interaction is required.
+
+ Multiplexers SHOULD add a routing code if none is present, and prefix any
+ existing routing code with a routing code ('/' separated) if one is already
+ present. For example, a multiplexer might label each stream it is multiplexing
+ with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+ code '3' from stream '0' would adjust the route code when forwarding the packet
+ to be '0/3'.
+
+ Following the end of the packet is a CRC-32 checksum of the contents of the
+ packet including the signature.
+
+ Example packets
+ ~~~~~~~~~~~~~~~
+
+ Trivial test "foo" enumeration packet, with test id, runnable set,
+ status=enumeration. Spaces below are to visually break up signature / flags /
+ length / testid / crc32
+
+ b3 2901 0c 03666f6f 08555f1b
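+
+ The same bytes can be reproduced with the Python bindings in this package,
+ for example::
+
+     import binascii
+     from io import BytesIO
+     from subunit.v2 import StreamResultToBytes
+
+     out = BytesIO()
+     StreamResultToBytes(out).status(test_id='foo', test_status='exists')
+     binascii.hexlify(out.getvalue())  # expected: b329010c03666f6f08555f1b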
+
+
+ Version 1 (and 1.1)
+ ===================
+
+ Version 1 (and 1.1) are mostly human readable protocols.
+
+ Sample subunit wire contents
+ ----------------------------
+
+ The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+ When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+ Subunit protocol description
+ ============================
+
+ This description is being ported to an EBNF style. Currently it is only partly in
+ that style, but should be fairly clear all the same. When in doubt, refer to the
+ source (and ideally help fix up the description!). Generally the protocol is
+ line orientated and consists of either directives and their parameters, or
+ when outside a DETAILS region unexpected lines which are not interpreted by
+ the parser - they should be forwarded unaltered.
+
+ test|testing|test:|testing: test LABEL
+ success|success:|successful|successful: test LABEL
+ success|success:|successful|successful: test LABEL DETAILS
+ failure: test LABEL
+ failure: test LABEL DETAILS
+ error: test LABEL
+ error: test LABEL DETAILS
+ skip[:] test LABEL
+ skip[:] test LABEL DETAILS
+ xfail[:] test LABEL
+ xfail[:] test LABEL DETAILS
+ uxsuccess[:] test LABEL
+ uxsuccess[:] test LABEL DETAILS
+ progress: [+|-]X
+ progress: push
+ progress: pop
+ tags: [-]TAG ...
+ time: YYYY-MM-DD HH:MM:SSZ
+
+ LABEL: UTF8*
+ NAME: UTF8*
+ DETAILS ::= BRACKETED | MULTIPART
+ BRACKETED ::= '[' CR UTF8-lines ']' CR
+ MULTIPART ::= '[ multipart' CR PART* ']' CR
+ PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+ PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+ PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+ unexpected output on stdout -> stdout.
+ exit w/0 or last test completing -> error
+
+ Tags given outside a test are applied to all following tests
+ Tags given after a test: line and before the result line for the same test
+ apply only to that test, and inherit the current global tags.
+ A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+ applying to a single test, or to cancel a global tag.
+
+ The progress directive is used to provide progress information about a stream
+ so that stream consumer can provide completion estimates, progress bars and so
+ on. Stream generators that know how many tests will be present in the stream
+ should output "progress: COUNT". Stream filters that add tests should output
+ "progress: +COUNT", and those that remove tests should output
+ "progress: -COUNT". An absolute count should reset the progress indicators in
+ use - it indicates that two separate streams from different generators have
+ been trivially concatenated together, and there is no knowledge of how many
+ more complete streams are incoming. Smart concatenation could scan each stream
+ for their count and sum them, or alternatively translate absolute counts into
+ relative counts inline. It is recommended that outputters avoid absolute counts
+ unless necessary. The push and pop directives are used to provide local regions
+ for progress reporting. This fits with hierarchically operating test
+ environments - such as those that organise tests into suites - the top-most
+ runner can report on the number of suites, and each suite surround its output
+ with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+ the progress of the restored level by one step. Encountering progress
+ directives between the start and end of a test pair indicates that a previous
+ test was interrupted and did not cleanly terminate: it should be implicitly
+ closed with an error (the same as when a stream ends with no closing test
+ directive for the most recently started test).
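+
+ For example, a runner that emits two suites, each surrounded by push/pop and
+ each containing two tests, might produce a stream shaped roughly like this
+ (test names are illustrative only)::
+
+     progress: 2
+     progress: push
+     progress: 2
+     test: suite1.test_a
+     success: suite1.test_a
+     test: suite1.test_b
+     success: suite1.test_b
+     progress: pop
+     progress: push
+     ...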
+
+ The time directive acts as a clock event - it sets the time for all future
+ events. The value should be a valid ISO8601 time.
+
+ The skip, xfail and uxsuccess outcomes are not supported by all testing
+ environments. In Python, the testtools (https://launchpad.net/testtools)
+ library is used to translate these automatically if an older Python version
+ that does not support them is in use. See the testtools documentation for the
+ translation policy.
+
+ skip is used to indicate a test was discovered but not executed. xfail is used
+ to indicate a test that errored in some expected fashion (also known as "TODO"
+ tests in some frameworks). uxsuccess is used to indicate an unexpected success
+ where a test thought to be failing actually passes. It is complementary to
+ xfail.
+
+ Hacking on subunit
+ ------------------
+
+ Releases
+ ========
+
+ * Update versions in configure.ac and python/subunit/__init__.py.
+ * Make PyPI and regular tarball releases. Upload the regular one to LP, the
+ PyPI one to PyPI.
+ * Push a tagged commit.
+
+
+Keywords: python test streaming
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Testing
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..59a1de8f12c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt
@@ -0,0 +1,44 @@
+MANIFEST.in
+NEWS
+README
+setup.py
+filters/subunit-1to2
+filters/subunit-2to1
+filters/subunit-filter
+filters/subunit-ls
+filters/subunit-notify
+filters/subunit-stats
+filters/subunit-tags
+filters/subunit2gtk
+filters/subunit2junitxml
+filters/subunit2pyunit
+filters/tap2subunit
+python/subunit/__init__.py
+python/subunit/chunked.py
+python/subunit/details.py
+python/subunit/filters.py
+python/subunit/iso8601.py
+python/subunit/progress_model.py
+python/subunit/run.py
+python/subunit/test_results.py
+python/subunit/v2.py
+python/subunit/tests/__init__.py
+python/subunit/tests/sample-script.py
+python/subunit/tests/sample-two-script.py
+python/subunit/tests/test_chunked.py
+python/subunit/tests/test_details.py
+python/subunit/tests/test_filters.py
+python/subunit/tests/test_progress_model.py
+python/subunit/tests/test_run.py
+python/subunit/tests/test_subunit_filter.py
+python/subunit/tests/test_subunit_stats.py
+python/subunit/tests/test_subunit_tags.py
+python/subunit/tests/test_tap2subunit.py
+python/subunit/tests/test_test_protocol.py
+python/subunit/tests/test_test_protocol2.py
+python/subunit/tests/test_test_results.py
+python_subunit.egg-info/PKG-INFO
+python_subunit.egg-info/SOURCES.txt
+python_subunit.egg-info/dependency_links.txt
+python_subunit.egg-info/requires.txt
+python_subunit.egg-info/top_level.txt \ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt
new file mode 100644
index 00000000000..865fcc9f52e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt
@@ -0,0 +1,2 @@
+extras
+testtools>=0.9.34 \ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt
new file mode 100644
index 00000000000..d12b7b93e10
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt
@@ -0,0 +1 @@
+subunit
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.cfg b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.cfg
new file mode 100644
index 00000000000..861a9f55426
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.py b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.py
new file mode 100755
index 00000000000..9917977556e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/python-subunit-0.0.16/setup.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+try:
+ # If the user has setuptools / distribute installed, use it
+ from setuptools import setup
+except ImportError:
+ # Otherwise, fall back to distutils.
+ from distutils.core import setup
+ extra = {}
+else:
+ extra = {
+ 'install_requires': [
+ 'extras',
+ 'testtools>=0.9.34',
+ ]
+ }
+
+
+def _get_version_from_file(filename, start_of_line, split_marker):
+ """Extract version from file, giving last matching value or None"""
+ try:
+ return [x for x in open(filename)
+ if x.startswith(start_of_line)][-1].split(split_marker)[1].strip()
+ except (IOError, IndexError):
+ return None
+
+
+VERSION = (
+ # Assume we are in a distribution, which has PKG-INFO
+ _get_version_from_file('PKG-INFO', 'Version:', ':')
+ # Must be a development checkout, so use the Makefile
+ or _get_version_from_file('Makefile', 'VERSION', '=')
+ or "0.0")
+
+
+setup(
+ name='python-subunit',
+ version=VERSION,
+ description=('Python implementation of subunit test streaming protocol'),
+ long_description=open('README').read(),
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development :: Testing',
+ ],
+ keywords='python test streaming',
+ author='Robert Collins',
+ author_email='subunit-dev@lists.launchpad.net',
+ url='http://launchpad.net/subunit',
+ packages=['subunit', 'subunit.tests'],
+ package_dir={'subunit': 'python/subunit'},
+ scripts = [
+ 'filters/subunit-1to2',
+ 'filters/subunit-2to1',
+ 'filters/subunit2gtk',
+ 'filters/subunit2junitxml',
+ 'filters/subunit2pyunit',
+ 'filters/subunit-filter',
+ 'filters/subunit-ls',
+ 'filters/subunit-notify',
+ 'filters/subunit-stats',
+ 'filters/subunit-tags',
+ 'filters/tap2subunit',
+ ],
+ **extra
+)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/.bzrignore b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/.bzrignore
new file mode 100644
index 00000000000..336aaca369d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/.bzrignore
@@ -0,0 +1,5 @@
+TAGS
+tags
+lib/testtools
+MANIFEST
+dist
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Apache-2.0 b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Apache-2.0
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Apache-2.0
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/BSD b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/BSD
new file mode 100644
index 00000000000..0e75db647b3
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/BSD
@@ -0,0 +1,26 @@
+Copyright (c) Robert Collins and Testscenarios contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Robert Collins nor the names of Subunit contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/COPYING b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/COPYING
new file mode 100644
index 00000000000..ee16c4ecaf9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/COPYING
@@ -0,0 +1,31 @@
+Testscenarios is licensed under two licenses, the Apache License, Version 2.0
+or the 3-clause BSD License. You may use this project under either of these
+licenses - choose the one that works best for you.
+
+We require contributions to be licensed under both licenses. The primary
+difference between them is that the Apache license takes care of potential
+issues with Patents and other intellectual property concerns that some users
+or contributors may find important.
+
+Generally every source file in Testscenarios needs a license grant under both
+these licenses. As the code is shipped as a single unit, a brief form is used:
+----
+Copyright (c) [yyyy][,yyyy]* [name or 'Testscenarios Contributors']
+
+Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+license at the users choice. A copy of both licenses are available in the
+project source as Apache-2.0 and BSD. You may not use this file except in
+compliance with one of these two licences.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+license you chose for the specific language governing permissions and
+limitations under that license.
+----
+
+Code that has been incorporated into Testscenarios from other projects will
+naturally be under its own license, and will retain that license.
+
+A known list of such code is maintained here:
+* No entries.
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/GOALS b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/GOALS
new file mode 100644
index 00000000000..68be00129b2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/GOALS
@@ -0,0 +1,25 @@
+
+testscenarios goals
+===================
+
+ * nice, declarative interface for multiplying tests by scenarios.
+
+ * plays nice with testresources - when a scenario uses a resource, the
+ resource ordering logic should be able to group them together.
+
+ * (at user discretion) plays nice with $random test discovery
+
+ * arbitrary post-load multiplication.
+
+ * cross-productable scenarios (for X and for Y)
+
+ * extendable scenarios (for X using Y)
+
+ * scenarios and the tests that use them are loosely coupled
+
+ * tests that use scenarios should be easy to debug
+
+ * fast
+
+ * usable in trial, bzr, Zope testrunner, nose and the default unittest
+ TestRunner
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/HACKING b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/HACKING
new file mode 100644
index 00000000000..0c68ee7da90
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/HACKING
@@ -0,0 +1,38 @@
+Contributing to testscenarios
+=============================
+
+Code access
++++++++++++
+
+Branch from the trunk (all patches should be for trunk unless there are
+exceptional circumstances)::
+
+ bzr branch lp:testscenarios path-to-new-local-branch
+
+Publish your branches wherever you like; I encourage Launchpad hosting though,
+as it can notify me of new testscenarios branches::
+
+ bzr push lp:~YOURUSERNAME/testscenarios/YOURBRANCHNAME
+
+Copyright
++++++++++
+
+Testscenarios is Copyright (C) 2009 Robert Collins. I'd like to be able to
+offer it up for stdlib inclusion once it has proved itself, so am asking for
+copyright assignment to me - or for your contributions to be under either the
+BSD or Apache-2.0 licences that Testscenarios is under (which permit inclusion
+in Python).
+
+Coding standards
+++++++++++++++++
+
+PEP-8 coding style please, though I'm not nitpicky. Make sure that 'make check'
+passes before sending in a patch.
+
+Code arrangement
+++++++++++++++++
+
+The ``testscenarios`` module should simply import classes and functions from
+more specific modules, rather than becoming large and bloated itself. For
+instance, TestWithScenarios lives in testscenarios.testcase, and is imported in
+the testscenarios __init__.py.
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/MANIFEST.in b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/MANIFEST.in
new file mode 100644
index 00000000000..0edefa19588
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/MANIFEST.in
@@ -0,0 +1,10 @@
+include .bzrignore
+include Apache-2.0
+include BSD
+include COPYING
+include GOALS
+include HACKING
+include MANIFEST.in
+include Makefile
+include NEWS
+include doc/*.py
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Makefile b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Makefile
new file mode 100644
index 00000000000..c38edf6bfe7
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/Makefile
@@ -0,0 +1,19 @@
+PYTHONPATH:=$(shell pwd)/lib:${PYTHONPATH}
+PYTHON ?= python
+
+all: check
+
+check:
+ PYTHONPATH=$(PYTHONPATH) $(PYTHON) -m testtools.run \
+ testscenarios.test_suite
+
+clean:
+ find . -name '*.pyc' -print0 | xargs -0 rm -f
+
+TAGS: lib/testscenarios/*.py lib/testscenarios/tests/*.py
+ ctags -e -R lib/testscenarios/
+
+tags: lib/testscenarios/*.py lib/testscenarios/tests/*.py
+ ctags -R lib/testscenarios/
+
+.PHONY: all check clean
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/NEWS b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/NEWS
new file mode 100644
index 00000000000..fc3a10c469a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/NEWS
@@ -0,0 +1,56 @@
+---------------------------
+testscenarios release notes
+---------------------------
+
+
+IN DEVELOPMENT
+~~~~~~~~~~~~~~
+
+0.4
+~~~
+
+IMPROVEMENTS
+------------
+
+* Python 3.2 support added. (Robert Collins)
+
+0.3
+~~~
+
+CHANGES
+-------
+
+* New function ``per_module_scenarios`` for tests that should be applied across
+ multiple modules providing the same interface, some of which may not be
+ available at run time. (Martin Pool)
+
+* ``TestWithScenarios`` is now backed by a mixin - WithScenarios - which can be
+ mixed into different unittest implementations more cleanly (e.g. unittest2).
+ (James Polley, Robert Collins)
+
+0.2
+~~~
+
+CHANGES
+-------
+
+* Adjust the cloned test's ``shortDescription`` if one is present. (Ben Finney)
+
+* Provide a load_tests implementation for easy use, and multiply_scenarios to
+ create the cross product of scenarios. (Martin Pool)
+
+0.1
+~~~
+
+CHANGES
+-------
+
+* Created project. The primary interfaces are
+ ``testscenarios.TestWithScenarios`` and
+ ``testscenarios.generate_scenarios``. Documentation is primarily in README.
+ (Robert Collins)
+
+* Make the README documentation doctest compatible, to be sure it works.
+ Also various presentation and language touchups. (Martin Pool)
+ (Adjusted to use doctest directly, and to not print the demo runner's
+ output to stderr during make check - Robert Collins)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/PKG-INFO
new file mode 100644
index 00000000000..f3ab96a5653
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/PKG-INFO
@@ -0,0 +1,335 @@
+Metadata-Version: 1.1
+Name: testscenarios
+Version: 0.4
+Summary: Testscenarios, a pyunit extension for dependency injection
+Home-page: https://launchpad.net/testscenarios
+Author: Robert Collins
+Author-email: robertc@robertcollins.net
+License: UNKNOWN
+Description: *****************************************************************
+ testscenarios: extensions to python unittest to support scenarios
+ *****************************************************************
+
+ Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+
+ testscenarios provides clean dependency injection for python unittest style
+ tests. This can be used for interface testing (testing many implementations via
+ a single test suite) or for classic dependency injection (provide tests with
+ dependencies externally to the test code itself, allowing easy testing in
+ different situations).
+
+ Dependencies
+ ============
+
+ * Python 2.4+
+ * testtools <https://launchpad.net/testtools>
+
+
+ Why TestScenarios
+ =================
+
+ Standard Python unittest.py provides one obvious method for running a single
+ test_foo method with two (or more) scenarios: by creating a mix-in that
+ provides the functions, objects or settings that make up the scenario. This is
+ however limited and unsatisfying. Firstly, when two projects are cooperating
+ on a test suite (for instance, a plugin to a larger project may want to run
+ the standard tests for a given interface on its implementation), then it is
+ easy for them to get out of sync with each other: when the list of TestCase
+ classes to mix-in with changes, the plugin will either fail to run some tests
+ or error trying to run deleted tests. Secondly, it's not as easy to work with
+ runtime-created-subclasses (a way of dealing with the aforementioned skew)
+ because they require more indirection to locate the source of the test, and will
+ often be ignored by e.g. pyflakes, pylint, etc.
+
+ It is the intent of testscenarios to make dynamically running a single test
+ in multiple scenarios clear, easy to debug and work with even when the list
+ of scenarios is dynamically generated.
+
+
+ Defining Scenarios
+ ==================
+
+ A **scenario** is a tuple of a string name for the scenario, and a dict of
+ parameters describing the scenario. The name is appended to the test name, and
+ the parameters are made available to the test instance when it's run.
+
+ Scenarios are presented in **scenario lists** which are typically Python lists
+ but may be any iterable.
+
+
+ Getting Scenarios applied
+ =========================
+
+ At its heart the concept is simple. For a given test object with a list of
+ scenarios we prepare a new test object for each scenario. This involves:
+
+ * Clone the test to a new test with a new id uniquely distinguishing it.
+ * Apply the scenario to the test by setting each key, value in the scenario
+ as attributes on the test object.
+
+ There are some complicating factors around making this happen seamlessly. These
+ factors are in two areas:
+
+ * Choosing what scenarios to use. (See Setting Scenarios For A Test).
+ * Getting the multiplication to happen.
+
+ Subclassing
+ ++++++++++++
+
+ If you can subclass TestWithScenarios, then the ``run()`` method in
+ TestWithScenarios will take care of test multiplication. It will at test
+ execution act as a generator causing multiple tests to execute. For this to
+ work reliably TestWithScenarios must be first in the MRO and you cannot
+ override run() or __call__. This is the most robust method, in the sense
+ that any test runner or test loader that obeys the python unittest protocol
+ will run all your scenarios.
+
+ Manual generation
+ +++++++++++++++++
+
+ If you cannot subclass TestWithScenarios (e.g. because you are using
+ TwistedTestCase, or TestCaseWithResources, or any one of a number of other
+ useful test base classes, or need to override run() or __call__ yourself) then
+ you can cause scenario application to happen later by calling
+ ``testscenarios.generate_scenarios()``. For instance::
+
+ >>> import unittest
+ >>> try:
+ ... from StringIO import StringIO
+ ... except ImportError:
+ ... from io import StringIO
+ >>> from testscenarios.scenarios import generate_scenarios
+
+ This can work with loaders and runners from the standard library, or possibly other
+ implementations::
+
+ >>> loader = unittest.TestLoader()
+ >>> test_suite = unittest.TestSuite()
+ >>> runner = unittest.TextTestRunner(stream=StringIO())
+
+ >>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
+ >>> test_suite.addTests(generate_scenarios(mytests))
+ >>> runner.run(test_suite)
+ <unittest...TextTestResult run=1 errors=0 failures=0>
+
+ Testloaders
+ +++++++++++
+
+ Some test loaders support hooks like ``load_tests`` and ``test_suite``.
+ Ensuring your tests have had scenario application done through these hooks can
+ be a good idea - it means that external test runners which support these hooks
+ (like ``nose``, ``trial``, ``tribunal``) will still run your scenarios. (Of
+ course, if you are using the subclassing approach this is already a surety).
+ With ``load_tests``::
+
+ >>> def load_tests(standard_tests, module, loader):
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(standard_tests))
+ ... return result
+
+ as a convenience, this is available in ``load_tests_apply_scenarios``, so a
+ module using scenario tests need only say ::
+
+ >>> from testscenarios import load_tests_apply_scenarios as load_tests
+
+ Python 2.7 and greater support a different calling convention for ``load_tests``
+ <https://bugs.launchpad.net/bzr/+bug/607412>. ``load_tests_apply_scenarios``
+ copes with both.
+
+ With ``test_suite``::
+
+ >>> def test_suite():
+ ... loader = TestLoader()
+ ... tests = loader.loadTestsFromName(__name__)
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(tests))
+ ... return result
+
+
+ Setting Scenarios for a test
+ ============================
+
+ A sample test using scenarios can be found in the doc/ folder.
+
+ See `pydoc testscenarios` for details.
+
+ On the TestCase
+ +++++++++++++++
+
+ You can set a scenarios attribute on the test case::
+
+ >>> class MyTest(unittest.TestCase):
+ ...
+ ... scenarios = [
+ ... ('scenario1', dict(param=1)),
+ ... ('scenario2', dict(param=2)),]
+
+ This provides the main interface by which scenarios are found for a given test.
+ Subclasses will inherit the scenarios (unless they override the attribute).
+
+ After loading
+ +++++++++++++
+
+ Test scenarios can also be generated arbitrarily later, as long as the test has
+ not yet run. Simply replace (or alter, but be aware that many tests may share a
+ single scenarios attribute) the scenarios attribute. For instance in this
+ example some third party tests are extended to run with a custom scenario. ::
+
+ >>> import testtools
+ >>> class TestTransport:
+ ... """Hypothetical test case for bzrlib transport tests"""
+ ... pass
+ ...
+ >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames(
+ ... ['doc.test_sample'])
+ ...
+ >>> for test in testtools.iterate_tests(stock_library_tests):
+ ... if isinstance(test, TestTransport):
+ ... test.scenarios = test.scenarios + [my_vfs_scenario]
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(stock_library_tests))
+
+ Generated tests don't have a ``scenarios`` list, because they don't normally
+ require any more expansion. However, you can add a ``scenarios`` list back on
+ to them, and then run them through ``generate_scenarios`` again to generate the
+ cross product of tests. ::
+
+ >>> class CrossProductDemo(unittest.TestCase):
+ ... scenarios = [('scenario_0_0', {}),
+ ... ('scenario_0_1', {})]
+ ... def test_foo(self):
+ ... return
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo")))
+ >>> for test in testtools.iterate_tests(suite):
+ ... test.scenarios = [
+ ... ('scenario_1_0', {}),
+ ... ('scenario_1_1', {})]
+ ...
+ >>> suite2 = unittest.TestSuite()
+ >>> suite2.addTests(generate_scenarios(suite))
+ >>> print(suite2.countTestCases())
+ 4
+
+ Dynamic Scenarios
+ +++++++++++++++++
+
+ A common use case is to have the list of scenarios be dynamic based on plugins
+ and available libraries. An easy way to do this is to provide a global-scope
+ scenario list somewhere relevant to the tests that will use it; that list can
+ then be customised, or populated dynamically from a registry, etc.
+ For instance::
+
+ >>> hash_scenarios = []
+ >>> try:
+ ... from hashlib import md5
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("md5", dict(hash=md5)))
+ >>> try:
+ ... from hashlib import sha1
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("sha1", dict(hash=sha1)))
+ ...
+ >>> class TestHashContract(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+ ...
+ >>> class TestHashPerformance(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+
+
+ Forcing Scenarios
+ +++++++++++++++++
+
+ The ``apply_scenarios`` function can be useful to apply scenarios to a test
+ that has none applied. ``apply_scenarios`` is the workhorse for
+ ``generate_scenarios``, except it takes the scenarios passed in rather than
+ introspecting the test object to determine the scenarios. The
+ ``apply_scenarios`` function does not reset the test scenarios attribute,
+ allowing it to be used to layer scenarios without affecting existing scenario
+ selection.
+
+
+ Generating Scenarios
+ ====================
+
+ Some functions (currently one :-) are available to ease generation of scenario
+ lists for common situations.
+
+ Testing Per Implementation Module
+ +++++++++++++++++++++++++++++++++
+
+ It is reasonably common to have multiple Python modules that provide the same
+ capabilities and interface, and to want to apply the same tests to all of them.
+
+ In some cases, not all of the statically defined implementations will be able
+ to be used in a particular testing environment. For example, there may be both
+ a C and a pure-Python implementation of a module. You want to test the C
+ module if it can be loaded, but also to have the tests pass if the C module has
+ not been compiled.
+
+ The ``per_module_scenarios`` function generates a scenario for each named
+ module. The module object of the imported module is set in the supplied
+ attribute name of the resulting scenario.
+ Modules which raise ``ImportError`` during import will have the
+ ``sys.exc_info()`` of the exception set instead of the module object. Tests
+ can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+ Note that for the test to be valid, all access to the module under test must go
+ through the relevant attribute of the test object. If one of the
+ implementations is also directly imported by the test module or any other,
+ testscenarios will not magically stop it being used.
+
+
+ Advice on Writing Scenarios
+ ===========================
+
+ If a parameterised test is, because of a bug, run without being parameterised,
+ it should fail rather than run with defaults, because this can hide bugs.
+
+
+ Producing Scenarios
+ ===================
+
+ The `multiply_scenarios` function produces the cross-product of the scenarios
+ passed in::
+
+ >>> from testscenarios.scenarios import multiply_scenarios
+ >>>
+ >>> scenarios = multiply_scenarios(
+ ... [('scenario1', dict(param1=1)), ('scenario2', dict(param1=2))],
+ ... [('scenario2', dict(param2=1))],
+ ... )
+ >>> scenarios == [('scenario1,scenario2', {'param2': 1, 'param1': 1}),
+ ... ('scenario2,scenario2', {'param2': 1, 'param1': 2})]
+ True
+
+Platform: UNKNOWN
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/README b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/README
new file mode 100644
index 00000000000..e7e7eb717e0
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/README
@@ -0,0 +1,316 @@
+*****************************************************************
+testscenarios: extensions to python unittest to support scenarios
+*****************************************************************
+
+ Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+
+testscenarios provides clean dependency injection for python unittest style
+tests. This can be used for interface testing (testing many implementations via
+a single test suite) or for classic dependency injection (provide tests with
+dependencies externally to the test code itself, allowing easy testing in
+different situations).
+
+Dependencies
+============
+
+* Python 2.4+
+* testtools <https://launchpad.net/testtools>
+
+
+Why TestScenarios
+=================
+
+Standard Python unittest.py provides one obvious method for running a single
+test_foo method with two (or more) scenarios: by creating a mix-in that
+provides the functions, objects or settings that make up the scenario. This is
+however limited and unsatisfying. Firstly, when two projects are cooperating
+on a test suite (for instance, a plugin to a larger project may want to run
+the standard tests for a given interface on its implementation), then it is
+easy for them to get out of sync with each other: when the list of TestCase
+classes to mix-in with changes, the plugin will either fail to run some tests
+or error trying to run deleted tests. Secondly, it's not as easy to work with
+runtime-created-subclasses (a way of dealing with the aforementioned skew)
+because they require more indirection to locate the source of the test, and will
+often be ignored by e.g. pyflakes, pylint, etc.
+
+It is the intent of testscenarios to make dynamically running a single test
+in multiple scenarios clear, easy to debug and work with even when the list
+of scenarios is dynamically generated.
+
+
+Defining Scenarios
+==================
+
+A **scenario** is a tuple of a string name for the scenario, and a dict of
+parameters describing the scenario. The name is appended to the test name, and
+the parameters are made available to the test instance when it's run.
+
+Scenarios are presented in **scenario lists** which are typically Python lists
+but may be any iterable.
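+
+For example, a single scenario (the name and parameter below are purely
+illustrative) might look like::
+
+    ('ascii', dict(encoding='ascii'))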
+
+
+Getting Scenarios applied
+=========================
+
+At its heart the concept is simple. For a given test object with a list of
+scenarios we prepare a new test object for each scenario. This involves:
+
+* Clone the test to a new test with a new id uniquely distinguishing it.
+* Apply the scenario to the test by setting each key, value in the scenario
+ as attributes on the test object.
+
+There are some complicating factors around making this happen seamlessly. These
+factors are in two areas:
+
+* Choosing what scenarios to use. (See Setting Scenarios For A Test).
+* Getting the multiplication to happen.
+
+Subclassing
+++++++++++++
+
+If you can subclass TestWithScenarios, then the ``run()`` method in
+TestWithScenarios will take care of test multiplication. It will at test
+execution act as a generator causing multiple tests to execute. For this to
+work reliably TestWithScenarios must be first in the MRO and you cannot
+override run() or __call__. This is the most robust method, in the sense
+that any test runner or test loader that obeys the python unittest protocol
+will run all your scenarios.
+
+Manual generation
++++++++++++++++++
+
+If you cannot subclass TestWithScenarios (e.g. because you are using
+TwistedTestCase, or TestCaseWithResources, or any one of a number of other
+useful test base classes, or need to override run() or __call__ yourself) then
+you can cause scenario application to happen later by calling
+``testscenarios.generate_scenarios()``. For instance::
+
+ >>> import unittest
+ >>> try:
+ ... from StringIO import StringIO
+ ... except ImportError:
+ ... from io import StringIO
+ >>> from testscenarios.scenarios import generate_scenarios
+
+This can work with loaders and runners from the standard library, or possibly other
+implementations::
+
+ >>> loader = unittest.TestLoader()
+ >>> test_suite = unittest.TestSuite()
+ >>> runner = unittest.TextTestRunner(stream=StringIO())
+
+ >>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
+ >>> test_suite.addTests(generate_scenarios(mytests))
+ >>> runner.run(test_suite)
+ <unittest...TextTestResult run=1 errors=0 failures=0>
+
+Testloaders
++++++++++++
+
+Some test loaders support hooks like ``load_tests`` and ``test_suite``.
+Ensuring your tests have had scenario application done through these hooks can
+be a good idea - it means that external test runners which support these hooks
+(like ``nose``, ``trial``, ``tribunal``) will still run your scenarios. (Of
+course, if you are using the subclassing approach this is already a surety).
+With ``load_tests``::
+
+ >>> def load_tests(standard_tests, module, loader):
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(standard_tests))
+ ... return result
+
+as a convenience, this is available in ``load_tests_apply_scenarios``, so a
+module using scenario tests need only say ::
+
+ >>> from testscenarios import load_tests_apply_scenarios as load_tests
+
+Python 2.7 and greater support a different calling convention for ``load_tests``
+<https://bugs.launchpad.net/bzr/+bug/607412>. ``load_tests_apply_scenarios``
+copes with both.
+
+With ``test_suite``::
+
+ >>> def test_suite():
+ ... loader = TestLoader()
+ ... tests = loader.loadTestsFromName(__name__)
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(tests))
+ ... return result
+
+
+Setting Scenarios for a test
+============================
+
+A sample test using scenarios can be found in the doc/ folder.
+
+See `pydoc testscenarios` for details.
+
+On the TestCase
++++++++++++++++
+
+You can set a scenarios attribute on the test case::
+
+ >>> class MyTest(unittest.TestCase):
+ ...
+ ... scenarios = [
+ ... ('scenario1', dict(param=1)),
+ ... ('scenario2', dict(param=2)),]
+
+This provides the main interface by which scenarios are found for a given test.
+Subclasses will inherit the scenarios (unless they override the attribute).
+
+After loading
++++++++++++++
+
+Test scenarios can also be generated arbitrarily later, as long as the test has
+not yet run. Simply replace (or alter, but be aware that many tests may share a
+single scenarios attribute) the scenarios attribute. For instance in this
+example some third party tests are extended to run with a custom scenario. ::
+
+ >>> import testtools
+ >>> class TestTransport:
+ ... """Hypothetical test case for bzrlib transport tests"""
+ ... pass
+ ...
+ >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames(
+ ... ['doc.test_sample'])
+ ...
+ >>> for test in testtools.iterate_tests(stock_library_tests):
+ ... if isinstance(test, TestTransport):
+ ... test.scenarios = test.scenarios + [my_vfs_scenario]
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(stock_library_tests))
+
+Generated tests don't have a ``scenarios`` list, because they don't normally
+require any more expansion. However, you can add a ``scenarios`` list back on
+to them, and then run them through ``generate_scenarios`` again to generate the
+cross product of tests. ::
+
+ >>> class CrossProductDemo(unittest.TestCase):
+ ... scenarios = [('scenario_0_0', {}),
+ ... ('scenario_0_1', {})]
+ ... def test_foo(self):
+ ... return
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo")))
+ >>> for test in testtools.iterate_tests(suite):
+ ... test.scenarios = [
+ ... ('scenario_1_0', {}),
+ ... ('scenario_1_1', {})]
+ ...
+ >>> suite2 = unittest.TestSuite()
+ >>> suite2.addTests(generate_scenarios(suite))
+ >>> print(suite2.countTestCases())
+ 4
+
+Dynamic Scenarios
++++++++++++++++++
+
+A common use case is to have the list of scenarios be dynamic based on plugins
+and available libraries. An easy way to do this is to provide a global-scope
+scenario list somewhere relevant to the tests that will use it; that list can
+then be customised, or populated dynamically from a registry, etc.
+For instance::
+
+ >>> hash_scenarios = []
+ >>> try:
+ ... from hashlib import md5
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("md5", dict(hash=md5)))
+ >>> try:
+ ... from hashlib import sha1
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("sha1", dict(hash=sha1)))
+ ...
+ >>> class TestHashContract(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+ ...
+ >>> class TestHashPerformance(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+
+
+Forcing Scenarios
++++++++++++++++++
+
+The ``apply_scenarios`` function can be useful to apply scenarios to a test
+that has none applied. ``apply_scenarios`` is the workhorse for
+``generate_scenarios``, except it takes the scenarios passed in rather than
+introspecting the test object to determine the scenarios. The
+``apply_scenarios`` function does not reset the test scenarios attribute,
+allowing it to be used to layer scenarios without affecting existing scenario
+selection.
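+
+As a rough sketch (the argument order of ``apply_scenarios`` below is an
+assumption; see ``pydoc testscenarios.scenarios`` for the authoritative
+interface), layering an extra, purely illustrative scenario onto tests that
+``generate_scenarios`` has already expanded might look like::
+
+    import unittest
+    import testtools
+    from testscenarios.scenarios import apply_scenarios, generate_scenarios
+
+    base_suite = unittest.TestSuite()
+    base_suite.addTests(generate_scenarios(CrossProductDemo('test_foo')))
+    extra_scenarios = [('tracing', dict(tracing=True))]
+
+    layered_suite = unittest.TestSuite()
+    for test in testtools.iterate_tests(base_suite):
+        # Assumed behaviour: one new test is produced per scenario passed in,
+        # and the original test's own scenario selection is left untouched.
+        layered_suite.addTests(apply_scenarios(extra_scenarios, test))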
+
+
+Generating Scenarios
+====================
+
+Some functions (currently one :-) are available to ease generation of scenario
+lists for common situations.
+
+Testing Per Implementation Module
++++++++++++++++++++++++++++++++++
+
+It is reasonably common to have multiple Python modules that provide the same
+capabilities and interface, and to want to apply the same tests to all of them.
+
+In some cases, not all of the statically defined implementations will be able
+to be used in a particular testing environment. For example, there may be both
+a C and a pure-Python implementation of a module. You want to test the C
+module if it can be loaded, but also to have the tests pass if the C module has
+not been compiled.
+
+The ``per_module_scenarios`` function generates a scenario for each named
+module. The module object of the imported module is set in the supplied
+attribute name of the resulting scenario.
+Modules which raise ``ImportError`` during import will have the
+``sys.exc_info()`` of the exception set instead of the module object. Tests
+can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+Note that for the test to be valid, all access to the module under test must go
+through the relevant attribute of the test object. If one of the
+implementations is also directly imported by the test module or any other,
+testscenarios will not magically stop it being used.
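+
+As an illustrative sketch only (the argument layout of
+``per_module_scenarios`` below is an assumption; see
+``pydoc testscenarios.scenarios`` for the real interface), running one test
+against both the ``json`` and ``simplejson`` modules might look like::
+
+    from testscenarios import TestWithScenarios
+    from testscenarios.scenarios import per_module_scenarios
+
+    # Assumed layout: an attribute name plus (short name, module path) pairs.
+    json_scenarios = per_module_scenarios(
+        'json_impl', [('json', 'json'), ('simplejson', 'simplejson')])
+
+    class TestJsonRoundTrip(TestWithScenarios):
+
+        scenarios = json_scenarios
+
+        def test_roundtrip(self):
+            if isinstance(self.json_impl, tuple):
+                # The import failed; the attribute holds sys.exc_info().
+                self.skipTest('implementation not available')
+            data = {'answer': 42}
+            self.assertEqual(
+                data, self.json_impl.loads(self.json_impl.dumps(data)))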
+
+
+Advice on Writing Scenarios
+===========================
+
+If a parameterised test is, because of a bug, run without being parameterised,
+it should fail rather than run with defaults, because this can hide bugs.
+
+
+Producing Scenarios
+===================
+
+The `multiply_scenarios` function produces the cross-product of the scenarios
+passed in::
+
+ >>> from testscenarios.scenarios import multiply_scenarios
+ >>>
+ >>> scenarios = multiply_scenarios(
+ ... [('scenario1', dict(param1=1)), ('scenario2', dict(param1=2))],
+ ... [('scenario2', dict(param2=1))],
+ ... )
+ >>> scenarios == [('scenario1,scenario2', {'param2': 1, 'param1': 1}),
+ ... ('scenario2,scenario2', {'param2': 1, 'param1': 2})]
+ True
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/__init__.py
new file mode 100644
index 00000000000..4dbad55dcbb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/__init__.py
@@ -0,0 +1,16 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/example.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/example.py
new file mode 100644
index 00000000000..a8d195fade2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/example.py
@@ -0,0 +1,30 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+"""Example TestScenario."""
+
+from testscenarios import TestWithScenarios
+
+
+scenario1 = ('basic', {'attribute': 'value'})
+scenario2 = ('advanced', {'attribute': 'value2'})
+
+
+class SampleWithScenarios(TestWithScenarios):
+
+ scenarios = [scenario1, scenario2]
+
+ def test_demo(self):
+ self.assertIsInstance(self.attribute, str)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/test_sample.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/test_sample.py
new file mode 100644
index 00000000000..a0b00a5ef54
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/doc/test_sample.py
@@ -0,0 +1,22 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+import unittest
+
+class TestSample(unittest.TestCase):
+
+ def test_so_easy(self):
+ pass
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO
new file mode 100644
index 00000000000..f3ab96a5653
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO
@@ -0,0 +1,335 @@
+Metadata-Version: 1.1
+Name: testscenarios
+Version: 0.4
+Summary: Testscenarios, a pyunit extension for dependency injection
+Home-page: https://launchpad.net/testscenarios
+Author: Robert Collins
+Author-email: robertc@robertcollins.net
+License: UNKNOWN
+Description: *****************************************************************
+ testscenarios: extensions to python unittest to support scenarios
+ *****************************************************************
+
+ Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+
+ testscenarios provides clean dependency injection for python unittest style
+ tests. This can be used for interface testing (testing many implementations via
+ a single test suite) or for classic dependency injection (provide tests with
+ dependencies externally to the test code itself, allowing easy testing in
+ different situations).
+
+ Dependencies
+ ============
+
+ * Python 2.4+
+ * testtools <https://launchpad.net/testtools>
+
+
+ Why TestScenarios
+ =================
+
+ Standard Python unittest.py provides one obvious method for running a single
+ test_foo method with two (or more) scenarios: by creating a mix-in that
+ provides the functions, objects or settings that make up the scenario. This is
+ however limited and unsatisfying. Firstly, when two projects are cooperating
+ on a test suite (for instance, a plugin to a larger project may want to run
+ the standard tests for a given interface on its implementation), then it is
+ easy for them to get out of sync with each other: when the list of TestCase
+ classes to mix-in with changes, the plugin will either fail to run some tests
+ or error trying to run deleted tests. Secondly, it's not as easy to work with
+ runtime-created-subclasses (a way of dealing with the aforementioned skew)
+ because they require more indirection to locate the source of the test, and will
+ often be ignored by e.g. pyflakes, pylint, etc.
+
+ It is the intent of testscenarios to make dynamically running a single test
+ in multiple scenarios clear, easy to debug and work with even when the list
+ of scenarios is dynamically generated.
+
+
+ Defining Scenarios
+ ==================
+
+ A **scenario** is a tuple of a string name for the scenario, and a dict of
+ parameters describing the scenario. The name is appended to the test name, and
+ the parameters are made available to the test instance when it's run.
+
+ Scenarios are presented in **scenario lists** which are typically Python lists
+ but may be any iterable.
+
+
+ Getting Scenarios applied
+ =========================
+
+ At its heart the concept is simple. For a given test object with a list of
+ scenarios we prepare a new test object for each scenario. This involves:
+
+ * Clone the test to a new test with a new id uniquely distinguishing it.
+ * Apply the scenario to the test by setting each key, value in the scenario
+ as attributes on the test object.
+
+ There are some complicating factors around making this happen seamlessly. These
+ factors are in two areas:
+
+ * Choosing what scenarios to use. (See Setting Scenarios For A Test).
+ * Getting the multiplication to happen.
+
+ Subclassing
+ ++++++++++++
+
+ If you can subclass TestWithScenarios, then the ``run()`` method in
+ TestWithScenarios will take care of test multiplication. It will at test
+ execution act as a generator causing multiple tests to execute. For this to
+ work reliably TestWithScenarios must be first in the MRO and you cannot
+ override run() or __call__. This is the most robust method, in the sense
+ that any test runner or test loader that obeys the python unittest protocol
+ will run all your scenarios.
+
+ Manual generation
+ +++++++++++++++++
+
+ If you cannot subclass TestWithScenarios (e.g. because you are using
+ TwistedTestCase, or TestCaseWithResources, or any one of a number of other
+ useful test base classes, or need to override run() or __call__ yourself) then
+ you can cause scenario application to happen later by calling
+ ``testscenarios.generate_scenarios()``. For instance::
+
+ >>> import unittest
+ >>> try:
+ ... from StringIO import StringIO
+ ... except ImportError:
+ ... from io import StringIO
+ >>> from testscenarios.scenarios import generate_scenarios
+
+ This can work with loaders and runners from the standard library, or possibly other
+ implementations::
+
+ >>> loader = unittest.TestLoader()
+ >>> test_suite = unittest.TestSuite()
+ >>> runner = unittest.TextTestRunner(stream=StringIO())
+
+ >>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
+ >>> test_suite.addTests(generate_scenarios(mytests))
+ >>> runner.run(test_suite)
+ <unittest...TextTestResult run=1 errors=0 failures=0>
+
+ Testloaders
+ +++++++++++
+
+ Some test loaders support hooks like ``load_tests`` and ``test_suite``.
+ Ensuring your tests have had scenario application done through these hooks can
+ be a good idea - it means that external test runners which support these hooks
+ (like ``nose``, ``trial``, ``tribunal``) will still run your scenarios. (Of
+ course, if you are using the subclassing approach this is already a surety).
+ With ``load_tests``::
+
+ >>> def load_tests(standard_tests, module, loader):
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(standard_tests))
+ ... return result
+
+ as a convenience, this is available in ``load_tests_apply_scenarios``, so a
+ module using scenario tests need only say ::
+
+ >>> from testscenarios import load_tests_apply_scenarios as load_tests
+
+ Python 2.7 and greater support a different calling convention for ``load_tests``
+ <https://bugs.launchpad.net/bzr/+bug/607412>. ``load_tests_apply_scenarios``
+ copes with both.
+
+ With ``test_suite``::
+
+ >>> def test_suite():
+ ... loader = TestLoader()
+ ... tests = loader.loadTestsFromName(__name__)
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(tests))
+ ... return result
+
+
+ Setting Scenarios for a test
+ ============================
+
+ A sample test using scenarios can be found in the doc/ folder.
+
+ See `pydoc testscenarios` for details.
+
+ On the TestCase
+ +++++++++++++++
+
+ You can set a scenarios attribute on the test case::
+
+ >>> class MyTest(unittest.TestCase):
+ ...
+ ... scenarios = [
+ ... ('scenario1', dict(param=1)),
+ ... ('scenario2', dict(param=2)),]
+
+ This provides the main interface by which scenarios are found for a given test.
+ Subclasses will inherit the scenarios (unless they override the attribute).
+
+ After loading
+ +++++++++++++
+
+ Test scenarios can also be generated arbitrarily later, as long as the test has
+ not yet run. Simply replace (or alter, but be aware that many tests may share a
+ single scenarios attribute) the scenarios attribute. For instance in this
+ example some third party tests are extended to run with a custom scenario. ::
+
+ >>> import testtools
+ >>> class TestTransport:
+ ... """Hypothetical test case for bzrlib transport tests"""
+ ... pass
+ ...
+ >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames(
+ ... ['doc.test_sample'])
+ ...
+ >>> for test in testtools.iterate_tests(stock_library_tests):
+ ... if isinstance(test, TestTransport):
+ ... test.scenarios = test.scenarios + [my_vfs_scenario]
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(stock_library_tests))
+
+ Generated tests don't have a ``scenarios`` list, because they don't normally
+ require any more expansion. However, you can add a ``scenarios`` list back on
+ to them, and then run them through ``generate_scenarios`` again to generate the
+ cross product of tests. ::
+
+ >>> class CrossProductDemo(unittest.TestCase):
+ ... scenarios = [('scenario_0_0', {}),
+ ... ('scenario_0_1', {})]
+ ... def test_foo(self):
+ ... return
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo")))
+ >>> for test in testtools.iterate_tests(suite):
+ ... test.scenarios = [
+ ... ('scenario_1_0', {}),
+ ... ('scenario_1_1', {})]
+ ...
+ >>> suite2 = unittest.TestSuite()
+ >>> suite2.addTests(generate_scenarios(suite))
+ >>> print(suite2.countTestCases())
+ 4
+
+ Dynamic Scenarios
+ +++++++++++++++++
+
+ A common use case is to have the list of scenarios be dynamic based on plugins
+ and available libraries. An easy way to do this is to provide a global-scope
+ scenario list somewhere relevant to the tests that will use it; that list can
+ then be customised, or populated dynamically from a registry, etc.
+ For instance::
+
+ >>> hash_scenarios = []
+ >>> try:
+ ... from hashlib import md5
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("md5", dict(hash=md5)))
+ >>> try:
+ ... from hashlib import sha1
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("sha1", dict(hash=sha1)))
+ ...
+ >>> class TestHashContract(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+ ...
+ >>> class TestHashPerformance(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+
+
+ Forcing Scenarios
+ +++++++++++++++++
+
+ The ``apply_scenarios`` function can be useful to apply scenarios to a test
+ that has none applied. ``apply_scenarios`` is the workhorse for
+ ``generate_scenarios``, except it takes the scenarios passed in rather than
+ introspecting the test object to determine the scenarios. The
+ ``apply_scenarios`` function does not reset the test scenarios attribute,
+ allowing it to be used to layer scenarios without affecting existing scenario
+ selection.
+
+
+ Generating Scenarios
+ ====================
+
+ Some functions (currently one :-) are available to ease generation of scenario
+ lists for common situations.
+
+ Testing Per Implementation Module
+ +++++++++++++++++++++++++++++++++
+
+ It is reasonably common to have multiple Python modules that provide the same
+ capabilities and interface, and to want to apply the same tests to all of them.
+
+ In some cases, not all of the statically defined implementations will be able
+ to be used in a particular testing environment. For example, there may be both
+ a C and a pure-Python implementation of a module. You want to test the C
+ module if it can be loaded, but also to have the tests pass if the C module has
+ not been compiled.
+
+ The ``per_module_scenarios`` function generates a scenario for each named
+ module. The module object of the imported module is set in the supplied
+ attribute name of the resulting scenario.
+ Modules which raise ``ImportError`` during import will have the
+ ``sys.exc_info()`` of the exception set instead of the module object. Tests
+ can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+ Note that for the test to be valid, all access to the module under test must go
+ through the relevant attribute of the test object. If one of the
+ implementations is also directly imported by the test module or any other,
+ testscenarios will not magically stop it being used.
+
+
+ Advice on Writing Scenarios
+ ===========================
+
+ If a parameterised test is, because of a bug, run without being parameterised,
+ it should fail rather than run with defaults, because this can hide bugs.
+
+
+ Producing Scenarios
+ ===================
+
+ The `multiply_scenarios` function produces the cross-product of the scenarios
+ passed in::
+
+ >>> from testscenarios.scenarios import multiply_scenarios
+ >>>
+ >>> scenarios = multiply_scenarios(
+ ... [('scenario1', dict(param1=1)), ('scenario2', dict(param1=2))],
+ ... [('scenario2', dict(param2=1))],
+ ... )
+ >>> scenarios == [('scenario1,scenario2', {'param2': 1, 'param1': 1}),
+ ... ('scenario2,scenario2', {'param2': 1, 'param1': 2})]
+ True
+
+Platform: UNKNOWN
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..32492e1b819
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt
@@ -0,0 +1,25 @@
+.bzrignore
+Apache-2.0
+BSD
+COPYING
+GOALS
+HACKING
+MANIFEST.in
+Makefile
+NEWS
+README
+setup.py
+doc/__init__.py
+doc/example.py
+doc/test_sample.py
+lib/testscenarios/__init__.py
+lib/testscenarios/scenarios.py
+lib/testscenarios/testcase.py
+lib/testscenarios.egg-info/PKG-INFO
+lib/testscenarios.egg-info/SOURCES.txt
+lib/testscenarios.egg-info/dependency_links.txt
+lib/testscenarios.egg-info/requires.txt
+lib/testscenarios.egg-info/top_level.txt
+lib/testscenarios/tests/__init__.py
+lib/testscenarios/tests/test_scenarios.py
+lib/testscenarios/tests/test_testcase.py \ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt
new file mode 100644
index 00000000000..ccdb4f2ad56
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt
@@ -0,0 +1 @@
+testtools \ No newline at end of file
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt
new file mode 100644
index 00000000000..b0ec88e9d5a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt
@@ -0,0 +1 @@
+testscenarios
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py
new file mode 100644
index 00000000000..ceacf37ddca
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py
@@ -0,0 +1,74 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+
+"""Support for running tests with different scenarios declaratively
+
+Testscenarios provides clean dependency injection for Python unittest-style
+tests. This can be used for interface testing (testing many implementations via
+a single test suite) or for classic dependency injection (providing tests with
+dependencies externally to the test code itself, allowing easy testing in
+different situations).
+
+See the README for a manual, and the docstrings on individual functions and
+methods for details.
+"""
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+__version__ = (0, 4, 0, 'final', 0)
+
+__all__ = [
+ 'TestWithScenarios',
+ 'WithScenarios',
+ 'apply_scenario',
+ 'apply_scenarios',
+ 'generate_scenarios',
+ 'load_tests_apply_scenarios',
+ 'multiply_scenarios',
+ 'per_module_scenarios',
+ ]
+
+
+import unittest
+
+from testscenarios.scenarios import (
+ apply_scenario,
+ generate_scenarios,
+ load_tests_apply_scenarios,
+ multiply_scenarios,
+ per_module_scenarios,
+ )
+from testscenarios.testcase import TestWithScenarios, WithScenarios
+
+
+def test_suite():
+ import testscenarios.tests
+ return testscenarios.tests.test_suite()
+
+
+def load_tests(standard_tests, module, loader):
+ standard_tests.addTests(loader.loadTestsFromNames(["testscenarios.tests"]))
+ return standard_tests
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py
new file mode 100644
index 00000000000..eeb72ebb8a4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py
@@ -0,0 +1,167 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+# Copyright (c) 2010, 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+__all__ = [
+ 'apply_scenario',
+ 'apply_scenarios',
+ 'generate_scenarios',
+ 'load_tests_apply_scenarios',
+ 'multiply_scenarios',
+ ]
+
+from itertools import (
+ chain,
+ product,
+ )
+import sys
+import unittest
+
+from testtools.testcase import clone_test_with_new_id
+from testtools import iterate_tests
+
+
+def apply_scenario(scenario, test):
+ """Apply scenario to test.
+
+ :param scenario: A tuple (name, parameters) to apply to the test. The test
+ is cloned, its id adjusted to have (name) after it, and the parameters
+ dict is used to update the new test.
+ :param test: The test to apply the scenario to. This test is unaltered.
+ :return: A new test cloned from test, with the scenario applied.
+ """
+ name, parameters = scenario
+ scenario_suffix = '(' + name + ')'
+ newtest = clone_test_with_new_id(test,
+ test.id() + scenario_suffix)
+ test_desc = test.shortDescription()
+ if test_desc is not None:
+ newtest_desc = "%(test_desc)s %(scenario_suffix)s" % vars()
+ newtest.shortDescription = (lambda: newtest_desc)
+ for key, value in parameters.items():
+ setattr(newtest, key, value)
+ return newtest
+
+
+def apply_scenarios(scenarios, test):
+ """Apply many scenarios to a test.
+
+ :param scenarios: An iterable of scenarios.
+ :param test: A test to apply the scenarios to.
+ :return: A generator of tests.
+ """
+ for scenario in scenarios:
+ yield apply_scenario(scenario, test)
+
+
+def generate_scenarios(test_or_suite):
+ """Yield the tests in test_or_suite with scenario multiplication done.
+
+ TestCase objects with no scenarios specified are yielded unaltered. Tests
+ with scenarios are not yielded at all; instead, the results of multiplying
+ them by their specified scenarios are yielded.
+
+ :param test_or_suite: A TestCase or TestSuite.
+ :return: A generator of tests - objects satisfying the TestCase protocol.
+ """
+ for test in iterate_tests(test_or_suite):
+ scenarios = getattr(test, 'scenarios', None)
+ if scenarios:
+ for newtest in apply_scenarios(scenarios, test):
+ newtest.scenarios = None
+ yield newtest
+ else:
+ yield test
+
+
+def load_tests_apply_scenarios(*params):
+ """Adapter test runner load hooks to call generate_scenarios.
+
+ If this is referenced by the `load_tests` attribute of a module, then
+ testloaders that implement this protocol will automatically arrange for
+ the scenarios to be expanded. This can be used instead of using
+ TestWithScenarios.
+
+ Two different calling conventions for load_tests have been used, and this
+ function should support both. Python 2.7 passes (loader, standard_tests,
+ pattern), and bzr used (standard_tests, module, loader).
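+
+ Typical usage (a sketch) is simply to reference this function from a
+ module-level ``load_tests`` attribute::
+
+     load_tests = load_tests_apply_scenarios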
+
+ :param loader: A TestLoader.
+ :param standard_tests: The test objects found in this module before
+ multiplication.
+ """
+ if getattr(params[0], 'suiteClass', None) is not None:
+ loader, standard_tests, pattern = params
+ else:
+ standard_tests, module, loader = params
+ result = loader.suiteClass()
+ result.addTests(generate_scenarios(standard_tests))
+ return result
+
+
+def multiply_scenarios(*scenarios):
+ """Multiply two or more iterables of scenarios.
+
+ It is safe to pass scenario generators or iterators.
+
+ :returns: A list of compound scenarios: the cross-product of all
+ scenarios, with the names concatenated and the parameters
+ merged together.
+ """
+ result = []
+ scenario_lists = map(list, scenarios)
+ for combination in product(*scenario_lists):
+ names, parameters = zip(*combination)
+ scenario_name = ','.join(names)
+ scenario_parameters = {}
+ for parameter in parameters:
+ scenario_parameters.update(parameter)
+ result.append((scenario_name, scenario_parameters))
+ return result
+
+
+def per_module_scenarios(attribute_name, modules):
+ """Generate scenarios for available implementation modules.
+
+ This is typically used when there is a subsystem implemented, for
+ example, in both Python and C, and we want to apply the same tests to
+ both, but the C module may sometimes not be available.
+
+ Note: if the module can't be loaded, the sys.exc_info() tuple for the
+ exception raised during import of the module is used instead of the module
+ object. A common idiom is to check in setUp for that and raise a skip or
+ error for that case. No special helpers are supplied in testscenarios as
+ yet.
+
+ :param attribute_name: A name to be set in the scenario parameter
+ dictionary (and thence onto the test instance) pointing to the
+ implementation module (or import exception) for this scenario.
+
+ :param modules: An iterable of (short_name, module_name), where
+ the short name is something like 'python' to put in the
+ scenario name, and the long name is a fully-qualified Python module
+ name.
+ """
+ scenarios = []
+ for short_name, module_name in modules:
+ try:
+ mod = __import__(module_name, {}, {}, [''])
+ except:
+ mod = sys.exc_info()
+ scenarios.append((
+ short_name,
+ {attribute_name: mod}))
+ return scenarios
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py
new file mode 100644
index 00000000000..2ab50c78848
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py
@@ -0,0 +1,70 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+__all__ = [
+ 'TestWithScenarios',
+ 'WithScenarios',
+ ]
+
+import unittest
+
+from testtools.testcase import clone_test_with_new_id
+
+from testscenarios.scenarios import generate_scenarios
+
+_doc = """
+ When a test object which inherits from WithScenarios is run, and there is a
+ non-empty scenarios attribute on the object, the test is multiplied by the
+ run method into one test per scenario. For this to work reliably the
+ WithScenarios.run method must not be overridden in a subclass (or overridden
+ compatibly with WithScenarios).
+ """
+
+class WithScenarios(object):
+ __doc__ = """A mixin for TestCase with support for declarative scenarios.
+ """ + _doc
+
+ def _get_scenarios(self):
+ return getattr(self, 'scenarios', None)
+
+ def countTestCases(self):
+ scenarios = self._get_scenarios()
+ if not scenarios:
+ return 1
+ else:
+ return len(scenarios)
+
+ def debug(self):
+ scenarios = self._get_scenarios()
+ if scenarios:
+ for test in generate_scenarios(self):
+ test.debug()
+ else:
+ return super(WithScenarios, self).debug()
+
+ def run(self, result=None):
+ scenarios = self._get_scenarios()
+ if scenarios:
+ for test in generate_scenarios(self):
+ test.run(result)
+ return
+ else:
+ return super(WithScenarios, self).run(result)
+
+
+class TestWithScenarios(WithScenarios, unittest.TestCase):
+ __doc__ = """Unittest TestCase with support for declarative scenarios.
+ """ + _doc
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py
new file mode 100644
index 00000000000..8e243b6e5ab
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py
@@ -0,0 +1,43 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+import doctest
+import sys
+import unittest
+
+import testscenarios
+
+
+def test_suite():
+ result = unittest.TestSuite()
+ standard_tests = unittest.TestSuite()
+ module = sys.modules['testscenarios.tests']
+ loader = unittest.TestLoader()
+ return load_tests(standard_tests, module, loader)
+
+
+def load_tests(standard_tests, module, loader):
+ test_modules = [
+ 'testcase',
+ 'scenarios',
+ ]
+ prefix = "testscenarios.tests.test_"
+ test_mod_names = [prefix + test_module for test_module in test_modules]
+ standard_tests.addTests(loader.loadTestsFromNames(test_mod_names))
+ doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
+ standard_tests.addTest(
+ doctest.DocFileSuite("../../../README", optionflags=doctest.ELLIPSIS))
+ return loader.suiteClass(testscenarios.generate_scenarios(standard_tests))
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py
new file mode 100644
index 00000000000..97aa17f86cf
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py
@@ -0,0 +1,261 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+# Copyright (c) 2010, 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+import unittest
+
+import testscenarios
+from testscenarios.scenarios import (
+ apply_scenario,
+ apply_scenarios,
+ generate_scenarios,
+ load_tests_apply_scenarios,
+ multiply_scenarios,
+ )
+import testtools
+from testtools.tests.helpers import LoggingResult
+
+
+class TestGenerateScenarios(testtools.TestCase):
+
+ def hook_apply_scenarios(self):
+ self.addCleanup(setattr, testscenarios.scenarios, 'apply_scenarios',
+ apply_scenarios)
+ log = []
+ def capture(scenarios, test):
+ log.append((scenarios, test))
+ return apply_scenarios(scenarios, test)
+ testscenarios.scenarios.apply_scenarios = capture
+ return log
+
+ def test_generate_scenarios_preserves_normal_test(self):
+ class ReferenceTest(unittest.TestCase):
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ log = self.hook_apply_scenarios()
+ self.assertEqual([test], list(generate_scenarios(test)))
+ self.assertEqual([], log)
+
+ def test_tests_with_scenarios_calls_apply_scenarios(self):
+ class ReferenceTest(unittest.TestCase):
+ scenarios = [('demo', {})]
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ log = self.hook_apply_scenarios()
+ tests = list(generate_scenarios(test))
+ self.assertEqual(
+ 'testscenarios.tests.test_scenarios.ReferenceTest.test_pass(demo)',
+ tests[0].id())
+ self.assertEqual([([('demo', {})], test)], log)
+
+ def test_all_scenarios_yielded(self):
+ class ReferenceTest(unittest.TestCase):
+ scenarios = [('1', {}), ('2', {})]
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ tests = list(generate_scenarios(test))
+ self.assertEqual(
+ 'testscenarios.tests.test_scenarios.ReferenceTest.test_pass(1)',
+ tests[0].id())
+ self.assertEqual(
+ 'testscenarios.tests.test_scenarios.ReferenceTest.test_pass(2)',
+ tests[1].id())
+
+ def test_scenarios_attribute_cleared(self):
+ class ReferenceTest(unittest.TestCase):
+ scenarios = [
+ ('1', {'foo': 1, 'bar': 2}),
+ ('2', {'foo': 2, 'bar': 4})]
+ def test_check_foo(self):
+ pass
+ test = ReferenceTest("test_check_foo")
+ tests = list(generate_scenarios(test))
+ for adapted in tests:
+ self.assertEqual(None, adapted.scenarios)
+
+ def test_multiple_tests(self):
+ class Reference1(unittest.TestCase):
+ scenarios = [('1', {}), ('2', {})]
+ def test_something(self):
+ pass
+ class Reference2(unittest.TestCase):
+ scenarios = [('3', {}), ('4', {})]
+ def test_something(self):
+ pass
+ suite = unittest.TestSuite()
+ suite.addTest(Reference1("test_something"))
+ suite.addTest(Reference2("test_something"))
+ tests = list(generate_scenarios(suite))
+ self.assertEqual(4, len(tests))
+
+
+class TestApplyScenario(testtools.TestCase):
+
+ def setUp(self):
+ super(TestApplyScenario, self).setUp()
+
+ self.scenario_name = 'demo'
+ self.scenario_attrs = {'foo': 'bar'}
+ self.scenario = (self.scenario_name, self.scenario_attrs)
+
+ class ReferenceTest(unittest.TestCase):
+ def test_pass(self):
+ pass
+ def test_pass_with_docstring(self):
+ """ The test that always passes.
+
+ This test case has a PEP 257 conformant docstring,
+ with its first line being a brief synopsis and the
+ rest of the docstring explaining that this test
+ does nothing but pass unconditionally.
+
+ """
+ pass
+
+ self.ReferenceTest = ReferenceTest
+
+ def test_sets_specified_id(self):
+ raw_test = self.ReferenceTest('test_pass')
+ raw_id = "testscenarios.tests.test_scenarios.ReferenceTest.test_pass"
+ scenario_name = self.scenario_name
+ expect_id = "%(raw_id)s(%(scenario_name)s)" % vars()
+ modified_test = apply_scenario(self.scenario, raw_test)
+ self.assertEqual(expect_id, modified_test.id())
+
+ def test_sets_specified_attributes(self):
+ raw_test = self.ReferenceTest('test_pass')
+ modified_test = apply_scenario(self.scenario, raw_test)
+ self.assertEqual('bar', modified_test.foo)
+
+ def test_appends_scenario_name_to_short_description(self):
+ raw_test = self.ReferenceTest('test_pass_with_docstring')
+ modified_test = apply_scenario(self.scenario, raw_test)
+ raw_doc = self.ReferenceTest.test_pass_with_docstring.__doc__
+ raw_desc = raw_doc.split("\n")[0].strip()
+ scenario_name = self.scenario_name
+ expect_desc = "%(raw_desc)s (%(scenario_name)s)" % vars()
+ self.assertEqual(expect_desc, modified_test.shortDescription())
+
+class TestApplyScenarios(testtools.TestCase):
+
+ def test_calls_apply_scenario(self):
+ self.addCleanup(setattr, testscenarios.scenarios, 'apply_scenario',
+ apply_scenario)
+ log = []
+ def capture(scenario, test):
+ log.append((scenario, test))
+ testscenarios.scenarios.apply_scenario = capture
+ scenarios = ["foo", "bar"]
+ result = list(apply_scenarios(scenarios, "test"))
+ self.assertEqual([('foo', 'test'), ('bar', 'test')], log)
+
+ def test_preserves_scenarios_attribute(self):
+ class ReferenceTest(unittest.TestCase):
+ scenarios = [('demo', {})]
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ tests = list(apply_scenarios(ReferenceTest.scenarios, test))
+ self.assertEqual([('demo', {})], ReferenceTest.scenarios)
+ self.assertEqual(ReferenceTest.scenarios, tests[0].scenarios)
+
+
+class TestLoadTests(testtools.TestCase):
+
+ class SampleTest(unittest.TestCase):
+ def test_nothing(self):
+ pass
+ scenarios = [
+ ('a', {}),
+ ('b', {}),
+ ]
+
+ def test_load_tests_apply_scenarios(self):
+ suite = load_tests_apply_scenarios(
+ unittest.TestLoader(),
+ [self.SampleTest('test_nothing')],
+ None)
+ result_tests = list(testtools.iterate_tests(suite))
+ self.assertEquals(
+ 2,
+ len(result_tests),
+ result_tests)
+
+ def test_load_tests_apply_scenarios_old_style(self):
+ """Call load_tests in the way used by bzr."""
+ suite = load_tests_apply_scenarios(
+ [self.SampleTest('test_nothing')],
+ self.__class__.__module__,
+ unittest.TestLoader(),
+ )
+ result_tests = list(testtools.iterate_tests(suite))
+ self.assertEquals(
+ 2,
+ len(result_tests),
+ result_tests)
+
+
+class TestMultiplyScenarios(testtools.TestCase):
+
+ def test_multiply_scenarios(self):
+ def factory(name):
+ for i in 'ab':
+ yield i, {name: i}
+ scenarios = multiply_scenarios(factory('p'), factory('q'))
+ self.assertEqual([
+ ('a,a', dict(p='a', q='a')),
+ ('a,b', dict(p='a', q='b')),
+ ('b,a', dict(p='b', q='a')),
+ ('b,b', dict(p='b', q='b')),
+ ],
+ scenarios)
+
+ def test_multiply_many_scenarios(self):
+ def factory(name):
+ for i in 'abc':
+ yield i, {name: i}
+ scenarios = multiply_scenarios(factory('p'), factory('q'),
+ factory('r'), factory('t'))
+ self.assertEqual(
+ 3**4,
+ len(scenarios),
+ scenarios)
+ self.assertEqual(
+ 'a,a,a,a',
+ scenarios[0][0])
+
+
+class TestPerModuleScenarios(testtools.TestCase):
+
+ def test_per_module_scenarios(self):
+ """Generate scenarios for available modules"""
+ s = testscenarios.scenarios.per_module_scenarios(
+ 'the_module', [
+ ('Python', 'testscenarios'),
+ ('unittest', 'unittest'),
+ ('nonexistent', 'nonexistent'),
+ ])
+ self.assertEqual('nonexistent', s[-1][0])
+ self.assertIsInstance(s[-1][1]['the_module'], tuple)
+ s[-1][1]['the_module'] = None
+ self.assertEqual(s, [
+ ('Python', {'the_module': testscenarios}),
+ ('unittest', {'the_module': unittest}),
+ ('nonexistent', {'the_module': None}),
+ ])
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py
new file mode 100644
index 00000000000..74d2fe1c504
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py
@@ -0,0 +1,157 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+import unittest
+
+import testscenarios
+import testtools
+from testtools.tests.helpers import LoggingResult
+
+
+class TestTestWithScenarios(testtools.TestCase):
+
+ scenarios = testscenarios.scenarios.per_module_scenarios(
+ 'impl', (('unittest', 'unittest'), ('unittest2', 'unittest2')))
+
+ @property
+ def Implementation(self):
+ if isinstance(self.impl, tuple):
+ self.skipTest('import failed - module not installed?')
+ class Implementation(testscenarios.WithScenarios, self.impl.TestCase):
+ pass
+ return Implementation
+
+ def test_no_scenarios_no_error(self):
+ class ReferenceTest(self.Implementation):
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(1, result.testsRun)
+
+ def test_with_one_scenario_one_run(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = [('demo', {})]
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ log = []
+ result = LoggingResult(log)
+ test.run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(1, result.testsRun)
+ self.assertEqual(
+ 'testscenarios.tests.test_testcase.ReferenceTest.test_pass(demo)',
+ log[0][1].id())
+
+ def test_with_two_scenarios_two_run(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = [('1', {}), ('2', {})]
+ def test_pass(self):
+ pass
+ test = ReferenceTest("test_pass")
+ log = []
+ result = LoggingResult(log)
+ test.run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(2, result.testsRun)
+ self.assertEqual(
+ 'testscenarios.tests.test_testcase.ReferenceTest.test_pass(1)',
+ log[0][1].id())
+ self.assertEqual(
+ 'testscenarios.tests.test_testcase.ReferenceTest.test_pass(2)',
+ log[4][1].id())
+
+ def test_attributes_set(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = [
+ ('1', {'foo': 1, 'bar': 2}),
+ ('2', {'foo': 2, 'bar': 4})]
+ def test_check_foo(self):
+ self.assertEqual(self.foo * 2, self.bar)
+ test = ReferenceTest("test_check_foo")
+ log = []
+ result = LoggingResult(log)
+ test.run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(2, result.testsRun)
+
+ def test_scenarios_attribute_cleared(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = [
+ ('1', {'foo': 1, 'bar': 2}),
+ ('2', {'foo': 2, 'bar': 4})]
+ def test_check_foo(self):
+ self.assertEqual(self.foo * 2, self.bar)
+ test = ReferenceTest("test_check_foo")
+ log = []
+ result = LoggingResult(log)
+ test.run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(2, result.testsRun)
+ self.assertNotEqual(None, test.scenarios)
+ self.assertEqual(None, log[0][1].scenarios)
+ self.assertEqual(None, log[4][1].scenarios)
+
+ def test_countTestCases_no_scenarios(self):
+ class ReferenceTest(self.Implementation):
+ def test_check_foo(self):
+ pass
+ test = ReferenceTest("test_check_foo")
+ self.assertEqual(1, test.countTestCases())
+
+ def test_countTestCases_empty_scenarios(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = []
+ def test_check_foo(self):
+ pass
+ test = ReferenceTest("test_check_foo")
+ self.assertEqual(1, test.countTestCases())
+
+ def test_countTestCases_1_scenarios(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = [('1', {'foo': 1, 'bar': 2})]
+ def test_check_foo(self):
+ pass
+ test = ReferenceTest("test_check_foo")
+ self.assertEqual(1, test.countTestCases())
+
+ def test_countTestCases_2_scenarios(self):
+ class ReferenceTest(self.Implementation):
+ scenarios = [
+ ('1', {'foo': 1, 'bar': 2}),
+ ('2', {'foo': 2, 'bar': 4})]
+ def test_check_foo(self):
+ pass
+ test = ReferenceTest("test_check_foo")
+ self.assertEqual(2, test.countTestCases())
+
+ def test_debug_2_scenarios(self):
+ log = []
+ class ReferenceTest(self.Implementation):
+ scenarios = [
+ ('1', {'foo': 1, 'bar': 2}),
+ ('2', {'foo': 2, 'bar': 4})]
+ def test_check_foo(self):
+ log.append(self)
+ test = ReferenceTest("test_check_foo")
+ test.debug()
+ self.assertEqual(2, len(log))
+ self.assertEqual(None, log[0].scenarios)
+ self.assertEqual(None, log[1].scenarios)
+ self.assertNotEqual(log[0].id(), log[1].id())
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.cfg b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.cfg
new file mode 100644
index 00000000000..861a9f55426
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.py b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.py
new file mode 100755
index 00000000000..6b0d596a2a4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testscenarios-0.4/setup.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+import os.path
+
+description = open(os.path.join(os.path.dirname(__file__), 'README'), 'rt').read()
+
+setup(name="testscenarios",
+ version="0.4",
+ description="Testscenarios, a pyunit extension for dependency injection",
+ long_description=description,
+ maintainer="Robert Collins",
+ maintainer_email="robertc@robertcollins.net",
+ url="https://launchpad.net/testscenarios",
+ packages=['testscenarios', 'testscenarios.tests'],
+ package_dir = {'':'lib'},
+ classifiers = [
+ 'Development Status :: 6 - Mature',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: BSD License',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
+ 'Topic :: Software Development :: Quality Assurance',
+ 'Topic :: Software Development :: Testing',
+ ],
+ install_requires = [
+ 'testtools',
+ ]
+ )
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/.gitignore b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/.gitignore
new file mode 100644
index 00000000000..862442d5067
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/.gitignore
@@ -0,0 +1,15 @@
+__pycache__
+./build
+MANIFEST
+dist
+tags
+TAGS
+apidocs
+_trial_temp
+doc/_build
+.testrepository
+./testtools.egg-info
+*.pyc
+*.swp
+*~
+testtools.egg-info
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/LICENSE b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/LICENSE
new file mode 100644
index 00000000000..21010cc4856
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/LICENSE
@@ -0,0 +1,59 @@
+Copyright (c) 2008-2011 Jonathan M. Lange <jml@mumak.net> and the testtools
+authors.
+
+The testtools authors are:
+ * Canonical Ltd
+ * Twisted Matrix Labs
+ * Jonathan Lange
+ * Robert Collins
+ * Andrew Bennetts
+ * Benjamin Peterson
+ * Jamu Kakar
+ * James Westby
+ * Martin [gz]
+ * Michael Hudson-Doyle
+ * Aaron Bentley
+ * Christian Kampka
+ * Gavin Panella
+ * Martin Pool
+ * Vincent Ladeuil
+ * Nikola Đipanov
+
+and are collectively referred to as "testtools developers".
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Some code in testtools/run.py taken from Python's unittest module:
+Copyright (c) 1999-2003 Steve Purcell
+Copyright (c) 2003-2010 Python Software Foundation
+
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/MANIFEST.in b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/MANIFEST.in
new file mode 100644
index 00000000000..4619349f3b9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/MANIFEST.in
@@ -0,0 +1,10 @@
+include LICENSE
+include Makefile
+include MANIFEST.in
+include NEWS
+include README.rst
+include .gitignore
+graft doc
+graft doc/_static
+graft doc/_templates
+prune doc/_build
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/Makefile b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/Makefile
new file mode 100644
index 00000000000..ccaa7762286
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/Makefile
@@ -0,0 +1,56 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
+PYTHON=python
+SOURCES=$(shell find testtools -name "*.py")
+
+check:
+ PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run testtools.tests.test_suite
+
+TAGS: ${SOURCES}
+ ctags -e -R testtools/
+
+tags: ${SOURCES}
+ ctags -R testtools/
+
+clean: clean-sphinx
+ rm -f TAGS tags
+ find testtools -name "*.pyc" -exec rm '{}' \;
+
+prerelease:
+ # An existing MANIFEST breaks distutils sometimes. Avoid that.
+ -rm MANIFEST
+
+release:
+ ./setup.py sdist upload --sign
+ $(PYTHON) scripts/_lp_release.py
+
+snapshot: prerelease
+ ./setup.py sdist
+
+### Documentation ###
+
+apidocs:
+ # pydoctor emits deprecation warnings under Ubuntu 10.10 LTS
+ PYTHONWARNINGS='ignore::DeprecationWarning' \
+ pydoctor --make-html --add-package testtools \
+ --docformat=restructuredtext --project-name=testtools \
+ --project-url=https://github.com/testing-cabal/testtools
+
+doc/news.rst:
+ ln -s ../NEWS doc/news.rst
+
+docs: doc/news.rst docs-sphinx
+ rm doc/news.rst
+
+docs-sphinx: html-sphinx
+
+# Clean out generated documentation
+clean-sphinx:
+ cd doc && make clean
+
+# Build the html docs using Sphinx.
+html-sphinx:
+ cd doc && make html
+
+.PHONY: apidocs docs-sphinx clean-sphinx html-sphinx docs
+.PHONY: check clean prerelease release
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/NEWS b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/NEWS
new file mode 100644
index 00000000000..a8aa4d23b3a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/NEWS
@@ -0,0 +1,1281 @@
+testtools NEWS
+++++++++++++++
+
+Changes and improvements to testtools_, grouped by release.
+
+
+NEXT
+~~~~
+
+0.9.34
+~~~~~~
+
+Improvements
+------------
+
+* Added ability for ``testtools.TestCase`` instances to force a test to
+ fail, even if no assertions failed. (Thomi Richards)
+
+* Added ``testtools.content.StacktraceContent``, a content object that
+ automatically creates a ``StackLinesContent`` object containing the current
+ stack trace. (Thomi Richards)
+
+* ``AnyMatch`` is now exported properly in ``testtools.matchers``.
+ (Robert Collins, Rob Kennedy, github #44)
+
+* In Python 3.3, if there are duplicate test ids, tests.sort() will
+ fail and raise TypeError. sorted_tests() now detects duplicate test
+ ids up front to ensure that all test ids are unique.
+ (Kui Shi, #1243922)
+
+* ``json_content`` is now in the ``__all__`` attribute for
+ ``testtools.content``. (Robert Collins)
+
+* Network tests now bind to 127.0.0.1 to avoid (even temporary) network
+ visible ports. (Benedikt Morbach, github #46)
+
+* Test listing now explicitly indicates an import failure by printing
+ 'Failed to import' and exiting with status 2, rather than only signalling it
+ through the test name. (Robert Collins, #1245672)
+
+* ``test_compat.TestDetectEncoding.test_bom`` now works on Python 3.3 - the
+ corner case with euc_jp is no longer permitted in Python 3.3 so we can
+ skip it. (Martin [gz], #1251962)
+
+0.9.33
+~~~~~~
+
+Improvements
+------------
+
+* Added ``addDetailUniqueName`` method to ``testtools.TestCase`` class.
+ (Thomi Richards)
+
+* Removed some unused code from ``testtools.content.TracebackContent``.
+ (Thomi Richards)
+
+* Added ``testtools.StackLinesContent``: a content object for displaying
+ pre-processed stack lines. (Thomi Richards)
+
+* ``StreamSummary`` was calculating testsRun incorrectly: ``exists`` status
+ tests were counted as run tests, but they are not.
+ (Robert Collins, #1203728)
+
+0.9.32
+~~~~~~
+
+Regular maintenance release. Special thanks to new contributor, Xiao Hanyu!
+
+Changes
+-------
+
+ * ``testtools.compat._format_exc_info`` has been refactored into several
+ smaller functions. (Thomi Richards)
+
+Improvements
+------------
+
+* Stacktrace filtering no longer hides unittest frames that are surrounded by
+ user frames. We will reenable this when we figure out a better algorithm for
+ retaining meaning. (Robert Collins, #1188420)
+
+* The compatibility code for skipped tests with unittest2 was broken.
+ (Robert Collins, #1190951)
+
+* Various documentation improvements (Clint Byrum, Xiao Hanyu).
+
+0.9.31
+~~~~~~
+
+Improvements
+------------
+
+* ``ExpectedException`` now accepts a msg parameter for describing an error,
+ much the same as assertEquals etc. (Robert Collins)
+
+0.9.30
+~~~~~~
+
+A new sort of TestResult, the StreamResult, has been added as a prototype for
+a revised standard library test result API. Expect this API to change.
+Although we will try to preserve compatibility for early adopters, it is
+experimental and we might need to break it if it turns out to be unsuitable.
+
+Improvements
+------------
+
+* ``assertRaises`` works properly for exception classes that have custom
+ metaclasses.
+
+* ``ConcurrentTestSuite`` was silently eating exceptions that propagate from
+ the test.run(result) method call. Ignoring them is fine in a normal test
+ runner, but when they happen in a different thread, the thread that called
+ suite.run() is not in the stack anymore, and the exceptions are lost. We now
+ create a synthetic test recording any such exception.
+ (Robert Collins, #1130429)
+
+* Fixed SyntaxError raised in ``_compat2x.py`` when installing via Python 3.
+ (Will Bond, #941958)
+
+* New class ``StreamResult`` which defines the API for the new result type.
+ (Robert Collins)
+
+* New support class ``ConcurrentStreamTestSuite`` for convenient construction
+ and utilisation of ``StreamToQueue`` objects. (Robert Collins)
+
+* New support class ``CopyStreamResult`` which forwards events onto multiple
+ ``StreamResult`` objects (each of which receives all the events).
+ (Robert Collins)
+
+* New support class ``StreamSummary`` which summarises a ``StreamResult``
+ stream compatibly with ``TestResult`` code. (Robert Collins)
+
+* New support class ``StreamTagger`` which adds or removes tags from
+ ``StreamResult`` events. (Robert Collins)
+
+* New support class ``StreamToDict`` which converts a ``StreamResult`` to a
+ series of dicts describing a test. Useful for writing trivial stream
+ analysers. (Robert Collins)
+
+* New support class ``TestControl`` which permits cancelling an in-progress
+ run. (Robert Collins)
+
+* New support class ``StreamFailFast`` which calls a ``TestControl`` instance
+ to abort the test run when a failure is detected. (Robert Collins)
+
+* New support class ``ExtendedToStreamDecorator`` which translates both regular
+ unittest TestResult API calls and the ExtendedTestResult API which testtools
+ has supported into the StreamResult API. ExtendedToStreamDecorator also
+ forwards calls made in the StreamResult API, permitting it to be used
+ anywhere a StreamResult is used. Key TestResult query methods like
+ wasSuccessful and shouldStop are synchronised with the StreamResult API
+ calls, but the detailed statistics like the list of errors are not - a
+ separate consumer will be created to support that.
+ (Robert Collins)
+
+* New support class ``StreamToExtendedDecorator`` which translates
+ ``StreamResult`` API calls into ``ExtendedTestResult`` (or any older
+ ``TestResult``) calls. This permits using un-migrated result objects with
+ new runners / tests. (Robert Collins)
+
+* New support class ``StreamToQueue`` for sending messages to one
+ ``StreamResult`` from multiple threads. (Robert Collins)
+
+* New support class ``TimestampingStreamResult`` which adds a timestamp to
+ events with no timestamp. (Robert Collins)
+
+* New ``TestCase`` decorator ``DecorateTestCaseResult`` that adapts the
+ ``TestResult`` or ``StreamResult`` a case will be run with, for ensuring that
+ a particular result object is used even if the runner running the test doesn't
+ know to use it. (Robert Collins)
+
+* New test support class ``testtools.testresult.doubles.StreamResult``, which
+ captures all the StreamResult events. (Robert Collins)
+
+* ``PlaceHolder`` can now hold tags, and applies them before, and removes them
+ after, the test. (Robert Collins)
+
+* ``PlaceHolder`` can now hold timestamps, and applies them before the test and
+ then before the outcome. (Robert Collins)
+
+* ``StreamResultRouter`` added. This is useful for demultiplexing - e.g. for
+ partitioning analysis of events or sending feedback encapsulated in
+ StreamResult events back to their source. (Robert Collins)
+
+* ``testtools.run.TestProgram`` now supports the ``TestRunner`` taking over
+ responsibility for formatting the output of ``--list-tests``.
+ (Robert Collins)
+
+* The error message for setUp and tearDown upcall errors was broken on Python
+ 3.4. (Monty Taylor, Robert Collins, #1140688)
+
+* The repr of object() on pypy includes the object id, which was breaking a
+ test that accidentally depended on the CPython repr for object().
+ (Jonathan Lange)
+
+0.9.29
+~~~~~~
+
+A simple bug fix, and better error messages when you don't up-call.
+
+Changes
+-------
+
+* ``testtools.content_type.ContentType`` incorrectly used ',' rather than ';'
+ to separate parameters. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.compat.unicode_output_stream`` was wrapping a stream encoder
+ around ``io.StringIO`` and ``io.TextIOWrapper`` objects, which was incorrect.
+ (Robert Collins)
+
+* Report the name of the source file for setUp and tearDown upcall errors.
+ (Monty Taylor)
+
+0.9.28
+~~~~~~
+
+Testtools has moved VCS - https://github.com/testing-cabal/testtools/ is
+the new home. Bug tracking is still on Launchpad, and releases are on Pypi.
+
+We made this change to take advantage of the richer ecosystem of tools around
+Git, and to lower the barrier for new contributors.
+
+Improvements
+------------
+
+* New ``testtools.testcase.attr`` and ``testtools.testcase.WithAttributes``
+ helpers allow marking up test case methods with simple labels. This permits
+ filtering tests with more granularity than organising them into modules and
+ test classes. (Robert Collins)
+
+0.9.27
+~~~~~~
+
+Improvements
+------------
+
+* New matcher ``HasLength`` for matching the length of a collection.
+ (Robert Collins)
+
+* New matcher ``MatchesPredicateWithParams`` make it still easier to create
+ ad hoc matchers. (Robert Collins)
+
+* We have a simpler release process in future - see doc/hacking.rst.
+ (Robert Collins)
+
+0.9.26
+~~~~~~
+
+Brown paper bag fix: failed to document the need for setup to be able to use
+extras. Compounded by pip not supporting setup_requires.
+
+Changes
+-------
+
+* setup.py now can generate egg_info even if extras is not available.
+ Also lists extras in setup_requires for easy_install.
+ (Robert Collins, #1102464)
+
+0.9.25
+~~~~~~
+
+Changes
+-------
+
+* ``python -m testtools.run --load-list`` will now preserve any custom suites
+ (such as ``testtools.FixtureSuite`` or ``testresources.OptimisingTestSuite``)
+ rather than flattening them.
+ (Robert Collins, #827175)
+
+* Testtools now depends on extras, a small library split out from it to contain
+ generally useful non-testing facilities. Since extras has been around for a
+ couple of testtools releases now, we're making this into a hard dependency of
+ testtools. (Robert Collins)
+
+* Testtools now uses setuptools rather than distutils so that we can document
+ the extras dependency. (Robert Collins)
+
+Improvements
+------------
+
+* Testtools will no longer override details registered by test code under the
+ name 'traceback' when reporting caught exceptions from test code.
+ (Robert Collins, #812793)
+
+0.9.24
+~~~~~~
+
+Changes
+-------
+
+* ``testtools.run discover`` will now sort the tests it discovered. This is a
+ workaround for http://bugs.python.org/issue16709. Non-standard test suites
+ are preserved, and their ``sort_tests()`` method called (if they have such an
+ attribute). ``testtools.testsuite.sorted_tests(suite, True)`` can be used by
+ such suites to do a local sort. (Robert Collins, #1091512)
+
+* ``ThreadsafeForwardingResult`` now defines a stub ``progress`` method, which
+ fixes ``testr run`` of streams containing progress markers (by discarding the
+ progress data). (Robert Collins, #1019165)
+
+0.9.23
+~~~~~~
+
+Changes
+-------
+
+* ``run.TestToolsTestRunner`` now accepts the verbosity, buffer and failfast
+ arguments the upstream python TestProgram code wants to give it, making it
+ possible to support them in a compatible fashion. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.run`` now supports the ``-f`` or ``--failfast`` parameter.
+ Previously it was advertised in the help but ignored.
+ (Robert Collins, #1090582)
+
+* ``AnyMatch`` added, a new matcher that matches when any item in a collection
+ matches the given matcher. (Jonathan Lange)
+
+* Spelling corrections to documentation. (Vincent Ladeuil)
+
+* ``TestProgram`` now has a sane default for its ``testRunner`` argument.
+ (Vincent Ladeuil)
+
+* The test suite passes on Python 3 again. (Robert Collins)
+
+0.9.22
+~~~~~~
+
+Improvements
+------------
+
+* ``content_from_file`` and ``content_from_stream`` now accept seek_offset and
+ seek_whence parameters allowing them to be used to grab less than the full
+ stream, or to be used with StringIO streams. (Robert Collins, #1088693)
+
+0.9.21
+~~~~~~
+
+Improvements
+------------
+
+* ``DirContains`` correctly exposed, after being accidentally hidden in the
+ great matcher re-organization of 0.9.17. (Jonathan Lange)
+
+
+0.9.20
+~~~~~~
+
+Three new matchers that'll rock your world.
+
+Improvements
+------------
+
+* New, powerful matchers that match items in a dictionary:
+
+ - ``MatchesDict``, match every key in a dictionary with a key in a
+ dictionary of matchers. For when the set of expected keys is equal to the
+ set of observed keys.
+
+ - ``ContainsDict``, every key in a dictionary of matchers must be
+ found in a dictionary, and the values for those keys must match. For when
+ the set of expected keys is a subset of the set of observed keys.
+
+ - ``ContainedByDict``, every key in a dictionary must be found in
+ a dictionary of matchers. For when the set of expected keys is a superset
+ of the set of observed keys.
+
+ The names are a little confusing, sorry. We're still trying to figure out
+ how to present the concept in the simplest way possible.
+
+
+0.9.19
+~~~~~~
+
+How embarrassing! Three releases in two days.
+
+We've worked out the kinks and have confirmation from our downstreams that
+this is all good. Should be the last release for a little while. Please
+ignore 0.9.18 and 0.9.17.
+
+Improvements
+------------
+
+* Include the matcher tests in the release, allowing the tests to run and
+ pass from the release tarball. (Jonathan Lange)
+
+* Fix cosmetic test failures in Python 3.3, introduced during release 0.9.17.
+ (Jonathan Lange)
+
+
+0.9.18
+~~~~~~
+
+Due to an oversight, release 0.9.18 did not contain the new
+``testtools.matchers`` package and was thus completely broken. This release
+corrects that, returning us all to normality.
+
+0.9.17
+~~~~~~
+
+This release brings better discover support and Python3.x improvements. There
+are still some test failures on Python3.3 but they are cosmetic - the library
+is as usable there as on any other Python 3 release.
+
+Changes
+-------
+
+* The ``testtools.matchers`` package has been split up. No change to the
+ public interface. (Jonathan Lange)
+
+Improvements
+------------
+
+* ``python -m testtools.run discover . --list`` now works. (Robert Collins)
+
+* Correct handling of bytes vs text in JSON content type. (Martin [gz])
+
+
+0.9.16
+~~~~~~
+
+Some new matchers and a new content helper for JSON content.
+
+This is the first release of testtools to drop support for Python 2.4 and 2.5.
+If you need support for either of those versions, please use testtools 0.9.15.
+
+Improvements
+------------
+
+* New content helper, ``json_content`` (Jonathan Lange)
+
+* New matchers:
+
+ * ``ContainsAll`` for asserting one thing is a subset of another
+ (Raphaël Badin)
+
+ * ``SameMembers`` for asserting two iterators have the same members.
+ (Jonathan Lange)
+
+* Reraising of exceptions in Python 3 is more reliable. (Martin [gz])
+
+
+0.9.15
+~~~~~~
+
+This is the last release to support Python 2.4 and 2.5. It brings in a slew of
+improvements to test tagging and concurrency, making it more reliable to run
+large test suites with partitioned workers and easier to reproduce the exact
+test ordering in a given worker. See our sister project ``testrepository`` for
+a test runner that uses these features.
+
+Changes
+-------
+
+* ``PlaceHolder`` and ``ErrorHolder`` now support being given result details.
+ (Robert Collins)
+
+* ``ErrorHolder`` is now just a function - all the logic is in ``PlaceHolder``.
+ (Robert Collins)
+
+* ``TestResult`` and all other ``TestResult``-like objects in testtools
+ distinguish between global tags and test-local tags, as per the subunit
+ specification. (Jonathan Lange)
+
+* This is the **last** release of testtools that supports Python 2.4 or 2.5.
+ These releases are no longer supported by the Python community and do not
+ receive security updates. If this affects you, you will need to either
+ stay on this release or perform your own backports.
+ (Jonathan Lange, Robert Collins)
+
+* ``ThreadsafeForwardingResult`` now forwards global tags as test-local tags,
+ making reasoning about the correctness of the multiplexed stream simpler.
+ This preserves the semantic value (what tags apply to a given test) while
+ consuming less stream size (as no negative-tag statement is needed).
+ (Robert Collins, Gary Poster, #986434)
+
+Improvements
+------------
+
+* API documentation corrections. (Raphaël Badin)
+
+* ``ConcurrentTestSuite`` now takes an optional ``wrap_result`` parameter
+ that can be used to wrap the ``ThreadsafeForwardingResults`` created by
+ the suite. (Jonathan Lange)
+
+* ``Tagger`` added. It's a new ``TestResult`` that tags all tests sent to
+ it with a particular set of tags. (Jonathan Lange)
+
+* ``testresultdecorator`` brought over from subunit. (Jonathan Lange)
+
+* All ``TestResult`` wrappers now correctly forward ``current_tags`` from
+ their wrapped results, meaning that ``current_tags`` can always be relied
+ upon to return the currently active tags on a test result.
+
+* ``TestByTestResult``, a ``TestResult`` that calls a method once per test,
+ added. (Jonathan Lange)
+
+* ``ThreadsafeForwardingResult`` correctly forwards ``tags()`` calls where
+ only one of ``new_tags`` or ``gone_tags`` are specified.
+ (Jonathan Lange, #980263)
+
+* ``ThreadsafeForwardingResult`` no longer leaks local tags from one test
+ into all future tests run. (Jonathan Lange, #985613)
+
+* ``ThreadsafeForwardingResult`` has many, many more tests. (Jonathan Lange)
+
+
+0.9.14
+~~~~~~
+
+Our sister project, `subunit <https://launchpad.net/subunit>`_, was using a
+private API that was deleted in the 0.9.13 release. This release restores
+that API in order to smooth out the upgrade path.
+
+If you don't use subunit, then this release won't matter very much to you.
+
+
+0.9.13
+~~~~~~
+
+Plenty of new matchers and quite a few critical bug fixes (especially to do
+with stack traces from failed assertions). A net win for all.
+
+Changes
+-------
+
+* ``MatchesAll`` now takes a ``first_only`` keyword argument that changes how
+ mismatches are displayed. If you were previously passing matchers to
+ ``MatchesAll`` with keyword arguments, then this change might affect your
+ test results. (Jonathan Lange)
+
+Improvements
+------------
+
+* Actually hide all of the testtools stack for assertion failures. The
+ previous release promised clean stack, but now we actually provide it.
+ (Jonathan Lange, #854769)
+
+* ``assertRaises`` now includes the ``repr`` of the callable that failed to raise
+ properly. (Jonathan Lange, #881052)
+
+* Asynchronous tests no longer hang when run with trial.
+ (Jonathan Lange, #926189)
+
+* ``Content`` objects now have an ``as_text`` method to convert their contents
+ to Unicode text. (Jonathan Lange)
+
+* Failed equality assertions now line up. (Jonathan Lange, #879339)
+
+* ``FullStackRunTest`` no longer aborts the test run if a test raises an
+ error. (Jonathan Lange)
+
+* ``MatchesAll`` and ``MatchesListwise`` both take a ``first_only`` keyword
+ argument. If True, they will report only on the first mismatch they find,
+ and not continue looking for other possible mismatches.
+ (Jonathan Lange)
+
+* New helper, ``Nullary`` that turns callables with arguments into ones that
+ don't take arguments. (Jonathan Lange)
+
+* New matchers:
+
+ * ``DirContains`` matches the contents of a directory.
+ (Jonathan Lange, James Westby)
+
+ * ``DirExists`` matches if a directory exists.
+ (Jonathan Lange, James Westby)
+
+ * ``FileContains`` matches the contents of a file.
+ (Jonathan Lange, James Westby)
+
+ * ``FileExists`` matches if a file exists.
+ (Jonathan Lange, James Westby)
+
+ * ``HasPermissions`` matches the permissions of a file. (Jonathan Lange)
+
+ * ``MatchesPredicate`` matches if a predicate is true. (Jonathan Lange)
+
+ * ``PathExists`` matches if a path exists. (Jonathan Lange, James Westby)
+
+ * ``SamePath`` matches if two paths are the same. (Jonathan Lange)
+
+ * ``TarballContains`` matches the contents of a tarball. (Jonathan Lange)
+
+* ``MultiTestResult`` supports the ``tags`` method.
+ (Graham Binns, Francesco Banconi, #914279)
+
+* ``ThreadsafeForwardingResult`` supports the ``tags`` method.
+ (Graham Binns, Francesco Banconi, #914279)
+
+* ``ThreadsafeForwardingResult`` no longer includes semaphore acquisition time
+ in the test duration (for implicitly timed test runs).
+ (Robert Collins, #914362)
+
+0.9.12
+~~~~~~
+
+This is a very big release. We've made huge improvements on three fronts:
+ 1. Test failures are way nicer and easier to read
+ 2. Matchers and ``assertThat`` are much more convenient to use
+ 3. Correct handling of extended unicode characters
+
+We've trimmed off the fat from the stack trace you get when tests fail, we've
+cut out the bits of error messages that just didn't help, we've made it easier
+to annotate mismatch failures, to compare complex objects and to match raised
+exceptions.
+
+Testing code was never this fun.
+
+Changes
+-------
+
+* ``AfterPreproccessing`` renamed to ``AfterPreprocessing``, which is a more
+ correct spelling. Old name preserved for backwards compatibility, but is
+ now deprecated. Please stop using it.
+ (Jonathan Lange, #813460)
+
+* ``assertThat`` raises ``MismatchError`` instead of
+ ``TestCase.failureException``. ``MismatchError`` is a subclass of
+ ``AssertionError``, so in most cases this change will not matter. However,
+ if ``self.failureException`` has been set to a non-default value, then
+ mismatches will become test errors rather than test failures.
+
+* ``gather_details`` takes two dicts, rather than two detailed objects.
+ (Jonathan Lange, #801027)
+
+* ``MatchesRegex`` mismatch now says "<value> does not match /<regex>/" rather
+ than "<regex> did not match <value>". The regular expression contains fewer
+ backslashes too. (Jonathan Lange, #818079)
+
+* Tests that run with ``AsynchronousDeferredRunTest`` now have the ``reactor``
+ attribute set to the running reactor. (Jonathan Lange, #720749)
+
+Improvements
+------------
+
+* All public matchers are now in ``testtools.matchers.__all__``.
+ (Jonathan Lange, #784859)
+
+* ``assertThat`` can actually display mismatches and matchers that contain
+ extended unicode characters. (Jonathan Lange, Martin [gz], #804127)
+
+* ``assertThat`` output is much less verbose, displaying only what the mismatch
+ tells us to display. Old-style verbose output can be had by passing
+ ``verbose=True`` to assertThat. (Jonathan Lange, #675323, #593190)
+
+* ``assertThat`` accepts a message which will be used to annotate the matcher.
+ This can be given as a third parameter or as a keyword parameter.
+ (Robert Collins)
+
+* Automated the Launchpad part of the release process.
+ (Jonathan Lange, #623486)
+
+* Correctly display non-ASCII unicode output on terminals that claim to have a
+ unicode encoding. (Martin [gz], #804122)
+
+* ``DocTestMatches`` correctly handles unicode output from examples, rather
+ than raising an error. (Martin [gz], #764170)
+
+* ``ErrorHolder`` and ``PlaceHolder`` added to docs. (Jonathan Lange, #816597)
+
+* ``ExpectedException`` now matches any exception of the given type by
+ default, and also allows specifying a ``Matcher`` rather than a mere regular
+ expression. (Jonathan Lange, #791889)
+
+* ``FixtureSuite`` added, allows test suites to run with a given fixture.
+ (Jonathan Lange)
+
+* Hide testtools's own stack frames when displaying tracebacks, making it
+ easier for test authors to focus on their errors.
+ (Jonathan Lange, Martin [gz], #788974)
+
+* Less boilerplate displayed in test failures and errors.
+ (Jonathan Lange, #660852)
+
+* ``MatchesException`` now allows you to match exceptions against any matcher,
+ rather than just regular expressions. (Jonathan Lange, #791889)
+
+* ``MatchesException`` now permits a tuple of types rather than a single type
+ (when using the type matching mode). (Robert Collins)
+
+* ``MatchesStructure.byEquality`` added to make the common case of matching
+ many attributes by equality much easier. ``MatchesStructure.byMatcher``
+ added in case folk want to match by things other than equality.
+ (Jonathan Lange)
+
+* New convenience assertions, ``assertIsNone`` and ``assertIsNotNone``.
+ (Christian Kampka)
+
+* New matchers:
+
+ * ``AllMatch`` matches many values against a single matcher.
+ (Jonathan Lange, #615108)
+
+ * ``Contains``. (Robert Collins)
+
+ * ``GreaterThan``. (Christian Kampka)
+
+* New helper, ``safe_hasattr`` added. (Jonathan Lange)
+
+* ``reraise`` added to ``testtools.compat``. (Jonathan Lange)
+
+
+0.9.11
+~~~~~~
+
+This release brings consistent use of super for better compatibility with
+multiple inheritance, fixed Python3 support, improvements in fixture and matcher
+outputs and a compat helper for testing libraries that deal with bytestrings.
+
+Changes
+-------
+
+* ``TestCase`` now uses super to call base ``unittest.TestCase`` constructor,
+ ``setUp`` and ``tearDown``. (Tim Cole, #771508)
+
+* If, when calling ``useFixture`` an error occurs during fixture set up, we
+ still attempt to gather details from the fixture. (Gavin Panella)
+
+
+Improvements
+------------
+
+* Additional compat helper for ``BytesIO`` for libraries that build on
+ testtools and are working on Python 3 porting. (Robert Collins)
+
+* Corrected documentation for ``MatchesStructure`` in the test authors
+ document. (Jonathan Lange)
+
+* ``LessThan`` error message now says something that is logically correct.
+ (Gavin Panella, #762008)
+
+* Multiple details from a single fixture are now kept separate, rather than
+ being mooshed together. (Gavin Panella, #788182)
+
+* Python 3 support now back in action. (Martin [gz], #688729)
+
+* ``try_import`` and ``try_imports`` have a callback that is called whenever
+ they fail to import a module. (Martin Pool)
+
+
+0.9.10
+~~~~~~
+
+The last release of testtools could not be easy_installed. This is considered
+severe enough for a re-release.
+
+Improvements
+------------
+
+* Include ``doc/`` in the source distribution, making testtools installable
+ from PyPI again (Tres Seaver, #757439)
+
+
+0.9.9
+~~~~~
+
+Many, many new matchers, vastly expanded documentation, stacks of bug fixes,
+better unittest2 integration. If you've ever wanted to try out testtools but
+been afraid to do so, this is the release to try.
+
+
+Changes
+-------
+
+* The timestamps generated by ``TestResult`` objects when no timing data has
+ been received are now datetime-with-timezone, which allows them to be
+ sensibly serialised and transported. (Robert Collins, #692297)
+
+Improvements
+------------
+
+* ``AnnotatedMismatch`` now correctly returns details.
+ (Jonathan Lange, #724691)
+
+* distutils integration for the testtools test runner. Can now use it for
+ 'python setup.py test'. (Christian Kampka, #693773)
+
+* ``EndsWith`` and ``KeysEqual`` now in testtools.matchers.__all__.
+ (Jonathan Lange, #692158)
+
+* ``MatchesException`` extended to support a regular expression check against
+ the str() of a raised exception. (Jonathan Lange)
+
+* ``MultiTestResult`` now forwards the ``time`` API. (Robert Collins, #692294)
+
+* ``MultiTestResult`` now documented in the manual. (Jonathan Lange, #661116)
+
+* New content helpers ``content_from_file``, ``content_from_stream`` and
+ ``attach_file`` make it easier to attach file-like objects to a
+ test. (Jonathan Lange, Robert Collins, #694126)
+
+* New ``ExpectedException`` context manager to help write tests against things
+ that are expected to raise exceptions. (Aaron Bentley)
+
+* New matchers:
+
+ * ``MatchesListwise`` matches an iterable of matchers against an iterable
+ of values. (Michael Hudson-Doyle)
+
+ * ``MatchesRegex`` matches a string against a regular expression.
+ (Michael Hudson-Doyle)
+
+ * ``MatchesStructure`` matches attributes of an object against given
+ matchers. (Michael Hudson-Doyle)
+
+ * ``AfterPreproccessing`` matches values against a matcher after passing them
+ through a callable. (Michael Hudson-Doyle)
+
+ * ``MatchesSetwise`` matches an iterable of matchers against an iterable of
+ values, without regard to order. (Michael Hudson-Doyle)
+
+* ``setup.py`` can now build a snapshot when Bazaar is installed but the tree
+ is not a Bazaar tree. (Jelmer Vernooij)
+
+* Support for running tests using distutils (Christian Kampka, #726539)
+
+* Vastly improved and extended documentation. (Jonathan Lange)
+
+* Use unittest2 exception classes if available. (Jelmer Vernooij)
+
+
+0.9.8
+~~~~~
+
+In this release we bring some very interesting improvements:
+
+* new matchers for exceptions, sets, lists, dicts and more.
+
+* experimental (works but the contract isn't supported) twisted reactor
+ support.
+
+* The built in runner can now list tests and filter tests (the -l and
+ --load-list options).
+
+Changes
+-------
+
+* addUnexpectedSuccess is translated to addFailure for test results that don't
+ know about addUnexpectedSuccess. Further, it fails the entire result for
+ all testtools TestResults (i.e. wasSuccessful() returns False after
+ addUnexpectedSuccess has been called). Note that when using a delegating
+ result such as ThreadsafeForwardingResult, MultiTestResult or
+ ExtendedToOriginalDecorator then the behaviour of addUnexpectedSuccess is
+ determined by the delegated to result(s).
+ (Jonathan Lange, Robert Collins, #654474, #683332)
+
+* startTestRun will reset any errors on the result. That is, wasSuccessful()
+ will always return True immediately after startTestRun() is called. This
+ only applies to delegated test results (ThreadsafeForwardingResult,
+ MultiTestResult and ExtendedToOriginalDecorator) if the delegated to result
+ is a testtools test result - we cannot reliably reset the state of unknown
+ test result class instances. (Jonathan Lange, Robert Collins, #683332)
+
+* Responsibility for running test cleanups has been moved to ``RunTest``.
+ This change does not affect public APIs and can be safely ignored by test
+ authors. (Jonathan Lange, #662647)
+
+Improvements
+------------
+
+* New matchers:
+
+ * ``EndsWith`` which complements the existing ``StartsWith`` matcher.
+ (Jonathan Lange, #669165)
+
+ * ``MatchesException`` matches an exception class and parameters. (Robert
+ Collins)
+
+ * ``KeysEqual`` matches a dictionary with particular keys. (Jonathan Lange)
+
+* ``assertIsInstance`` supports a custom error message to be supplied, which
+ is necessary when using ``assertDictEqual`` on Python 2.7 with a
+ ``testtools.TestCase`` base class. (Jelmer Vernooij)
+
+* Experimental support for running tests that return Deferreds.
+ (Jonathan Lange, Martin [gz])
+
+* Provide a per-test decorator, run_test_with, to specify which RunTest
+ object to use for a given test. (Jonathan Lange, #657780)
+
+* Fix the runTest parameter of TestCase to actually work, rather than raising
+ a TypeError. (Jonathan Lange, #657760)
+
+* Non-release snapshots of testtools will now work with buildout.
+ (Jonathan Lange, #613734)
+
+* Malformed SyntaxErrors no longer blow up the test suite. (Martin [gz])
+
+* ``MismatchesAll.describe`` no longer appends a trailing newline.
+ (Michael Hudson-Doyle, #686790)
+
+* New helpers for conditionally importing modules, ``try_import`` and
+ ``try_imports``. (Jonathan Lange)
+
+* ``Raises`` added to the ``testtools.matchers`` module - matches if the
+ supplied callable raises, and delegates to an optional matcher for validation
+ of the exception. (Robert Collins)
+
+* ``raises`` added to the ``testtools.matchers`` module - matches if the
+ supplied callable raises and delegates to ``MatchesException`` to validate
+ the exception. (Jonathan Lange)
+
+* Tests will now pass on Python 2.6.4: an ``Exception`` change made only in
+ 2.6.4 and reverted in Python 2.6.5 was causing test failures on that version.
+ (Martin [gz], #689858).
+
+* ``testtools.TestCase.useFixture`` has been added to glue with fixtures nicely.
+ (Robert Collins)
+
+* ``testtools.run`` now supports ``-l`` to list tests rather than executing
+ them. This is useful for integration with external test analysis/processing
+ tools like subunit and testrepository. (Robert Collins)
+
+* ``testtools.run`` now supports ``--load-list``, which takes a file containing
+ test ids, one per line, and intersects those ids with the tests found. This
+ allows fine grained control of what tests are run even when the tests cannot
+ be named as objects to import (e.g. due to test parameterisation via
+ testscenarios). (Robert Collins)
+
+* Update documentation to say how to use testtools.run() on Python 2.4.
+ (Jonathan Lange, #501174)
+
+* ``text_content`` conveniently converts a Python string to a Content object.
+ (Jonathan Lange, James Westby)
+
+
+
+0.9.7
+~~~~~
+
+Lots of little cleanups in this release; many small improvements to make your
+testing life more pleasant.
+
+Improvements
+------------
+
+* Cleanups can raise ``testtools.MultipleExceptions`` if they have multiple
+ exceptions to report. For instance, a cleanup which is itself responsible for
+ running several different internal cleanup routines might use this.
+
+* Code duplication between assertEqual and the matcher Equals has been removed.
+
+* In normal circumstances, a TestCase will no longer share details with clones
+ of itself. (Andrew Bennetts, bug #637725)
+
+* Fewer exception object cycles are generated (reduces peak memory use between
+ garbage collection). (Martin [gz])
+
+* New matchers 'DoesNotStartWith' and 'StartsWith' contributed by Canonical
+ from the Launchpad project. Written by James Westby.
+
+* Timestamps as produced by subunit protocol clients are now forwarded in the
+ ThreadsafeForwardingResult so correct test durations can be reported.
+ (Martin [gz], Robert Collins, #625594)
+
+* With unittest from Python 2.7 skipped tests will now show only the reason
+ rather than a serialisation of all details. (Martin [gz], #625583)
+
+* The testtools release process is now a little better documented and a little
+ smoother. (Jonathan Lange, #623483, #623487)
+
+
+0.9.6
+~~~~~
+
+Nothing major in this release, just enough small bits and pieces to make it
+useful enough to upgrade to.
+
+In particular, a serious bug in assertThat() has been fixed, it's easier to
+write Matchers, there's a TestCase.patch() method for those inevitable monkey
+patches and TestCase.assertEqual gives slightly nicer errors.
+
+Improvements
+------------
+
+* 'TestCase.assertEqual' now formats errors a little more nicely, in the
+ style of bzrlib.
+
+* Added `PlaceHolder` and `ErrorHolder`, TestCase-like objects that can be
+ used to add results to a `TestResult`.
+
+* 'Mismatch' now takes optional description and details parameters, so
+ custom Matchers aren't compelled to make their own subclass.
+
+* jml added a built-in UTF8_TEXT ContentType to make it slightly easier to
+ add details to test results. See bug #520044.
+
+* Fix a bug in our built-in matchers where assertThat would blow up if any
+ of them failed. All built-in mismatch objects now provide get_details().
+
+* New 'Is' matcher, which lets you assert that a thing is identical to
+ another thing.
+
+* New 'LessThan' matcher which lets you assert that a thing is less than
+ another thing.
+
+* TestCase now has a 'patch()' method to make it easier to monkey-patch
+ objects in tests. See the manual for more information. Fixes bug #310770.
+
+* MultiTestResult methods now pass back return values from the results it
+ forwards to.
+
+0.9.5
+~~~~~
+
+This release fixes some obscure traceback formatting issues that probably
+weren't affecting you but were certainly breaking our own test suite.
+
+Changes
+-------
+
+* Jamu Kakar has updated classes in testtools.matchers and testtools.runtest
+ to be new-style classes, fixing bug #611273.
+
+Improvements
+------------
+
+* Martin[gz] fixed traceback handling to handle cases where extract_tb returns
+ a source line of None. Fixes bug #611307.
+
+* Martin[gz] fixed a unicode issue that was causing the tests to fail,
+ closing bug #604187.
+
+* testtools now handles string exceptions (although why would you want to use
+ them?) and formats their tracebacks correctly. Thanks to Martin[gz] for
+ fixing bug #592262.
+
+0.9.4
+~~~~~
+
+This release overhauls the traceback formatting layer to deal with Python 2
+line numbers and traceback objects often being local user encoded strings
+rather than unicode objects. Test discovery has also been added and Python 3.1
+is also supported. Finally, the Mismatch protocol has been extended to let
+Matchers collaborate with tests in supplying detailed data about failures.
+
+Changes
+-------
+
+* testtools.utils has been renamed to testtools.compat. Importing
+ testtools.utils will now generate a deprecation warning.
+
+Improvements
+------------
+
+* Add machinery for Python 2 to create unicode tracebacks like those used by
+ Python 3. This means testtools no longer throws on encountering non-ascii
+ filenames, source lines, or exception strings when displaying test results.
+ Largely contributed by Martin[gz] with some tweaks from Robert Collins.
+
+* James Westby has supplied test discovery support using the Python 2.7
+ TestRunner in testtools.run. This requires the 'discover' module. This
+ closes bug #250764.
+
+* Python 3.1 is now supported, thanks to Martin[gz] for a partial patch.
+ This fixes bug #592375.
+
+* TestCase.addCleanup has had its docstring corrected about when cleanups run.
+
+* TestCase.skip is now deprecated in favour of TestCase.skipTest, which is the
+ Python2.7 spelling for skip. This closes bug #560436.
+
+* Tests work on IronPython patch from Martin[gz] applied.
+
+* Thanks to a patch from James Westby testtools.matchers.Mismatch can now
+ supply a get_details method, which assertThat will query to provide
+ additional attachments. This can be used to provide additional detail
+  about the mismatch that doesn't suit being included in describe(). For
+ instance, if the match process was complex, a log of the process could be
+ included, permitting debugging.
+
+* testtools.testresults.real._StringException will now answer __str__ if its
+ value is unicode by encoding with UTF8, and vice versa to answer __unicode__.
+ This permits subunit decoded exceptions to contain unicode and still format
+ correctly.
+
+0.9.3
+~~~~~
+
+More matchers, Python 2.4 support, faster test cloning by switching to copy
+rather than deepcopy and better output when exceptions occur in cleanups are
+the defining characteristics of this release.
+
+Improvements
+------------
+
+* New matcher "Annotate" that adds a simple string message to another matcher,
+ much like the option 'message' parameter to standard library assertFoo
+ methods.
+
+* New matchers "Not" and "MatchesAll". "Not" will invert another matcher, and
+  "MatchesAll" needs a successful match for all of its arguments.
+
+* On Python 2.4, where types.FunctionType cannot be deepcopied, testtools will
+ now monkeypatch copy._deepcopy_dispatch using the same trivial patch that
+ added such support to Python 2.5. The monkey patch is triggered by the
+ absence of FunctionType from the dispatch dict rather than a version check.
+ Bug #498030.
+
+* On windows the test 'test_now_datetime_now' should now work reliably.
+
+* TestCase.getUniqueInteger and TestCase.getUniqueString now have docstrings.
+
+* TestCase.getUniqueString now takes an optional prefix parameter, so you can
+ now use it in circumstances that forbid strings with '.'s, and such like.
+
+* testtools.testcase.clone_test_with_new_id now uses copy.copy, rather than
+ copy.deepcopy. Tests that need a deeper copy should use the copy protocol to
+ control how they are copied. Bug #498869.
+
+* The backtrace test result output tests should now pass on windows and other
+ systems where os.sep is not '/'.
+
+* When a cleanUp or tearDown exception occurs, it is now accumulated as a new
+ traceback in the test details, rather than as a separate call to addError /
+ addException. This makes testtools work better with most TestResult objects
+ and fixes bug #335816.
+
+
+0.9.2
+~~~~~
+
+Python 3 support, more matchers and better consistency with Python 2.7 --
+you'd think that would be enough for a point release. Well, we here on the
+testtools project think that you deserve more.
+
+We've added a hook so that user code can be called just-in-time whenever there
+is an exception, and we've also factored out the "run" logic of test cases so
+that new outcomes can be added without fiddling with the actual flow of logic.
+
+It might sound like small potatoes, but it's changes like these that will
+bring about the end of test frameworks.
+
+
+Improvements
+------------
+
+* A failure in setUp and tearDown now report as failures not as errors.
+
+* Cleanups now run after tearDown to be consistent with Python 2.7's cleanup
+ feature.
+
+* ExtendedToOriginalDecorator now passes unrecognised attributes through
+ to the decorated result object, permitting other extensions to the
+ TestCase -> TestResult protocol to work.
+
+* It is now possible to trigger code just-in-time after an exception causes
+ a test outcome such as failure or skip. See the testtools MANUAL or
+ ``pydoc testtools.TestCase.addOnException``. (bug #469092)
+
+* New matcher Equals which performs a simple equality test.
+
+* New matcher MatchesAny which looks for a match of any of its arguments.
+
+* TestCase no longer breaks if a TestSkipped exception is raised with no
+ parameters.
+
+* TestCase.run now clones test cases before they are run and runs the clone.
+ This reduces memory footprint in large test runs - state accumulated on
+  test objects during their setup and execution gets freed when the test case
+ has finished running unless the TestResult object keeps a reference.
+ NOTE: As test cloning uses deepcopy, this can potentially interfere if
+ a test suite has shared state (such as the testscenarios or testresources
+ projects use). Use the __deepcopy__ hook to control the copying of such
+ objects so that the shared references stay shared.
+
+* Testtools now accepts contributions without copyright assignment under some
+ circumstances. See HACKING for details.
+
+* Testtools now provides a convenient way to run a test suite using the
+ testtools result object: python -m testtools.run testspec [testspec...].
+
+* Testtools now works on Python 3, thanks to Benjamin Peterson.
+
+* Test execution now uses a separate class, testtools.RunTest to run single
+ tests. This can be customised and extended in a more consistent fashion than
+ the previous run method idiom. See pydoc for more information.
+
+* The test doubles that testtools itself uses are now available as part of
+  the testtools API in testtools.testresult.doubles.
+
+* TracebackContent now sets utf8 as the charset encoding, rather than not
+ setting one and encoding with the default encoder.
+
+* With python2.7 testtools.TestSkipped will be the unittest.case.SkipTest
+ exception class making skips compatible with code that manually raises the
+ standard library exception. (bug #490109)
+
+Changes
+-------
+
+* TestCase.getUniqueInteger is now implemented using itertools.count. Thanks
+ to Benjamin Peterson for the patch. (bug #490111)
+
+
+0.9.1
+~~~~~
+
+The new matcher API introduced in 0.9.0 had a small flaw where the matchee
+would be evaluated twice to get a description of the mismatch. This could lead
+to bugs if the act of matching caused side effects to occur in the matchee.
+Since having such side effects isn't desirable, we have changed the API now
+before it has become widespread.
+
+Changes
+-------
+
+* Matcher API changed to avoid evaluating matchee twice. Please consult
+ the API documentation.
+
+* TestCase.getUniqueString now uses the test id, not the test method name,
+ which works nicer with parameterised tests.
+
+Improvements
+------------
+
+* Python2.4 is now supported again.
+
+
+0.9.0
+~~~~~
+
+This release of testtools is perhaps the most interesting and exciting one
+it's ever had. We've continued in bringing together the best practices of unit
+testing from across a raft of different Python projects, but we've also
+extended our mission to incorporating unit testing concepts from other
+languages and from our own research, led by Robert Collins.
+
+We now support skipping and expected failures. We'll make sure that you
+up-call setUp and tearDown, avoiding unexpected testing weirdnesses. We're
+now compatible with Python 2.5, 2.6 and 2.7 unittest library.
+
+All in all, if you are serious about unit testing and want to get the best
+thinking from the whole Python community, you should get this release.
+
+Improvements
+------------
+
+* A new TestResult API has been added for attaching details to test outcomes.
+ This API is currently experimental, but is being prepared with the intent
+ of becoming an upstream Python API. For more details see pydoc
+ testtools.TestResult and the TestCase addDetail / getDetails methods.
+
+* assertThat has been added to TestCase. This new assertion supports
+ a hamcrest-inspired matching protocol. See pydoc testtools.Matcher for
+ details about writing matchers, and testtools.matchers for the included
+ matchers. See http://code.google.com/p/hamcrest/.
+
+* Compatible with Python 2.6 and Python 2.7
+
+* Failing to upcall in setUp or tearDown will now cause a test failure.
+ While the base methods do nothing, failing to upcall is usually a problem
+ in deeper hierarchies, and checking that the root method is called is a
+ simple way to catch this common bug.
+
+* New TestResult decorator ExtendedToOriginalDecorator which handles
+ downgrading extended API calls like addSkip to older result objects that
+ do not support them. This is used internally to make testtools simpler but
+ can also be used to simplify other code built on or for use with testtools.
+
+* New TextTestResult supporting the extended APIs that testtools provides.
+
+* Nose will no longer find 'runTest' tests in classes derived from
+ testtools.testcase.TestCase (bug #312257).
+
+* Supports the Python 2.7/3.1 addUnexpectedSuccess and addExpectedFailure
+ TestResult methods, with a support function 'knownFailure' to let tests
+ trigger these outcomes.
+
+* When using the skip feature with TestResult objects that do not support it
+ a test success will now be reported. Previously an error was reported but
+ production experience has shown that this is too disruptive for projects that
+ are using skips: they cannot get a clean run on down-level result objects.
+
+
+.. _testtools: http://pypi.python.org/pypi/testtools
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/PKG-INFO b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/PKG-INFO
new file mode 100644
index 00000000000..8bb756609df
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/PKG-INFO
@@ -0,0 +1,113 @@
+Metadata-Version: 1.1
+Name: testtools
+Version: 0.9.34
+Summary: Extensions to the Python standard library unit testing framework
+Home-page: https://github.com/testing-cabal/testtools
+Author: Jonathan M. Lange
+Author-email: jml+testtools@mumak.net
+License: UNKNOWN
+Description: ======================================
+ testtools: tasteful testing for Python
+ ======================================
+
+ testtools is a set of extensions to the Python standard library's unit testing
+ framework. These extensions have been derived from many years of experience
+ with unit testing in Python and come from many different sources. testtools
+ supports Python versions all the way back to Python 2.6.
+
+ What better way to start than with a contrived code snippet?::
+
+ from testtools import TestCase
+ from testtools.content import Content
+ from testtools.content_type import UTF8_TEXT
+ from testtools.matchers import Equals
+
+ from myproject import SillySquareServer
+
+ class TestSillySquareServer(TestCase):
+
+ def setUp(self):
+                  super(TestSillySquareServer, self).setUp()
+ self.server = self.useFixture(SillySquareServer())
+ self.addCleanup(self.attach_log_file)
+
+ def attach_log_file(self):
+ self.addDetail(
+ 'log-file',
+                      Content(UTF8_TEXT,
+ lambda: open(self.server.logfile, 'r').readlines()))
+
+ def test_server_is_cool(self):
+ self.assertThat(self.server.temperature, Equals("cool"))
+
+ def test_square(self):
+ self.assertThat(self.server.silly_square_of(7), Equals(49))
+
+
+ Why use testtools?
+ ==================
+
+ Better assertion methods
+ ------------------------
+
+ The standard assertion methods that come with unittest aren't as helpful as
+ they could be, and there aren't quite enough of them. testtools adds
+ ``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives.
+
+
+ Matchers: better than assertion methods
+ ---------------------------------------
+
+ Of course, in any serious project you want to be able to have assertions that
+ are specific to that project and the particular problem that it is addressing.
+ Rather than forcing you to define your own assertion methods and maintain your
+ own inheritance hierarchy of ``TestCase`` classes, testtools lets you write
+ your own "matchers", custom predicates that can be plugged into a unit test::
+
+ def test_response_has_bold(self):
+ # The response has bold text.
+ response = self.server.getResponse()
+ self.assertThat(response, HTMLContains(Tag('bold', 'b')))
+
+
+ More debugging info, when you need it
+ --------------------------------------
+
+ testtools makes it easy to add arbitrary data to your test result. If you
+ want to know what's in a log file when a test fails, or what the load was on
+ the computer when a test started, or what files were open, you can add that
+ information with ``TestCase.addDetail``, and it will appear in the test
+ results if that test fails.
+
+
+ Extend unittest, but stay compatible and re-usable
+ --------------------------------------------------
+
+ testtools goes to great lengths to allow serious test authors and test
+ *framework* authors to do whatever they like with their tests and their
+ extensions while staying compatible with the standard library's unittest.
+
+ testtools has completely parametrized how exceptions raised in tests are
+ mapped to ``TestResult`` methods and how tests are actually executed (ever
+ wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?)
+
+ It also provides many simple but handy utilities, like the ability to clone a
+ test, a ``MultiTestResult`` object that lets many result objects get the
+ results from one test suite, adapters to bring legacy ``TestResult`` objects
+ into our new golden age.
+
+
+ Cross-Python compatibility
+ --------------------------
+
+ testtools gives you the very latest in unit testing technology in a way that
+ will work with Python 2.6, 2.7, 3.1 and 3.2.
+
+ If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
+ 0.9.15. Up to then we supported Python 2.4 and 2.5, but we found the
+ constraints involved in not using the newer language features onerous as we
+ added more support for versions post Python 3.
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/README.rst b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/README.rst
new file mode 100644
index 00000000000..cddb5942e18
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/README.rst
@@ -0,0 +1,92 @@
+=========
+testtools
+=========
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework.
+
+These extensions have been derived from years of experience with unit testing
+in Python and come from many different sources.
+
+
+Documentation
+-------------
+
+If you would like to learn more about testtools, consult our documentation in
+the 'doc/' directory. You might like to start at 'doc/overview.rst' or
+'doc/for-test-authors.rst'.
+
+
+Licensing
+---------
+
+This project is distributed under the MIT license and copyright is owned by
+Jonathan M. Lange and the testtools authors. See LICENSE for details.
+
+Some code in 'testtools/run.py' is taken from Python's unittest module, and is
+copyright Steve Purcell and the Python Software Foundation. It is distributed
+under the same license as Python; see LICENSE for details.
+
+
+Required Dependencies
+---------------------
+
+ * Python 2.6+ or 3.0+
+
+If you would like to use testtools with earlier Pythons, please use testtools
+0.9.15.
+
+ * extras (helpers that we intend to push into Python itself in the near
+ future).
+
+
+Optional Dependencies
+---------------------
+
+If you would like to use our undocumented, unsupported Twisted support, then
+you will need Twisted.
+
+If you want to use ``fixtures`` then you can either install fixtures (e.g. from
+https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
+or alternatively just make sure your fixture objects obey the same protocol.
+
+
+Bug reports and patches
+-----------------------
+
+Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
+Patches should be submitted as Github pull requests, or mailed to the authors.
+See ``doc/hacking.rst`` for more details.
+
+There's no mailing list for this project yet, however the testing-in-python
+mailing list may be a useful resource:
+
+ * Address: testing-in-python@lists.idyll.org
+ * Subscription link: http://lists.idyll.org/listinfo/testing-in-python
+
+
+History
+-------
+
+testtools used to be called 'pyunit3k'. The name was changed to avoid
+conflating the library with the Python 3.0 release (commonly referred to as
+'py3k').
+
+
+Thanks
+------
+
+ * Canonical Ltd
+ * Bazaar
+ * Twisted Matrix Labs
+ * Robert Collins
+ * Andrew Bennetts
+ * Benjamin Peterson
+ * Jamu Kakar
+ * James Westby
+ * Martin [gz]
+ * Michael Hudson-Doyle
+ * Aaron Bentley
+ * Christian Kampka
+ * Gavin Panella
+ * Martin Pool
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/Makefile b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/Makefile
new file mode 100644
index 00000000000..b5d07af57f2
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/Makefile
@@ -0,0 +1,89 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/testtools.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/testtools.qhc"
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/conf.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/conf.py
new file mode 100644
index 00000000000..de5fdd4224e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/conf.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8 -*-
+#
+# testtools documentation build configuration file, created by
+# sphinx-quickstart on Sun Nov 28 13:45:40 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'testtools'
+copyright = u'2010, The testtools authors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = 'VERSION'
+# The full version, including alpha/beta/rc tags.
+release = 'VERSION'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'testtoolsdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'testtools.tex', u'testtools Documentation',
+ u'The testtools authors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst
new file mode 100644
index 00000000000..d105b4f04e5
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst
@@ -0,0 +1,454 @@
+============================
+testtools for framework folk
+============================
+
+Introduction
+============
+
+In addition to having many features :doc:`for test authors
+<for-test-authors>`, testtools also has many bits and pieces that are useful
+for folk who write testing frameworks.
+
+If you are the author of a test runner, are working on a very large
+unit-tested project, are trying to get one testing framework to play nicely
+with another or are hacking away at getting your test suite to run in parallel
+over a heterogeneous cluster of machines, this guide is for you.
+
+This manual is a summary. You can get details by consulting the `testtools
+API docs`_.
+
+
+Extensions to TestCase
+======================
+
+In addition to the ``TestCase`` specific methods, we have extensions for
+``TestSuite`` that also apply to ``TestCase`` (because ``TestCase`` and
+``TestSuite`` follow the Composite pattern).
+
+Custom exception handling
+-------------------------
+
+testtools provides a way to control how test exceptions are handled. To do
+this, add a new exception to ``self.exception_handlers`` on a
+``testtools.TestCase``. For example::
+
+ >>> self.exception_handlers.insert(-1, (ExceptionClass, handler)).
+
+Having done this, if any of ``setUp``, ``tearDown``, or the test method raise
+``ExceptionClass``, ``handler`` will be called with the test case, test result
+and the raised exception.
+
+Use this if you want to add a new kind of test result, that is, if you think
+that ``addError``, ``addFailure`` and so forth are not enough for your needs.
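+
+As a minimal sketch (``ReportedAsSkip`` and ``report_as_skip`` are hypothetical
+names invented for this example, not part of testtools)::
+
+    from testtools import TestCase
+
+    class ReportedAsSkip(Exception):
+        """Hypothetical exception that we want reported as a skip."""
+
+    def report_as_skip(case, result, exc_value):
+        # Called with the test case, the test result and the raised
+        # exception; here we route the exception to addSkip instead of
+        # letting it become an error.
+        result.addSkip(case, str(exc_value))
+
+    class ExampleTest(TestCase):
+
+        def setUp(self):
+            super(ExampleTest, self).setUp()
+            self.exception_handlers.insert(
+                -1, (ReportedAsSkip, report_as_skip))
+
+        def test_something(self):
+            raise ReportedAsSkip("handled by the custom handler")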
+
+
+Controlling test execution
+--------------------------
+
+If you want to control more than just how exceptions are raised, you can
+provide a custom ``RunTest`` to a ``TestCase``. The ``RunTest`` object can
+change everything about how the test executes.
+
+To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
+takes a test and an optional list of exception handlers. Instances returned
+by the factory must have a ``run()`` method that takes an optional ``TestResult``
+object.
+
+The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
+method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla
+way that Python's standard unittest_ does.
+
+To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something
+like this::
+
+ class SomeTests(TestCase):
+ run_tests_with = CustomRunTestFactory
+
+To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do::
+
+ class SomeTests(TestCase):
+ @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
+ def test_something(self):
+ pass
+
+In addition, either of these can be overridden by passing a factory in to the
+``TestCase`` constructor with the optional ``runTest`` argument.
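+
+A minimal sketch of a custom ``RunTest`` (``LoggingRunTest`` is a hypothetical
+name; the base class itself already acts as the factory described above)::
+
+    from testtools import RunTest, TestCase
+
+    class LoggingRunTest(RunTest):
+
+        def run(self, result=None):
+            # self.case is the test this RunTest was constructed with.
+            print('about to run %s' % self.case.id())
+            return super(LoggingRunTest, self).run(result)
+
+    class SomeTests(TestCase):
+        run_tests_with = LoggingRunTest
+
+        def test_something(self):
+            pass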
+
+
+Test renaming
+-------------
+
+``testtools.clone_test_with_new_id`` is a function to copy a test case
+instance to one with a new name. This is helpful for implementing test
+parameterization.
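+
+For example (``ExampleTest`` is a hypothetical test case class)::
+
+    from testtools import clone_test_with_new_id
+
+    test = ExampleTest('test_something')
+    # The clone runs the same code but reports under a new id, which is
+    # handy when generating one test per scenario.
+    pg_test = clone_test_with_new_id(test, test.id() + '(postgres)')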
+
+.. _force_failure:
+
+Delayed Test Failure
+--------------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to True will
+cause ``testtools.RunTest`` to fail the test case after the test has finished.
+This is useful when you want to cause a test to fail, but don't want to
+prevent the remainder of the test code from being executed.
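+
+A minimal sketch (the data-gathering helpers here are hypothetical)::
+
+    from testtools import TestCase
+
+    class ExampleTest(TestCase):
+
+        def test_collects_diagnostics(self):
+            if not precondition_holds():
+                # Mark the test as failed, but keep executing so the
+                # diagnostics below are still gathered and attached.
+                self.force_failure = True
+            gather_more_diagnostics(self)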
+
+Test placeholders
+=================
+
+Sometimes, it's useful to be able to add things to a test suite that are not
+actually tests. For example, you might wish to represent import failures
+that occur during test discovery as tests, so that your test result object
+doesn't have to do special work to handle them nicely.
+
+testtools provides two such objects, called "placeholders": ``PlaceHolder``
+and ``ErrorHolder``. ``PlaceHolder`` takes a test id and an optional
+description. When it's run, it succeeds. ``ErrorHolder`` takes a test id,
+an error and an optional short description. When it's run, it reports that
+error.
+
+These placeholders are best used to log events that occur outside the test
+suite proper, but are still very relevant to its results.
+
+e.g.::
+
+ >>> suite = TestSuite()
+ >>> suite.add(PlaceHolder('I record an event'))
+ >>> suite.run(TextTestResult(verbose=True))
+ I record an event [OK]
+
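+``ErrorHolder`` can be used in much the same way for problems caught outside
+any real test. A minimal sketch (the failing import is hypothetical, and the
+constructor arguments are as we understand them: a test id, an exc_info tuple
+and an optional short description)::
+
+    import sys
+    from testtools import ErrorHolder
+
+    try:
+        import module_that_fails_to_import
+    except ImportError:
+        holder = ErrorHolder(
+            'module_that_fails_to_import',
+            sys.exc_info(),
+            'import failed during test discovery')
+        # The holder can then be added to a suite like any other test.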
+
+Test instance decorators
+========================
+
+DecorateTestCaseResult
+----------------------
+
+This object calls out to your code when ``run`` / ``__call__`` are called and
+allows the result object that will be used to run the test to be altered. This
+is very useful when working with a test runner that doesn't know your test case
+requirements. For instance, it can be used to inject a ``unittest2`` compatible
+adapter when someone attempts to run your test suite with a ``TestResult`` that
+does not support ``addSkip`` or other ``unittest2`` methods. Similarly it can
+aid the migration to ``StreamResult``.
+
+e.g.::
+
+ >>> suite = TestSuite()
+ >>> suite = DecorateTestCaseResult(suite, ExtendedToOriginalDecorator)
+
+Extensions to TestResult
+========================
+
+StreamResult
+------------
+
+``StreamResult`` is a new API for dealing with test case progress that supports
+concurrent and distributed testing without the various issues that
+``TestResult`` has such as buffering in multiplexers.
+
+The design has several key principles:
+
+* Nothing that requires up-front knowledge of all tests.
+
+* Deal with tests running in concurrent environments, potentially distributed
+ across multiple processes (or even machines). This implies allowing multiple
+ tests to be active at once, supplying time explicitly, being able to
+ differentiate between tests running in different contexts and removing any
+ assumption that tests are necessarily in the same process.
+
+* Make the API as simple as possible - each aspect should do one thing well.
+
+The ``TestResult`` API this is intended to replace has three different clients.
+
+* Each executing ``TestCase`` notifies the ``TestResult`` about activity.
+
+* The testrunner running tests uses the API to find out whether the test run
+ had errors, how many tests ran and so on.
+
+* Finally, each ``TestCase`` queries the ``TestResult`` to see whether the test
+ run should be aborted.
+
+With ``StreamResult`` we need to be able to provide a ``TestResult`` compatible
+adapter (``StreamToExtendedDecorator``) to allow incremental migration.
+However, we don't need to conflate things long term. So - we define three
+separate APIs, and merely mix them together to provide the
+``StreamToExtendedDecorator``. ``StreamResult`` is the first of these APIs -
+meeting the needs of ``TestCase`` clients. It handles events generated by
+running tests. See the API documentation for ``testtools.StreamResult`` for
+details.
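+
+A minimal sketch of a ``StreamResult`` consumer, counting failing statuses
+only (the ``status`` method accepts a number of other keyword arguments; see
+the API documentation)::
+
+    from testtools import StreamResult
+
+    class FailureCounter(StreamResult):
+
+        def startTestRun(self):
+            self.failures = 0
+
+        def status(self, test_id=None, test_status=None, **kwargs):
+            # Each call reports one event for one test; 'fail' is one of
+            # the defined test_status values.
+            if test_status == 'fail':
+                self.failures += 1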
+
+StreamSummary
+-------------
+
+Secondly we define the ``StreamSummary`` API which takes responsibility for
+collating errors, detecting incomplete tests and counting tests. This provides
+a compatible API with those aspects of ``TestResult``. Again, see the API
+documentation for ``testtools.StreamSummary``.
+
+TestControl
+-----------
+
+Lastly we define the ``TestControl`` API which is used to provide the
+``shouldStop`` and ``stop`` elements from ``TestResult``. Again, see the API
+documentation for ``testtools.TestControl``. ``TestControl`` can be paired with
+a ``StreamFailFast`` to trigger aborting a test run when a failure is observed.
+Aborting multiple workers in a distributed environment requires hooking
+whatever signalling mechanism the distributed environment has up to a
+``TestControl`` in each worker process.
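+
+A minimal sketch of the pairing (treat the exact wiring as an assumption and
+check the API docs)::
+
+    from testtools import StreamFailFast, TestControl
+
+    control = TestControl()
+    # StreamFailFast calls the supplied callable when it sees a failing
+    # status, so pointing it at control.stop requests an abort; the runner
+    # then checks control.shouldStop between tests.
+    fail_fast = StreamFailFast(control.stop)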
+
+StreamTagger
+------------
+
+A ``StreamResult`` filter that adds or removes tags from events::
+
+ >>> from testtools import StreamTagger
+ >>> sink = StreamResult()
+ >>> result = StreamTagger([sink], set(['add']), set(['discard']))
+ >>> result.startTestRun()
+ >>> # Run tests against result here.
+ >>> result.stopTestRun()
+
+StreamToDict
+------------
+
+A simplified API for dealing with ``StreamResult`` streams. Each test is
+buffered until it completes and then reported as a trivial dict. This makes
+writing analysers very easy - you can ignore all the plumbing and just work
+with the result. e.g.::
+
+ >>> from testtools import StreamToDict
+ >>> def handle_test(test_dict):
+ ... print(test_dict['id'])
+ >>> result = StreamToDict(handle_test)
+ >>> result.startTestRun()
+ >>> # Run tests against result here.
+ >>> # At stopTestRun() any incomplete buffered tests are announced.
+ >>> result.stopTestRun()
+
+ExtendedToStreamDecorator
+-------------------------
+
+This is a hybrid object that combines both the ``Extended`` and ``Stream``
+``TestResult`` APIs into one class, but only emits ``StreamResult`` events.
+This is useful when a ``StreamResult`` stream is desired, but you cannot
+be sure that the tests which will run have been updated to the ``StreamResult``
+API.
+
+StreamToExtendedDecorator
+-------------------------
+
+This is a simple converter that emits the ``ExtendedTestResult`` API in
+response to events from the ``StreamResult`` API. Useful when outputting
+``StreamResult`` events from a ``TestCase`` but the supplied ``TestResult``
+does not support the ``status`` and ``file`` methods.
+
+StreamToQueue
+-------------
+
+This is a ``StreamResult`` decorator for reporting tests from multiple threads
+at once. Each method submits an event to a supplied Queue object as a simple
+dict. See ``ConcurrentStreamTestSuite`` for a convenient way to use this.
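+
+A minimal sketch (we assume the constructor takes the queue and a route code
+identifying this source)::
+
+    from testtools import StreamToQueue
+    try:
+        from queue import Queue
+    except ImportError:  # Python 2
+        from Queue import Queue
+
+    queue = Queue()
+    result = StreamToQueue(queue, 'worker-0')
+    # Events sent to 'result' arrive on 'queue' as dicts; a single consumer
+    # thread can pop them and replay them into one StreamResult.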
+
+TimestampingStreamResult
+------------------------
+
+This is a ``StreamResult`` decorator for adding timestamps to events that lack
+them. This allows writing the simplest possible generators of events and
+passing the events via this decorator to get timestamped data. As long as
+no buffering, queueing or blocking happens before the timestamper sees the
+event, the timestamp will be as accurate as if the original event had it.
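+
+A minimal sketch::
+
+    from testtools import StreamResult, TimestampingStreamResult
+
+    target = StreamResult()
+    result = TimestampingStreamResult(target)
+    # Events passed to 'result' without a timestamp are forwarded to
+    # 'target' with the current time filled in.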
+
+StreamResultRouter
+------------------
+
+This is a ``StreamResult`` which forwards events to an arbitrary set of target
+``StreamResult`` objects. Events that have no forwarding rule are passed on to
+a fallback ``StreamResult`` for processing. The mapping can be changed at
+runtime, allowing great flexibility and responsiveness to changes. Because
+the mapping can change dynamically and the same recipient could appear in
+two different mappings, ``startTestRun`` and ``stopTestRun`` handling is
+fine-grained and left up to the user.
+
+If no fallback has been supplied, an unroutable event will raise an exception.
+
+For instance::
+
+ >>> router = StreamResultRouter()
+ >>> sink = doubles.StreamResult()
+ >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+ ... consume_route=True)
+ >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+Would remove the ``0/`` from the route_code and forward the event like so::
+
+ >>> sink.status(test_id='foo', route_code='1', test_status='uxsuccess')
+
+See ``pydoc testtools.StreamResultRouter`` for details.
+
+TestResult.addSkip
+------------------
+
+This method is called on result objects when a test skips. The
+``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
+dict. They can be reported on in much the same way as successful tests.
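+
+For example, a sketch of inspecting the reasons after a run::
+
+    >>> from testtools import TestResult
+    >>> result = TestResult()
+    >>> result.startTestRun()
+    >>> # ... run tests, some of which call self.skipTest("no network") ...
+    >>> result.stopTestRun()
+    >>> # result.skip_reasons now maps each reason string to the list of
+    >>> # tests that were skipped for that reason.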
+
+
+TestResult.time
+---------------
+
+This method controls the time used by a ``TestResult``, permitting accurate
+timing of test results gathered on different machines or in different threads.
+See ``pydoc testtools.TestResult.time`` for more details.
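+
+A sketch of freezing and then releasing the clock (the timestamp is
+arbitrary; testtools normally works with timezone-aware datetimes)::
+
+    >>> from datetime import datetime
+    >>> from testtools import TestResult
+    >>> result = TestResult()
+    >>> result.time(datetime(2014, 1, 1, 12, 0, 0))  # use this time from now on
+    >>> # ... gather some results ...
+    >>> result.time(None)  # revert to reading the wall clock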
+
+
+ThreadsafeForwardingResult
+--------------------------
+
+A ``TestResult`` which forwards activity to another test result, but synchronises
+on a semaphore to ensure that all the activity for a single test arrives in a
+batch. This allows simple TestResults which do not expect concurrent test
+reporting to be fed the activity from multiple test threads or processes.
+
+Note that when you provide multiple errors for a single test, the target sees
+each error as a distinct complete test.
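+
+A minimal sketch of wiring one up per worker thread::
+
+    >>> from threading import Semaphore
+    >>> from testtools import TestResult, ThreadsafeForwardingResult
+    >>> target = TestResult()
+    >>> semaphore = Semaphore(1)
+    >>> forwarder = ThreadsafeForwardingResult(target, semaphore)
+    >>> # Give each worker thread its own forwarder; they share the target
+    >>> # and the semaphore.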
+
+
+MultiTestResult
+---------------
+
+A test result that dispatches its events to many test results. Use this
+to combine multiple different test result objects into one test result object
+that can be passed to ``TestCase.run()`` or similar. For example::
+
+ a = TestResult()
+ b = TestResult()
+ combined = MultiTestResult(a, b)
+ combined.startTestRun() # Calls a.startTestRun() and b.startTestRun()
+
+Each of the methods on ``MultiTestResult`` will return a tuple of whatever the
+component test results return.
+
+
+TestResultDecorator
+-------------------
+
+Not strictly a ``TestResult``, but something that implements the extended
+``TestResult`` interface of testtools. It can be subclassed to create objects
+that wrap ``TestResults``.
+
+
+TextTestResult
+--------------
+
+A ``TestResult`` that provides a text UI very similar to the Python standard
+library UI. Key differences are that it supports the extended outcomes and
+details API, and is completely encapsulated into the result object, permitting
+it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
+are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
+These limitations will be corrected soon.
+
+
+ExtendedToOriginalDecorator
+---------------------------
+
+Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to
+meet the testtools ``TestResult`` API.
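+
+For example (a sketch; ``legacy_result`` stands in for any old-style result)::
+
+    from testtools import ExtendedToOriginalDecorator
+
+    result = ExtendedToOriginalDecorator(legacy_result)
+    # result accepts the extended API (details, tags and so on) and degrades
+    # gracefully to whatever legacy_result actually supports.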
+
+
+Test Doubles
+------------
+
+In testtools.testresult.doubles there are three test doubles that testtools
+uses for its own testing: ``Python26TestResult``, ``Python27TestResult``,
+``ExtendedTestResult``. These TestResult objects implement a single variation of
+the TestResult API each, and log activity to a list ``self._events``. These are
+made available for the convenience of people writing their own extensions.
+
+
+startTestRun and stopTestRun
+----------------------------
+
+Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called
+before and after the entire test run. 'stopTestRun' is particularly useful for
+test results that wish to produce summary output.
+
+``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun``
+methods, and the default testtools runner will call these methods
+appropriately.
+
+The ``startTestRun`` method will reset any errors, failures and so forth on
+the result, making the result object look as if no tests have been run.
+
+
+Extensions to TestSuite
+=======================
+
+ConcurrentTestSuite
+-------------------
+
+A TestSuite for parallel testing. This is used in conjunction with a helper that
+runs a single suite in some parallel fashion (for instance, forking, handing
+off to a subprocess, to a compute cloud, or simple threads).
+ConcurrentTestSuite uses the helper to get a number of separate runnable
+objects, each with a run(result) method, runs them all in threads and uses the
+ThreadsafeForwardingResult to coalesce their activity.
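+
+A sketch of a simple helper that splits a suite into its individual tests and
+runs each in its own thread::
+
+    from testtools import ConcurrentTestSuite
+    from testtools.testsuite import iterate_tests
+
+    def split_suite(suite):
+        # One runnable per test case.
+        return list(iterate_tests(suite))
+
+    concurrent_suite = ConcurrentTestSuite(suite, split_suite)
+    concurrent_suite.run(result)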
+
+ConcurrentStreamTestSuite
+-------------------------
+
+A variant of ConcurrentTestSuite that uses the new StreamResult API instead of
+the TestResult API. ConcurrentStreamTestSuite coordinates running some number
+of test/suites concurrently, with one StreamToQueue per test/suite.
+
+Each test/suite gets given its own ExtendedToStreamDecorator +
+TimestampingStreamResult wrapped StreamToQueue instance, forwarding onto the
+StreamResult that ConcurrentStreamTestSuite.run was called with.
+
+ConcurrentStreamTestSuite is a thin shim and it is easy to implement your own
+specialised form if that is needed.
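+
+A sketch of driving it directly (the route codes are illustrative)::
+
+    from testtools import ConcurrentStreamTestSuite
+
+    def make_tests():
+        # Return (case_or_suite, route_code) pairs, one per worker.
+        return [(suite_one, '0'), (suite_two, '1')]
+
+    concurrent_suite = ConcurrentStreamTestSuite(make_tests)
+    concurrent_suite.run(stream_result)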
+
+FixtureSuite
+------------
+
+A test suite that sets up a fixture_ before running any tests, and then tears
+it down after all of the tests are run. The fixture is *not* made available to
+any of the tests due to there being no standard channel for suites to pass
+information to the tests they contain (and we don't have enough data on what
+such a channel would need to achieve to design a good one yet - or even decide
+if it is a good idea).
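+
+For instance, a sketch using a ``TempDir`` fixture from the ``fixtures``
+package::
+
+    from fixtures import TempDir
+    from testtools.testsuite import FixtureSuite
+
+    suite = FixtureSuite(TempDir(), [SomeTest('test_one'), SomeTest('test_two')])
+    suite.run(result)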
+
+sorted_tests
+------------
+
+Given the composite structure of TestSuite / TestCase, sorting tests is
+problematic - you can't tell what functionality is embedded into custom Suite
+implementations. In order to deliver consistent test orders when using test
+discovery (see http://bugs.python.org/issue16709), testtools flattens and
+sorts tests that have the standard TestSuite, and defines a new method
+sort_tests, which can be used by non-standard TestSuites to know when they
+should sort their tests. An example implementation can be seen at
+``FixtureSuite.sorted_tests``.
+
+If there are duplicate test ids in a suite, ValueError will be raised.
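+
+For example (a sketch)::
+
+    from testtools.testsuite import sorted_tests
+
+    suite = loader.loadTestsFromName('exampletest')
+    suite = sorted_tests(suite)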
+
+filter_by_ids
+-------------
+
+Similarly to ``sorted_tests`` running a subset of tests is problematic - the
+standard run interface provides no way to limit what runs. Rather than
+confounding the two problems (selection and execution) we defined a method
+that filters the tests in a suite (or a case) by their unique test id.
+If you are writing custom wrapping suites, consider implementing filter_by_ids
+to support this (though most wrappers that subclass ``unittest.TestSuite`` will
+work just fine); see ``testtools.testsuite.filter_by_ids`` for details.
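+
+For example (a sketch; the test id is illustrative)::
+
+    from testtools.testsuite import filter_by_ids
+
+    wanted_ids = set(['exampletest.TestSillySquare.test_square'])
+    suite = filter_by_ids(suite, wanted_ids)
+    suite.run(result)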
+
+Extensions to TestRunner
+========================
+
+To facilitate custom listing of tests, ``testtools.run.TestProgram`` attempts
+to call ``list`` on the ``TestRunner``, falling back to a generic
+implementation if it is not present.
+
+.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _fixture: http://pypi.python.org/pypi/fixtures
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst
new file mode 100644
index 00000000000..03849e65181
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst
@@ -0,0 +1,1432 @@
+==========================
+testtools for test authors
+==========================
+
+If you are writing tests for a Python project and you (rather wisely) want to
+use testtools to do so, this is the manual for you.
+
+We assume that you already know Python and that you know something about
+automated testing already.
+
+If you are a test author of an unusually large or unusually unusual test
+suite, you might be interested in :doc:`for-framework-folk`.
+
+You might also be interested in the `testtools API docs`_.
+
+
+Introduction
+============
+
+testtools is a set of extensions to Python's standard unittest module.
+Writing tests with testtools is very much like writing tests with standard
+Python, or with Twisted's "trial_", or nose_, except a little bit easier and
+more enjoyable.
+
+Below, we'll try to give some examples of how to use testtools in its most
+basic way, as well as a sort of feature-by-feature breakdown of the cool bits
+that you could easily miss.
+
+
+The basics
+==========
+
+Here's what a basic testtools unit test looks like::
+
+ from testtools import TestCase
+ from myproject import silly
+
+ class TestSillySquare(TestCase):
+ """Tests for silly square function."""
+
+ def test_square(self):
+ # 'square' takes a number and multiplies it by itself.
+ result = silly.square(7)
+ self.assertEqual(result, 49)
+
+ def test_square_bad_input(self):
+ # 'square' raises a TypeError if it's given bad input, say a
+ # string.
+ self.assertRaises(TypeError, silly.square, "orange")
+
+
+Here you have a class that inherits from ``testtools.TestCase`` and bundles
+together a bunch of related tests. The tests themselves are methods on that
+class that begin with ``test_``.
+
+Running your tests
+------------------
+
+You can run these tests in many ways. testtools provides a very basic
+mechanism for doing so::
+
+ $ python -m testtools.run exampletest
+ Tests running...
+ Ran 2 tests in 0.000s
+
+ OK
+
+where 'exampletest' is a module that contains unit tests. By default,
+``testtools.run`` will *not* recursively search the module or package for unit
+tests. To do this, you will need to either have the discover_ module
+installed or have Python 2.7 or later, and then run::
+
+ $ python -m testtools.run discover packagecontainingtests
+
+For more information see the Python 2.7 unittest documentation, or::
+
+ python -m testtools.run --help
+
+As your testing needs grow and evolve, you will probably want to use a more
+sophisticated test runner. There are many of these for Python, and almost all
+of them will happily run testtools tests. In particular:
+
+* testrepository_
+* Trial_
+* nose_
+* unittest2_
+* `zope.testrunner`_ (aka zope.testing)
+
+From now on, we'll assume that you know how to run your tests.
+
+Running tests with Distutils
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you are using Distutils_ to build your Python project, you can use the testtools
+Distutils_ command to integrate testtools into your Distutils_ workflow::
+
+ from distutils.core import setup
+ from testtools import TestCommand
+ setup(name='foo',
+ version='1.0',
+ py_modules=['foo'],
+ cmdclass={'test': TestCommand}
+ )
+
+You can then run::
+
+ $ python setup.py test -m exampletest
+ Tests running...
+ Ran 2 tests in 0.000s
+
+ OK
+
+For more information about the capabilities of the `TestCommand` command see::
+
+ $ python setup.py test --help
+
+You can use the `setup configuration`_ to specify the default behavior of the
+`TestCommand` command.
+
+Assertions
+==========
+
+The core of automated testing is making assertions about the way things are,
+and getting a nice, helpful, informative error message when things are not as
+they ought to be.
+
+All of the assertions that you can find in Python standard unittest_ can be
+found in testtools (remember, testtools extends unittest). testtools changes
+the behaviour of some of those assertions slightly and adds some new
+assertions that you will almost certainly find useful.
+
+
+Improved assertRaises
+---------------------
+
+``TestCase.assertRaises`` returns the caught exception. This is useful for
+asserting more things about the exception than just the type::
+
+ def test_square_bad_input(self):
+ # 'square' raises a TypeError if it's given bad input, say a
+ # string.
+ e = self.assertRaises(TypeError, silly.square, "orange")
+ self.assertEqual("orange", e.bad_value)
+ self.assertEqual("Cannot square 'orange', not a number.", str(e))
+
+Note that this is incompatible with the ``assertRaises`` in unittest2 and
+Python2.7.
+
+
+ExpectedException
+-----------------
+
+If you are using a version of Python that supports the ``with`` context
+manager syntax, you might prefer to use that syntax to ensure that code raises
+particular errors. ``ExpectedException`` does just that. For example::
+
+ def test_square_root_bad_input_2(self):
+ # 'square' raises a TypeError if it's given bad input.
+ with ExpectedException(TypeError, "Cannot square.*"):
+ silly.square('orange')
+
+The first argument to ``ExpectedException`` is the type of exception you
+expect to see raised. The second argument is optional, and can be either a
+regular expression or a matcher. If it is a regular expression, the ``str()``
+of the raised exception must match the regular expression. If it is a matcher,
+then the raised exception object must match it. The optional third argument
+``msg`` will cause the raised error to be annotated with that message.
+
+
+assertIn, assertNotIn
+---------------------
+
+These two assertions check whether a value is in a sequence and whether a
+value is not in a sequence. They are "assert" versions of the ``in`` and
+``not in`` operators. For example::
+
+ def test_assert_in_example(self):
+ self.assertIn('a', 'cat')
+ self.assertNotIn('o', 'cat')
+ self.assertIn(5, list_of_primes_under_ten)
+ self.assertNotIn(12, list_of_primes_under_ten)
+
+
+assertIs, assertIsNot
+---------------------
+
+These two assertions check whether values are identical to one another. This
+is sometimes useful when you want to test something more strict than mere
+equality. For example::
+
+ def test_assert_is_example(self):
+ foo = [None]
+ foo_alias = foo
+ bar = [None]
+ self.assertIs(foo, foo_alias)
+ self.assertIsNot(foo, bar)
+ self.assertEqual(foo, bar) # They are equal, but not identical
+
+
+assertIsInstance
+----------------
+
+As much as we love duck-typing and polymorphism, sometimes you need to check
+whether or not a value is of a given type. This method does that. For
+example::
+
+ def test_assert_is_instance_example(self):
+ now = datetime.now()
+ self.assertIsInstance(now, datetime)
+
+Note that there is no ``assertIsNotInstance`` in testtools currently.
+
+
+expectFailure
+-------------
+
+Sometimes it's useful to write tests that fail. For example, you might want
+to turn a bug report into a unit test, but you don't know how to fix the bug
+yet. Or perhaps you want to document a known, temporary deficiency in a
+dependency.
+
+testtools gives you the ``TestCase.expectFailure`` to help with this. You use
+it to say that you expect this assertion to fail. When the test runs and the
+assertion fails, testtools will report it as an "expected failure".
+
+Here's an example::
+
+ def test_expect_failure_example(self):
+ self.expectFailure(
+ "cats should be dogs", self.assertEqual, 'cats', 'dogs')
+
+As long as 'cats' is not equal to 'dogs', the test will be reported as an
+expected failure.
+
+If ever by some miracle 'cats' becomes 'dogs', then testtools will report an
+"unexpected success". Unlike standard unittest, testtools treats this as
+something that fails the test suite, like an error or a failure.
+
+
+Matchers
+========
+
+The built-in assertion methods are very useful; they are the bread and butter
+of writing tests. However, soon enough you will probably want to write your
+own assertions. Perhaps there are domain specific things that you want to
+check (e.g. assert that two widgets are aligned parallel to the flux grid), or
+perhaps you want to check something that could almost but not quite be found
+in some other standard library (e.g. assert that two paths point to the same
+file).
+
+When you are in such situations, you could either make a base class for your
+project that inherits from ``testtools.TestCase`` and make sure that all of
+your tests derive from that, *or* you could use the testtools ``Matcher``
+system.
+
+
+Using Matchers
+--------------
+
+Here's a really basic example using stock matchers found in testtools::
+
+ import testtools
+ from testtools.matchers import Equals
+
+ class TestSquare(TestCase):
+ def test_square(self):
+ result = square(7)
+ self.assertThat(result, Equals(49))
+
+The line ``self.assertThat(result, Equals(49))`` is equivalent to
+``self.assertEqual(result, 49)`` and means "assert that ``result`` equals 49".
+The difference is that ``assertThat`` is a more general method that takes some
+kind of observed value (in this case, ``result``) and any matcher object
+(here, ``Equals(49)``).
+
+The matcher object could be absolutely anything that implements the Matcher
+protocol. This means that you can make more complex matchers by combining
+existing ones::
+
+ def test_square_silly(self):
+ result = square(7)
+ self.assertThat(result, Not(Equals(50)))
+
+Which is roughly equivalent to::
+
+ def test_square_silly(self):
+ result = square(7)
+ self.assertNotEqual(result, 50)
+
+
+Stock matchers
+--------------
+
+testtools comes with many matchers built in. They can all be found in and
+imported from the ``testtools.matchers`` module.
+
+Equals
+~~~~~~
+
+Matches if two items are equal. For example::
+
+ def test_equals_example(self):
+ self.assertThat([42], Equals([42]))
+
+
+Is
+~~~
+
+Matches if two items are identical. For example::
+
+ def test_is_example(self):
+ foo = object()
+ self.assertThat(foo, Is(foo))
+
+
+IsInstance
+~~~~~~~~~~
+
+Adapts isinstance() to use as a matcher. For example::
+
+ def test_isinstance_example(self):
+ class MyClass:pass
+ self.assertThat(MyClass(), IsInstance(MyClass))
+ self.assertThat(MyClass(), IsInstance(MyClass, str))
+
+
+The raises helper
+~~~~~~~~~~~~~~~~~
+
+Matches if a callable raises a particular type of exception. For example::
+
+ def test_raises_example(self):
+ self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+This is actually a convenience function that combines two other matchers:
+Raises_ and MatchesException_.
+
+
+DocTestMatches
+~~~~~~~~~~~~~~
+
+Matches a string as if it were the output of a doctest_ example. Very useful
+for making assertions about large chunks of text. For example::
+
+ import doctest
+
+ def test_doctest_example(self):
+ output = "Colorless green ideas"
+ self.assertThat(
+ output,
+ DocTestMatches("Colorless ... ideas", doctest.ELLIPSIS))
+
+We highly recommend using the following flags::
+
+ doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF
+
+
+GreaterThan
+~~~~~~~~~~~
+
+Matches if the given thing is greater than the thing in the matcher. For
+example::
+
+ def test_greater_than_example(self):
+ self.assertThat(3, GreaterThan(2))
+
+
+LessThan
+~~~~~~~~
+
+Matches if the given thing is less than the thing in the matcher. For
+example::
+
+ def test_less_than_example(self):
+ self.assertThat(2, LessThan(3))
+
+
+StartsWith, EndsWith
+~~~~~~~~~~~~~~~~~~~~
+
+These matchers check to see if a string starts with or ends with a particular
+substring. For example::
+
+ def test_starts_and_ends_with_example(self):
+ self.assertThat('underground', StartsWith('und'))
+ self.assertThat('underground', EndsWith('und'))
+
+
+Contains
+~~~~~~~~
+
+This matcher checks to see if the given thing contains the thing in the
+matcher. For example::
+
+ def test_contains_example(self):
+ self.assertThat('abc', Contains('b'))
+
+
+MatchesException
+~~~~~~~~~~~~~~~~
+
+Matches an exc_info tuple if the exception is of the correct type. For
+example::
+
+ def test_matches_exception_example(self):
+ try:
+ raise RuntimeError('foo')
+ except RuntimeError:
+ exc_info = sys.exc_info()
+ self.assertThat(exc_info, MatchesException(RuntimeError))
+ self.assertThat(exc_info, MatchesException(RuntimeError('foo')))
+
+Most of the time, you will want to use `The raises helper`_ instead.
+
+
+NotEquals
+~~~~~~~~~
+
+Matches if something is not equal to something else. Note that this is subtly
+different to ``Not(Equals(x))``. ``NotEquals(x)`` will match if ``y != x``,
+``Not(Equals(x))`` will match if ``not y == x``.
+
+You only need to worry about this distinction if you are testing code that
+relies on badly written overloaded equality operators.
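+
+For example::
+
+    def test_not_equals_example(self):
+        self.assertThat(42, NotEquals(7))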
+
+
+KeysEqual
+~~~~~~~~~
+
+Matches if the keys of one dict are equal to the keys of another dict. For
+example::
+
+ def test_keys_equal(self):
+ x = {'a': 1, 'b': 2}
+ y = {'a': 2, 'b': 3}
+ self.assertThat(x, KeysEqual(y))
+
+
+MatchesRegex
+~~~~~~~~~~~~
+
+Matches a string against a regular expression, which is a wonderful thing to
+be able to do, if you think about it::
+
+ def test_matches_regex_example(self):
+ self.assertThat('foo', MatchesRegex('fo+'))
+
+
+HasLength
+~~~~~~~~~
+
+Check the length of a collection. The following assertion will fail::
+
+ self.assertThat([1, 2, 3], HasLength(2))
+
+But this one won't::
+
+ self.assertThat([1, 2, 3], HasLength(3))
+
+
+File- and path-related matchers
+-------------------------------
+
+testtools also has a number of matchers to help with asserting things about
+the state of the filesystem.
+
+PathExists
+~~~~~~~~~~
+
+Matches if a path exists::
+
+ self.assertThat('/', PathExists())
+
+
+DirExists
+~~~~~~~~~
+
+Matches if a path exists and it refers to a directory::
+
+ # This will pass on most Linux systems.
+ self.assertThat('/home/', DirExists())
+ # This will not
+ self.assertThat('/home/jml/some-file.txt', DirExists())
+
+
+FileExists
+~~~~~~~~~~
+
+Matches if a path exists and it refers to a file (as opposed to a directory)::
+
+ # This will pass on most Linux systems.
+ self.assertThat('/bin/true', FileExists())
+ # This will not.
+ self.assertThat('/home/', FileExists())
+
+
+DirContains
+~~~~~~~~~~~
+
+Matches if the given directory contains the specified files and directories.
+Say we have a directory ``foo`` that has the files ``a``, ``b`` and ``c``,
+then::
+
+ self.assertThat('foo', DirContains(['a', 'b', 'c']))
+
+will match, but::
+
+ self.assertThat('foo', DirContains(['a', 'b']))
+
+will not.
+
+The matcher sorts both the input and the list of names we get back from the
+filesystem.
+
+You can use this in a more advanced way, and match the sorted directory
+listing against an arbitrary matcher::
+
+ self.assertThat('foo', DirContains(matcher=Contains('a')))
+
+
+FileContains
+~~~~~~~~~~~~
+
+Matches if the given file has the specified contents. Say there's a file
+called ``greetings.txt`` with the contents, ``Hello World!``::
+
+ self.assertThat('greetings.txt', FileContains("Hello World!"))
+
+will match.
+
+You can also use this in a more advanced way, and match the contents of the
+file against an arbitrary matcher::
+
+ self.assertThat('greetings.txt', FileContains(matcher=Contains('!')))
+
+
+HasPermissions
+~~~~~~~~~~~~~~
+
+Used for asserting that a file or directory has certain permissions. Uses
+octal-mode permissions for both input and matching. For example::
+
+ self.assertThat('/tmp', HasPermissions('1777'))
+ self.assertThat('id_rsa', HasPermissions('0600'))
+
+This is probably more useful on UNIX systems than on Windows systems.
+
+
+SamePath
+~~~~~~~~
+
+Matches if two paths actually refer to the same thing. The paths don't have
+to exist, but if they do exist, ``SamePath`` will resolve any symlinks::
+
+ self.assertThat('somefile', SamePath('childdir/../somefile'))
+
+
+TarballContains
+~~~~~~~~~~~~~~~
+
+Matches the contents of a tarball. In many ways, much like ``DirContains``,
+but instead of matching on ``os.listdir`` matches on ``TarFile.getnames``.
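+
+For example, given a tarball ``foo.tar.gz`` whose members are ``a`` and ``b``::
+
+    self.assertThat('foo.tar.gz', TarballContains(['a', 'b']))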
+
+
+Combining matchers
+------------------
+
+One great thing about matchers is that you can readily combine existing
+matchers to get variations on their behaviour or to quickly build more complex
+assertions.
+
+Below are a few of the combining matchers that come with testtools.
+
+
+Not
+~~~
+
+Negates another matcher. For example::
+
+ def test_not_example(self):
+ self.assertThat([42], Not(Equals("potato")))
+ self.assertThat([42], Not(Is([42])))
+
+If you find yourself using ``Not`` frequently, you may wish to create a custom
+matcher for it. For example::
+
+ IsNot = lambda x: Not(Is(x))
+
+ def test_not_example_2(self):
+ self.assertThat([42], IsNot([42]))
+
+
+Annotate
+~~~~~~~~
+
+Used to add custom notes to a matcher. For example::
+
+ def test_annotate_example(self):
+ result = 43
+ self.assertThat(
+ result, Annotate("Not the answer to the Question!", Equals(42)))
+
+Since the annotation is only ever displayed when there is a mismatch
+(e.g. when ``result`` does not equal 42), it's a good idea to phrase the note
+negatively, so that it describes what a mismatch actually means.
+
+As with Not_, you may wish to create a custom matcher that describes a
+common operation. For example::
+
+ PoliticallyEquals = lambda x: Annotate("Death to the aristos!", Equals(x))
+
+ def test_annotate_example_2(self):
+ self.assertThat("orange", PoliticallyEquals("yellow"))
+
+You can have assertThat perform the annotation for you as a convenience::
+
+ def test_annotate_example_3(self):
+ self.assertThat("orange", Equals("yellow"), "Death to the aristos!")
+
+
+AfterPreprocessing
+~~~~~~~~~~~~~~~~~~
+
+Used to make a matcher that applies a function to the matched object before
+matching. This can be used to aid in creating trivial matchers as functions, for
+example::
+
+ def test_after_preprocessing_example(self):
+ def PathHasFileContent(content):
+ def _read(path):
+ return open(path).read()
+ return AfterPreprocessing(_read, Equals(content))
+ self.assertThat('/tmp/foo.txt', PathHasFileContent("Hello world!"))
+
+
+MatchesAll
+~~~~~~~~~~
+
+Combines many matchers to make a new matcher. The new matcher will only match
+things that match every single one of the component matchers.
+
+It's much easier to understand in Python than in English::
+
+ def test_matches_all_example(self):
+ has_und_at_both_ends = MatchesAll(StartsWith("und"), EndsWith("und"))
+ # This will succeed.
+ self.assertThat("underground", has_und_at_both_ends)
+ # This will fail.
+ self.assertThat("found", has_und_at_both_ends)
+ # So will this.
+ self.assertThat("undead", has_und_at_both_ends)
+
+At this point some people ask themselves, "Why bother doing this at all? Why
+not just have two separate assertions?". It's a good question.
+
+The first reason is that when a ``MatchesAll`` gets a mismatch, the error will
+include information about all of the bits that mismatched. When you have two
+separate assertions, as below::
+
+ def test_two_separate_assertions(self):
+ self.assertThat("foo", StartsWith("und"))
+ self.assertThat("foo", EndsWith("und"))
+
+Then you get absolutely no information from the second assertion if the first
+assertion fails. Tests are largely there to help you debug code, so having
+more information in error messages is a big help.
+
+The second reason is that it is sometimes useful to give a name to a set of
+matchers. ``has_und_at_both_ends`` is a bit contrived, of course, but it is
+clear. The ``FileExists`` and ``DirExists`` matchers included in testtools
+are perhaps better real examples.
+
+If you want only the first mismatch to be reported, pass ``first_only=True``
+as a keyword parameter to ``MatchesAll``.
+
+
+MatchesAny
+~~~~~~~~~~
+
+Like MatchesAll_, ``MatchesAny`` combines many matchers to make a new
+matcher. The difference is that the new matchers will match a thing if it
+matches *any* of the component matchers.
+
+For example::
+
+ def test_matches_any_example(self):
+ self.assertThat(42, MatchesAny(Equals(5), Not(Equals(6))))
+
+
+AllMatch
+~~~~~~~~
+
+Matches many values against a single matcher. Can be used to make sure that
+many things all meet the same condition::
+
+ def test_all_match_example(self):
+ self.assertThat([2, 3, 5, 7], AllMatch(LessThan(10)))
+
+If the match fails, then all of the values that fail to match will be included
+in the error message.
+
+In some ways, this is the converse of MatchesAll_.
+
+
+MatchesListwise
+~~~~~~~~~~~~~~~
+
+Where ``MatchesAny`` and ``MatchesAll`` combine many matchers to match a
+single value, ``MatchesListwise`` combines many matches to match many values.
+
+For example::
+
+ def test_matches_listwise_example(self):
+ self.assertThat(
+ [1, 2, 3], MatchesListwise(map(Equals, [1, 2, 3])))
+
+This is useful for writing custom, domain-specific matchers.
+
+If you want only the first mismatch to be reported, pass ``first_only=True``
+to ``MatchesListwise``.
+
+
+MatchesSetwise
+~~~~~~~~~~~~~~
+
+Combines many matchers to match many values, without regard to their order.
+
+Here's an example::
+
+ def test_matches_setwise_example(self):
+ self.assertThat(
+ [1, 2, 3], MatchesSetwise(Equals(2), Equals(3), Equals(1)))
+
+Much like ``MatchesListwise``, best used for writing custom, domain-specific
+matchers.
+
+
+MatchesStructure
+~~~~~~~~~~~~~~~~
+
+Creates a matcher that matches certain attributes of an object against a
+pre-defined set of matchers.
+
+It's much easier to understand in Python than in English::
+
+ def test_matches_structure_example(self):
+ foo = Foo()
+ foo.a = 1
+ foo.b = 2
+ matcher = MatchesStructure(a=Equals(1), b=Equals(2))
+ self.assertThat(foo, matcher)
+
+Since all of the matchers used were ``Equals``, we could also write this using
+the ``byEquality`` helper::
+
+ def test_matches_structure_example(self):
+ foo = Foo()
+ foo.a = 1
+ foo.b = 2
+ matcher = MatchesStructure.byEquality(a=1, b=2)
+ self.assertThat(foo, matcher)
+
+``MatchesStructure.fromExample`` takes an object and a list of attributes and
+creates a ``MatchesStructure`` matcher where each attribute of the matched
+object must equal each attribute of the example object. For example::
+
+ matcher = MatchesStructure.fromExample(foo, 'a', 'b')
+
+is exactly equivalent to ``matcher`` in the previous example.
+
+
+MatchesPredicate
+~~~~~~~~~~~~~~~~
+
+Sometimes, all you want to do is create a matcher that matches if a given
+function returns True, and mismatches if it returns False.
+
+For example, you might have an ``is_prime`` function and want to make a
+matcher based on it::
+
+ def test_prime_numbers(self):
+ IsPrime = MatchesPredicate(is_prime, '%s is not prime.')
+ self.assertThat(7, IsPrime)
+ self.assertThat(1983, IsPrime)
+ # This will fail.
+ self.assertThat(42, IsPrime)
+
+Which will produce the error message::
+
+ Traceback (most recent call last):
+ File "...", line ..., in test_prime_numbers
+ self.assertThat(42, IsPrime)
+ MismatchError: 42 is not prime.
+
+
+MatchesPredicateWithParams
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you can't use a trivial predicate and instead need to pass in some
+parameters each time. In that case, MatchesPredicateWithParams is your go-to
+tool for creating ad hoc matchers. MatchesPredicateWithParams takes a predicate
+function and message and returns a factory to produce matchers from that. The
+predicate needs to return a boolean (or any truthy object), and accept the
+object to match plus whatever was passed into the factory.
+
+For example, you might have a ``divisible`` function and want to make a
+matcher based on it::
+
+ def test_divisible_numbers(self):
+ IsDivisibleBy = MatchesPredicateWithParams(
+ divisible, '{0} is not divisible by {1}')
+ self.assertThat(7, IsDivisibleBy(1))
+ self.assertThat(7, IsDivisibleBy(7))
+        # This will fail.
+        self.assertThat(7, IsDivisibleBy(2))
+
+Which will produce the error message::
+
+ Traceback (most recent call last):
+ File "...", line ..., in test_divisible
+ self.assertThat(7, IsDivisibleBy(2))
+ MismatchError: 7 is not divisible by 2.
+
+
+Raises
+~~~~~~
+
+Takes whatever the callable raises as an exc_info tuple and matches it against
+whatever matcher it was given. For example, if you want to assert that a
+callable raises an exception of a given type::
+
+ def test_raises_example(self):
+ self.assertThat(
+ lambda: 1/0, Raises(MatchesException(ZeroDivisionError)))
+
+Although note that this could also be written as::
+
+ def test_raises_example_convenient(self):
+ self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+See also MatchesException_ and `the raises helper`_
+
+
+Writing your own matchers
+-------------------------
+
+Combining matchers is fun and can get you a very long way indeed, but
+sometimes you will have to write your own. Here's how.
+
+You need to make two closely-linked objects: a ``Matcher`` and a
+``Mismatch``. The ``Matcher`` knows how to actually make the comparison, and
+the ``Mismatch`` knows how to describe a failure to match.
+
+Here's an example matcher::
+
+ class IsDivisibleBy(object):
+ """Match if a number is divisible by another number."""
+ def __init__(self, divider):
+ self.divider = divider
+ def __str__(self):
+ return 'IsDivisibleBy(%s)' % (self.divider,)
+ def match(self, actual):
+ remainder = actual % self.divider
+ if remainder != 0:
+ return IsDivisibleByMismatch(actual, self.divider, remainder)
+ else:
+ return None
+
+The matcher has a constructor that takes parameters that describe what you
+actually *expect*, in this case a number that other numbers ought to be
+divisible by. It has a ``__str__`` method, the result of which is displayed
+on failure by ``assertThat`` and a ``match`` method that does the actual
+matching.
+
+``match`` takes something to match against, here ``actual``, and decides
+whether or not it matches. If it does match, then ``match`` must return
+``None``. If it does *not* match, then ``match`` must return a ``Mismatch``
+object. ``assertThat`` will call ``match`` and then fail the test if it
+returns a non-None value. For example::
+
+ def test_is_divisible_by_example(self):
+ # This succeeds, since IsDivisibleBy(5).match(10) returns None.
+ self.assertThat(10, IsDivisibleBy(5))
+ # This fails, since IsDivisibleBy(7).match(10) returns a mismatch.
+ self.assertThat(10, IsDivisibleBy(7))
+
+The mismatch is responsible for what sort of error message the failing test
+generates. Here's an example mismatch::
+
+ class IsDivisibleByMismatch(object):
+ def __init__(self, number, divider, remainder):
+ self.number = number
+ self.divider = divider
+ self.remainder = remainder
+
+ def describe(self):
+ return "%r is not divisible by %r, %r remains" % (
+ self.number, self.divider, self.remainder)
+
+ def get_details(self):
+ return {}
+
+The mismatch takes information about the mismatch, and provides a ``describe``
+method that assembles all of that into a nice error message for end users.
+You can use the ``get_details`` method to provide extra, arbitrary data with
+the mismatch (e.g. the contents of a log file). Most of the time it's fine to
+just return an empty dict. You can read more about Details_ elsewhere in this
+document.
+
+Sometimes you don't need to create a custom mismatch class. In particular, if
+you don't care *when* the description is calculated, then you can just do that
+in the Matcher itself like this::
+
+ def match(self, actual):
+ remainder = actual % self.divider
+ if remainder != 0:
+ return Mismatch(
+ "%r is not divisible by %r, %r remains" % (
+ actual, self.divider, remainder))
+ else:
+ return None
+
+When writing a ``describe`` method or constructing a ``Mismatch`` object the
+code should ensure it only emits printable unicode. As this output must be
+combined with other text and forwarded for presentation, letting through
+non-ascii bytes of ambiguous encoding or control characters could throw an
+exception or mangle the display. In most cases simply avoiding the ``%s``
+format specifier and using ``%r`` instead will be enough. For examples of
+more complex formatting see the ``testtools.matchers`` implementations.
+
+
+Details
+=======
+
+As we may have mentioned once or twice already, one of the great benefits of
+automated tests is that they help find, isolate and debug errors in your
+system.
+
+Frequently however, the information provided by a mere assertion failure is
+not enough. It's often useful to have other information: the contents of log
+files; what queries were run; benchmark timing information; what state certain
+subsystem components are in and so forth.
+
+testtools calls all of these things "details" and provides a single, powerful
+mechanism for including this information in your test run.
+
+Here's an example of how to add them::
+
+ from testtools import TestCase
+ from testtools.content import text_content
+
+ class TestSomething(TestCase):
+
+ def test_thingy(self):
+ self.addDetail('arbitrary-color-name', text_content("blue"))
+ 1 / 0 # Gratuitous error!
+
+A detail is an arbitrary piece of content given a name that's unique within the
+test. Here the name is ``arbitrary-color-name`` and the content is
+``text_content("blue")``. The name can be any text string, and the content
+can be any ``testtools.content.Content`` object.
+
+When the test runs, testtools will show you something like this::
+
+ ======================================================================
+ ERROR: exampletest.TestSomething.test_thingy
+ ----------------------------------------------------------------------
+ arbitrary-color-name: {{{blue}}}
+
+ Traceback (most recent call last):
+ File "exampletest.py", line 8, in test_thingy
+ 1 / 0 # Gratuitous error!
+ ZeroDivisionError: integer division or modulo by zero
+ ------------
+ Ran 1 test in 0.030s
+
+As you can see, the detail is included as an attachment, here saying
+that our arbitrary-color-name is "blue".
+
+
+Content
+-------
+
+For the actual content of details, testtools uses its own MIME-based Content
+object. This allows you to attach any information that you could possibly
+conceive of to a test, and allows testtools to use or serialize that
+information.
+
+The basic ``testtools.content.Content`` object is constructed from a
+``testtools.content.ContentType`` and a nullary callable that must return an
+iterator of chunks of bytes that the content is made from.
+
+So, to make a Content object that is just a simple string of text, you can
+do::
+
+ from testtools.content import Content
+ from testtools.content_type import ContentType
+
+ text = Content(ContentType('text', 'plain'), lambda: ["some text"])
+
+Because adding small bits of text content is very common, there's also a
+convenience method::
+
+ text = text_content("some text")
+
+To make content out of an image stored on disk, you could do something like::
+
+ image = Content(ContentType('image', 'png'), lambda: [open('foo.png', 'rb').read()])
+
+Or you could use the convenience function::
+
+ image = content_from_file('foo.png', ContentType('image', 'png'))
+
+The ``lambda`` helps make sure that the file is opened and the actual bytes
+read only when they are needed – by default, when the test is finished. This
+means that tests can construct and add Content objects freely without worrying
+too much about how they affect run time.
+
+
+A realistic example
+-------------------
+
+A very common use of details is to add a log file to failing tests. Say your
+project has a server represented by a class ``SomeServer`` that you can start
+up and shut down in tests, but runs in another process. You want to test
+interaction with that server, and whenever the interaction fails, you want to
+see the client-side error *and* the logs from the server-side. Here's how you
+might do it::
+
+ from testtools import TestCase
+ from testtools.content import attach_file, Content
+ from testtools.content_type import UTF8_TEXT
+
+ from myproject import SomeServer
+
+ class SomeTestCase(TestCase):
+
+ def setUp(self):
+ super(SomeTestCase, self).setUp()
+ self.server = SomeServer()
+ self.server.start_up()
+ self.addCleanup(self.server.shut_down)
+ self.addCleanup(attach_file, self.server.logfile, self)
+
+ def attach_log_file(self):
+ self.addDetail(
+ 'log-file',
+ Content(UTF8_TEXT,
+ lambda: open(self.server.logfile, 'r').readlines()))
+
+ def test_a_thing(self):
+ self.assertEqual("cool", self.server.temperature)
+
+This test will attach the log file of ``SomeServer`` to each test that is
+run. testtools will only display the log file for failing tests, so it's not
+such a big deal.
+
+If the act of adding a detail is expensive, you might want to use
+addOnException_ so that you only do it when a test actually raises an
+exception.
+
+
+Controlling test execution
+==========================
+
+.. _addCleanup:
+
+addCleanup
+----------
+
+``TestCase.addCleanup`` is a robust way to arrange for a clean up function to
+be called before ``tearDown``. This is a powerful and simple alternative to
+putting clean up logic in a try/finally block or ``tearDown`` method. For
+example::
+
+ def test_foo(self):
+ foo.lock()
+ self.addCleanup(foo.unlock)
+ ...
+
+This is particularly useful if you have some sort of factory in your test::
+
+ def make_locked_foo(self):
+ foo = Foo()
+ foo.lock()
+ self.addCleanup(foo.unlock)
+ return foo
+
+ def test_frotz_a_foo(self):
+ foo = self.make_locked_foo()
+ foo.frotz()
+ self.assertEqual(foo.frotz_count, 1)
+
+Any extra arguments or keyword arguments passed to ``addCleanup`` are passed
+to the callable at cleanup time.
+
+Cleanups can also report multiple errors, if appropriate by wrapping them in
+a ``testtools.MultipleExceptions`` object::
+
+ raise MultipleExceptions(exc_info1, exc_info2)
+
+
+Fixtures
+--------
+
+Tests often depend on a system being set up in a certain way, or having
+certain resources available to them. Perhaps a test needs a connection to the
+database or access to a running external server.
+
+One common way of doing this is to do::
+
+ class SomeTest(TestCase):
+ def setUp(self):
+ super(SomeTest, self).setUp()
+ self.server = Server()
+ self.server.setUp()
+ self.addCleanup(self.server.tearDown)
+
+testtools provides a more convenient, declarative way to do the same thing::
+
+ class SomeTest(TestCase):
+ def setUp(self):
+ super(SomeTest, self).setUp()
+ self.server = self.useFixture(Server())
+
+``useFixture(fixture)`` calls ``setUp`` on the fixture, schedules a clean up
+to clean it up, and schedules a clean up to attach all details_ held by the
+fixture to the test case. The fixture object must meet the
+``fixtures.Fixture`` protocol (version 0.3.4 or newer, see fixtures_).
+
+If you have anything beyond the most simple test set up, we recommend that
+you put this set up into a ``Fixture`` class. Once there, the fixture can be
+easily re-used by other tests and can be combined with other fixtures to make
+more complex resources.
+
+
+Skipping tests
+--------------
+
+Many reasons exist to skip a test: a dependency might be missing; a test might
+be too expensive and thus should not be run while on battery power; or perhaps
+the test is testing an incomplete feature.
+
+``TestCase.skipTest`` is a simple way to have a test stop running and be
+reported as a skipped test, rather than a success, error or failure. For
+example::
+
+ def test_make_symlink(self):
+ symlink = getattr(os, 'symlink', None)
+ if symlink is None:
+ self.skipTest("No symlink support")
+ symlink(whatever, something_else)
+
+Using ``skipTest`` means that you can make decisions about what tests to run
+as late as possible, and close to the actual tests. Without it, you might be
+forced to use convoluted logic during test loading, which is a bit of a mess.
+
+
+Legacy skip support
+~~~~~~~~~~~~~~~~~~~
+
+If you are using this feature when running your test suite with a legacy
+``TestResult`` object that is missing the ``addSkip`` method, then the
+``addError`` method will be invoked instead. If you are using a test result
+from testtools, you do not have to worry about this.
+
+In older versions of testtools, ``skipTest`` was known as ``skip``. Since
+Python 2.7 added ``skipTest`` support, the ``skip`` name is now deprecated.
+No warning is emitted yet – some time in the future we may do so.
+
+
+addOnException
+--------------
+
+Sometimes, you might wish to do something only when a test fails. Perhaps you
+need to run expensive diagnostic routines or some such.
+``TestCase.addOnException`` allows you to easily do just this. For example::
+
+ class SomeTest(TestCase):
+ def setUp(self):
+ super(SomeTest, self).setUp()
+ self.server = self.useFixture(SomeServer())
+ self.addOnException(self.attach_server_diagnostics)
+
+ def attach_server_diagnostics(self, exc_info):
+ self.server.prep_for_diagnostics() # Expensive!
+ self.addDetail('server-diagnostics', self.server.get_diagnostics)
+
+ def test_a_thing(self):
+ self.assertEqual('cheese', 'chalk')
+
+In this example, ``attach_server_diagnostics`` will only be called when a test
+fails. It is given the exc_info tuple of the error raised by the test, just
+in case it is needed.
+
+
+Twisted support
+---------------
+
+testtools provides *highly experimental* support for running Twisted tests –
+tests that return a Deferred_ and rely on the Twisted reactor. You should not
+use this feature right now. We reserve the right to change the API and
+behaviour without telling you first.
+
+However, if you are going to, here's how you do it::
+
+ from testtools import TestCase
+ from testtools.deferredruntest import AsynchronousDeferredRunTest
+
+ class MyTwistedTests(TestCase):
+
+ run_tests_with = AsynchronousDeferredRunTest
+
+ def test_foo(self):
+ # ...
+ return d
+
+In particular, note that you do *not* have to use a special base ``TestCase``
+in order to run Twisted tests.
+
+You can also run individual tests within a test case class using the Twisted
+test runner::
+
+ class MyTestsSomeOfWhichAreTwisted(TestCase):
+
+ def test_normal(self):
+ pass
+
+ @run_test_with(AsynchronousDeferredRunTest)
+ def test_twisted(self):
+ # ...
+ return d
+
+Here are some tips for converting your Trial tests into testtools tests.
+
+* Use the ``AsynchronousDeferredRunTest`` runner
+* Make sure to upcall to ``setUp`` and ``tearDown``
+* Don't use ``setUpClass`` or ``tearDownClass``
+* Don't expect setting .todo, .timeout or .skip attributes to do anything
+* ``flushLoggedErrors`` is ``testtools.deferredruntest.flush_logged_errors``
+* ``assertFailure`` is ``testtools.deferredruntest.assert_fails_with``
+* Trial spins the reactor a couple of times before cleaning it up,
+ ``AsynchronousDeferredRunTest`` does not. If you rely on this behavior, use
+ ``AsynchronousDeferredRunTestForBrokenTwisted``.
+
+force_failure
+-------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to ``True``
+will cause the test to be marked as a failure, but won't stop the test code
+from running (see :ref:`force_failure`).
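+
+For example (a sketch; ``precondition_holds`` and the rest of the test are
+illustrative)::
+
+    def test_gather_diagnostics(self):
+        if not precondition_holds():
+            # Mark the test as failed, but keep going so that later steps
+            # can still attach diagnostic details.
+            self.force_failure = True
+        run_the_rest_of_the_test(self)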
+
+
+Test helpers
+============
+
+testtools comes with a few little things that make it a little bit easier to
+write tests.
+
+
+TestCase.patch
+--------------
+
+``patch`` is a convenient way to monkey-patch a Python object for the duration
+of your test. It's especially useful for testing legacy code. e.g.::
+
+ def test_foo(self):
+ my_stream = StringIO()
+ self.patch(sys, 'stderr', my_stream)
+ run_some_code_that_prints_to_stderr()
+ self.assertEqual('', my_stream.getvalue())
+
+The call to ``patch`` above masks ``sys.stderr`` with ``my_stream`` so that
+anything printed to stderr will be captured in a StringIO variable that can be
+actually tested. Once the test is done, the real ``sys.stderr`` is restored to
+its rightful place.
+
+
+Creation methods
+----------------
+
+Often when writing unit tests, you want to create an object that is a
+completely normal instance of its type. You don't want there to be anything
+special about its properties, because you are testing generic behaviour rather
+than specific conditions.
+
+A lot of the time, test authors do this by making up silly strings and numbers
+and passing them to constructors (e.g. 42, 'foo', "bar" etc), and that's
+fine. However, sometimes it's useful to be able to create arbitrary objects
+at will, without having to make up silly sample data.
+
+To help with this, ``testtools.TestCase`` implements creation methods called
+``getUniqueString`` and ``getUniqueInteger``. They return strings and
+integers that are unique within the context of the test that can be used to
+assemble more complex objects. Here's a basic example where
+``getUniqueString`` is used instead of saying "foo" or "bar" or whatever::
+
+ class SomeTest(TestCase):
+
+ def test_full_name(self):
+ first_name = self.getUniqueString()
+ last_name = self.getUniqueString()
+ p = Person(first_name, last_name)
+ self.assertEqual(p.full_name, "%s %s" % (first_name, last_name))
+
+
+And here's how it could be used to make a complicated test::
+
+ class TestCoupleLogic(TestCase):
+
+ def make_arbitrary_person(self):
+ return Person(self.getUniqueString(), self.getUniqueString())
+
+ def test_get_invitation(self):
+ a = self.make_arbitrary_person()
+ b = self.make_arbitrary_person()
+ couple = Couple(a, b)
+ event_name = self.getUniqueString()
+ invitation = couple.get_invitation(event_name)
+ self.assertEqual(
+ invitation,
+ "We invite %s and %s to %s" % (
+ a.full_name, b.full_name, event_name))
+
+Essentially, creation methods like these are a way of reducing the number of
+assumptions in your tests and communicating to test readers that the exact
+details of certain variables don't actually matter.
+
+See pages 419-423 of `xUnit Test Patterns`_ by Gerard Meszaros for a detailed
+discussion of creation methods.
+
+Test attributes
+---------------
+
+Inspired by the ``nosetests`` ``attr`` plugin, testtools provides support for
+marking up test methods with attributes, which are then exposed in the test
+id and can be used when filtering tests by id. (e.g. via ``--load-list``)::
+
+ from testtools.testcase import attr, WithAttributes
+
+ class AnnotatedTests(WithAttributes, TestCase):
+
+ @attr('simple')
+ def test_one(self):
+ pass
+
+ @attr('more', 'than', 'one')
+ def test_two(self):
+ pass
+
+ @attr('or')
+ @attr('stacked')
+ def test_three(self):
+ pass
+
+General helpers
+===============
+
+Conditional imports
+-------------------
+
+Lots of the time we would like to conditionally import modules. testtools
+uses the small library extras to do this. This used to be part of testtools.
+
+Instead of::
+
+ try:
+ from twisted.internet import defer
+ except ImportError:
+ defer = None
+
+You can do::
+
+ defer = try_import('twisted.internet.defer')
+
+
+Instead of::
+
+ try:
+ from StringIO import StringIO
+ except ImportError:
+ from io import StringIO
+
+You can do::
+
+ StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+
+Safe attribute testing
+----------------------
+
+``hasattr`` is broken_ on many versions of Python. The helper ``safe_hasattr``
+can be used to safely test whether an object has a particular attribute. Like
+``try_import`` this used to be in testtools but is now in extras.
+
+
+Nullary callables
+-----------------
+
+Sometimes you want to be able to pass around a function with the arguments
+already specified. The normal way of doing this in Python is::
+
+ nullary = lambda: f(*args, **kwargs)
+ nullary()
+
+Which is mostly good enough, but loses a bit of debugging information. If you
+take the ``repr()`` of ``nullary``, you're only told that it's a lambda, and
+you get none of the juicy meaning that you'd get from the ``repr()`` of ``f``.
+
+The solution is to use ``Nullary`` instead::
+
+ nullary = Nullary(f, *args, **kwargs)
+ nullary()
+
+Here, ``repr(nullary)`` will be the same as ``repr(f)``.
+
+
+.. _testrepository: https://launchpad.net/testrepository
+.. _Trial: http://twistedmatrix.com/documents/current/core/howto/testing.html
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
+.. _unittest2: http://pypi.python.org/pypi/unittest2
+.. _zope.testrunner: http://pypi.python.org/pypi/zope.testrunner/
+.. _xUnit test patterns: http://xunitpatterns.com/
+.. _fixtures: http://pypi.python.org/pypi/fixtures
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _doctest: http://docs.python.org/library/doctest.html
+.. _Deferred: http://twistedmatrix.com/documents/current/core/howto/defer.html
+.. _discover: http://pypi.python.org/pypi/discover
+.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
+.. _Distutils: http://docs.python.org/library/distutils.html
+.. _`setup configuration`: http://docs.python.org/distutils/configfile.html
+.. _broken: http://chipaca.com/post/3210673069/hasattr-17-less-harmful
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/hacking.rst b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/hacking.rst
new file mode 100644
index 00000000000..6434e36c535
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/hacking.rst
@@ -0,0 +1,163 @@
+=========================
+Contributing to testtools
+=========================
+
+Coding style
+------------
+
+In general, follow `PEP 8`_ except where consistency with the standard
+library's unittest_ module would suggest otherwise.
+
+testtools currently supports Python 2.6 and later, including Python 3.
+
+Copyright assignment
+--------------------
+
+Part of testtools' raison d'etre is to provide Python with improvements to the
+testing code it ships. For that reason we require all contributions (that are
+non-trivial) to meet one of the following rules:
+
+* be inapplicable for inclusion in Python.
+* be able to be included in Python without further contact with the contributor.
+* be copyright assigned to Jonathan M. Lange.
+
+Please pick one of these and specify it when contributing code to testtools.
+
+
+Licensing
+---------
+
+All code that is not copyright assigned to Jonathan M. Lange (see Copyright
+Assignment above) needs to be licensed under the `MIT license`_ that testtools
+uses, so that testtools can ship it.
+
+
+Testing
+-------
+
+Please write tests for every feature. This project ought to be a model
+example of well-tested Python code!
+
+Take particular care to make sure the *intent* of each test is clear.
+
+You can run tests with ``make check``.
+
+By default, testtools hides many levels of its own stack when running tests.
+This is for the convenience of users, who do not care about how, say, assert
+methods are implemented. However, when writing tests for testtools itself, it
+is often useful to see all levels of the stack. To do this, add
+``run_tests_with = FullStackRunTest`` to the top of a test's class definition.
+
+
+Discussion
+----------
+
+When submitting a patch, it will help the review process a lot if there's a
+clear explanation of what the change does and why you think the change is a
+good idea. For crasher bugs, this is generally a no-brainer, but for UI bugs
+& API tweaks, the reason something is an improvement might not be obvious, so
+it's worth spelling out.
+
+If you are thinking of implementing a new feature, you might want to have that
+discussion on the mailing list (testtools-dev@lists.launchpad.net) before the
+patch goes up for review. This is not at all mandatory, but getting feedback
+early can help avoid dead ends.
+
+
+Documentation
+-------------
+
+Documents are written using the Sphinx_ variant of reStructuredText_. All
+public methods, functions, classes and modules must have API documentation.
+When changing code, be sure to check the API documentation to see if it could
+be improved. Before submitting changes to trunk, look over them and see if
+the manuals ought to be updated.
+
+
+Source layout
+-------------
+
+The top-level directory contains the ``testtools/`` package directory, and
+miscellaneous files like ``README.rst`` and ``setup.py``.
+
+The ``testtools/`` directory is the Python package itself. It is separated
+into submodules for internal clarity, but all public APIs should be “promoted”
+into the top-level package by importing them in ``testtools/__init__.py``.
+Users of testtools should never import a submodule in order to use a stable
+API. Unstable APIs like ``testtools.matchers`` and
+``testtools.deferredruntest`` should be exported as submodules.
+
+Tests belong in ``testtools/tests/``.
+
+
+Committing to trunk
+-------------------
+
+Testtools is maintained using git, with its master repo at
+https://github.com/testing-cabal/testtools. This gives every contributor the
+ability to commit their work to their own branches. However permission must be
+granted to allow contributors to commit to the trunk branch.
+
+Commit access to trunk is obtained by joining the `testing-cabal`_, either as an
+Owner or a Committer. Commit access is contingent on obeying the testtools
+contribution policy, see `Copyright Assignment`_ above.
+
+
+Code Review
+-----------
+
+All code must be reviewed before landing on trunk. The process is to create a
+branch on Github, and make a pull request into trunk. It will then be reviewed
+before it can be merged to trunk. It will be reviewed by someone:
+
+* not the author
+* a committer
+
+As a special exception, since there are few testtools committers and thus
+reviews are prone to blocking, a pull request from a committer that has not been
+reviewed after 24 hours may be merged by that committer. When the team is larger
+this policy will be revisited.
+
+Code reviewers should look for the quality of what is being submitted,
+including conformance with this HACKING file.
+
+Changes which all users should be made aware of should be documented in NEWS.
+
+
+NEWS management
+---------------
+
+The file NEWS is structured as a sorted list of releases. Each release can have
+a free-form description and one or more sections with bullet point items.
+Sections in use today are 'Improvements' and 'Changes'. To ease merging between
+branches, the bullet points are kept alphabetically sorted. The release NEXT is
+permanently present at the top of the list.
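+
+A sketch of the expected shape (the exact heading markup follows the existing
+NEWS file)::
+
+  NEXT
+  ~~~~
+
+  Improvements
+  ------------
+
+  * Alphabetically earlier improvement. (Contributor)
+
+  * Later improvement. (Contributor)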
+
+
+Release tasks
+-------------
+
+#. Choose a version number, say X.Y.Z
+#. In trunk, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
+#. Under NEXT in NEWS add a heading with the version number X.Y.Z.
+#. Possibly write a blurb into NEWS.
+#. Commit the changes.
+#. Tag the release, ``git tag -s testtools-X.Y.Z``
+#. Run ``make release``, which:
+ #. Creates a source distribution and uploads to PyPI
+ #. Ensures all Fix Committed bugs are in the release milestone
+ #. Makes a release on Launchpad and uploads the tarball
+ #. Marks all the Fix Committed bugs as Fix Released
+ #. Creates a new milestone
+#. Change __version__ in __init__.py to the probable next version.
+ e.g. to ``(X, Y, Z+1, 'dev', 0)``.
+#. Commit 'Opening X.Y.Z+1 for development.'
+#. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
+#. Push trunk to Github, ``git push --tags origin master``
+
+.. _PEP 8: http://www.python.org/dev/peps/pep-0008/
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _MIT license: http://www.opensource.org/licenses/mit-license.php
+.. _Sphinx: http://sphinx.pocoo.org/
+.. _restructuredtext: http://docutils.sourceforge.net/rst.html
+.. _testing-cabal: https://github.com/organizations/testing-cabal/
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/index.rst b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/index.rst
new file mode 100644
index 00000000000..bac47e43794
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/index.rst
@@ -0,0 +1,36 @@
+.. testtools documentation master file, created by
+ sphinx-quickstart on Sun Nov 28 13:45:40 2010.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+testtools: tasteful testing for Python
+======================================
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework. These extensions have been derived from many years of experience
+with unit testing in Python and come from many different sources. testtools
+also ports recent unittest changes all the way back to Python 2.4. The next
+release of testtools will instead support only the Python versions maintained
+by the Python community, to allow the use of modern language features within
+testtools.
+
+
+Contents:
+
+.. toctree::
+ :maxdepth: 1
+
+ overview
+ for-test-authors
+ for-framework-folk
+ hacking
+ Changes to testtools <news>
+ API reference documentation <http://mumak.net/testtools/apidocs/>
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/make.bat b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/make.bat
new file mode 100644
index 00000000000..f8c1fd520ab
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/make.bat
@@ -0,0 +1,113 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\testtools.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\testtools.ghc
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/overview.rst b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/overview.rst
new file mode 100644
index 00000000000..5d9436ffc9e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/doc/overview.rst
@@ -0,0 +1,101 @@
+======================================
+testtools: tasteful testing for Python
+======================================
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework. These extensions have been derived from many years of experience
+with unit testing in Python and come from many different sources. testtools
+supports Python versions all the way back to Python 2.6.
+
+What better way to start than with a contrived code snippet?::
+
+ from testtools import TestCase
+ from testtools.content import Content
+ from testtools.content_type import UTF8_TEXT
+ from testtools.matchers import Equals
+
+ from myproject import SillySquareServer
+
+ class TestSillySquareServer(TestCase):
+
+ def setUp(self):
+            super(TestSillySquareServer, self).setUp()
+ self.server = self.useFixture(SillySquareServer())
+ self.addCleanup(self.attach_log_file)
+
+ def attach_log_file(self):
+ self.addDetail(
+ 'log-file',
+                Content(UTF8_TEXT,
+ lambda: open(self.server.logfile, 'r').readlines()))
+
+ def test_server_is_cool(self):
+ self.assertThat(self.server.temperature, Equals("cool"))
+
+ def test_square(self):
+ self.assertThat(self.server.silly_square_of(7), Equals(49))
+
+
+Why use testtools?
+==================
+
+Better assertion methods
+------------------------
+
+The standard assertion methods that come with unittest aren't as helpful as
+they could be, and there aren't quite enough of them. testtools adds
+``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives.
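+
+For example (a minimal sketch)::
+
+    from testtools import TestCase
+
+    class TestExtraAssertions(TestCase):
+
+        def test_extra_assertions(self):
+            # Assertions added by testtools on top of unittest's.
+            self.assertIn('a', 'cat')
+            self.assertIs(None, None)
+            self.assertIsInstance(7, int)
+            self.assertNotIn('b', 'cat')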
+
+
+Matchers: better than assertion methods
+---------------------------------------
+
+Of course, in any serious project you want to be able to have assertions that
+are specific to that project and the particular problem that it is addressing.
+Rather than forcing you to define your own assertion methods and maintain your
+own inheritance hierarchy of ``TestCase`` classes, testtools lets you write
+your own "matchers", custom predicates that can be plugged into a unit test::
+
+ def test_response_has_bold(self):
+ # The response has bold text.
+ response = self.server.getResponse()
+ self.assertThat(response, HTMLContains(Tag('bold', 'b')))
+
+
+More debugging info, when you need it
+--------------------------------------
+
+testtools makes it easy to add arbitrary data to your test result. If you
+want to know what's in a log file when a test fails, or what the load was on
+the computer when a test started, or what files were open, you can add that
+information with ``TestCase.addDetail``, and it will appear in the test
+results if that test fails.
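+
+For example, a minimal sketch that attaches a short text detail (the detail
+name and value are arbitrary)::
+
+    from testtools import TestCase
+    from testtools.content import text_content
+
+    class TestWithDetails(TestCase):
+
+        def test_records_load(self):
+            # Shown in the test result if this test fails.
+            self.addDetail('load-average', text_content('0.42'))
+            self.assertEqual('cool', 'cool')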
+
+
+Extend unittest, but stay compatible and re-usable
+--------------------------------------------------
+
+testtools goes to great lengths to allow serious test authors and test
+*framework* authors to do whatever they like with their tests and their
+extensions while staying compatible with the standard library's unittest.
+
+testtools has completely parametrized how exceptions raised in tests are
+mapped to ``TestResult`` methods and how tests are actually executed (ever
+wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?)
+
+It also provides many simple but handy utilities, like the ability to clone a
+test, a ``MultiTestResult`` object that lets many result objects get the
+results from one test suite, and adapters to bring legacy ``TestResult``
+objects into our new golden age.
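+
+For example, a minimal sketch of sending one run to two result objects with
+``MultiTestResult`` (the test module name is hypothetical)::
+
+    import unittest
+    from testtools import MultiTestResult
+
+    # Both result objects receive every event from the same run.
+    result_one = unittest.TestResult()
+    result_two = unittest.TestResult()
+    suite = unittest.TestLoader().loadTestsFromName('myproject.tests')
+    suite.run(MultiTestResult(result_one, result_two))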
+
+
+Cross-Python compatibility
+--------------------------
+
+testtools gives you the very latest in unit testing technology in a way that
+will work with Python 2.6, 2.7, 3.1 and 3.2.
+
+If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
+0.9.15. Up to that release we supported Python 2.4 and 2.5, but we found the
+constraints involved in avoiding newer language features onerous as we added
+more support for Python 3 and later.
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.cfg b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.cfg
new file mode 100644
index 00000000000..72d49ab47be
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.cfg
@@ -0,0 +1,10 @@
+[test]
+test_module = testtools.tests
+buffer = 1
+catch = 1
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.py
new file mode 100755
index 00000000000..dacbf91e264
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/setup.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+"""Distutils installer for testtools."""
+
+from setuptools import setup
+from distutils.command.build_py import build_py
+import email
+import os
+import sys
+
+import testtools
+cmd_class = {}
+if getattr(testtools, 'TestCommand', None) is not None:
+ cmd_class['test'] = testtools.TestCommand
+
+
+class testtools_build_py(build_py):
+ def build_module(self, module, module_file, package):
+ if sys.version_info >= (3,) and module == '_compat2x':
+ return
+ return build_py.build_module(self, module, module_file, package)
+cmd_class['build_py'] = testtools_build_py
+
+
+def get_version_from_pkg_info():
+ """Get the version from PKG-INFO file if we can."""
+ pkg_info_path = os.path.join(os.path.dirname(__file__), 'PKG-INFO')
+ try:
+ pkg_info_file = open(pkg_info_path, 'r')
+ except (IOError, OSError):
+ return None
+ try:
+ pkg_info = email.message_from_file(pkg_info_file)
+ except email.MessageError:
+ return None
+ return pkg_info.get('Version', None)
+
+
+def get_version():
+ """Return the version of testtools that we are building."""
+ version = '.'.join(
+ str(component) for component in testtools.__version__[0:3])
+ phase = testtools.__version__[3]
+ if phase == 'final':
+ return version
+ pkg_info_version = get_version_from_pkg_info()
+ if pkg_info_version:
+ return pkg_info_version
+ # Apparently if we just say "snapshot" then distribute won't accept it
+ # as satisfying versioned dependencies. This is a problem for the
+ # daily build version.
+ return "snapshot-%s" % (version,)
+
+
+def get_long_description():
+ manual_path = os.path.join(
+ os.path.dirname(__file__), 'doc/overview.rst')
+ return open(manual_path).read()
+
+
+setup(name='testtools',
+ author='Jonathan M. Lange',
+ author_email='jml+testtools@mumak.net',
+ url='https://github.com/testing-cabal/testtools',
+ description=('Extensions to the Python standard library unit testing '
+ 'framework'),
+ long_description=get_long_description(),
+ version=get_version(),
+ classifiers=["License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3",
+ ],
+ packages=[
+ 'testtools',
+ 'testtools.matchers',
+ 'testtools.testresult',
+ 'testtools.tests',
+ 'testtools.tests.matchers',
+ ],
+ cmdclass=cmd_class,
+ zip_safe=False,
+ install_requires=[
+ 'extras',
+ # 'mimeparse' has not been uploaded by the maintainer with Python3 compat
+ # but someone kindly uploaded a fixed version as 'python-mimeparse'.
+ 'python-mimeparse',
+ ],
+ )
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py
new file mode 100644
index 00000000000..62caae8545a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/__init__.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Extensions to the standard Python unittest library."""
+
+__all__ = [
+ 'clone_test_with_new_id',
+ 'CopyStreamResult',
+ 'ConcurrentTestSuite',
+ 'ConcurrentStreamTestSuite',
+ 'DecorateTestCaseResult',
+ 'ErrorHolder',
+ 'ExpectedException',
+ 'ExtendedToOriginalDecorator',
+ 'ExtendedToStreamDecorator',
+ 'FixtureSuite',
+ 'iterate_tests',
+ 'MultipleExceptions',
+ 'MultiTestResult',
+ 'PlaceHolder',
+ 'run_test_with',
+ 'Tagger',
+ 'TestCase',
+ 'TestCommand',
+ 'TestByTestResult',
+ 'TestResult',
+ 'TestResultDecorator',
+ 'TextTestResult',
+ 'RunTest',
+ 'skip',
+ 'skipIf',
+ 'skipUnless',
+ 'StreamFailFast',
+ 'StreamResult',
+ 'StreamResultRouter',
+ 'StreamSummary',
+ 'StreamTagger',
+ 'StreamToDict',
+ 'StreamToExtendedDecorator',
+ 'StreamToQueue',
+ 'TestControl',
+ 'ThreadsafeForwardingResult',
+ 'TimestampingStreamResult',
+ 'try_import',
+ 'try_imports',
+ ]
+
+# Compat - removal announced in 0.9.25.
+try:
+ from extras import (
+ try_import,
+ try_imports,
+ )
+except ImportError:
+ # Support reading __init__ for __version__ without extras, because pip does
+ # not support setup_requires.
+ pass
+else:
+
+ from testtools.matchers._impl import (
+ Matcher,
+ )
+# Shut up, pyflakes. We are importing for documentation, not for namespacing.
+ Matcher
+
+ from testtools.runtest import (
+ MultipleExceptions,
+ RunTest,
+ )
+ from testtools.testcase import (
+ DecorateTestCaseResult,
+ ErrorHolder,
+ ExpectedException,
+ PlaceHolder,
+ TestCase,
+ clone_test_with_new_id,
+ run_test_with,
+ skip,
+ skipIf,
+ skipUnless,
+ )
+ from testtools.testresult import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestByTestResult,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ )
+ from testtools.testsuite import (
+ ConcurrentTestSuite,
+ ConcurrentStreamTestSuite,
+ FixtureSuite,
+ iterate_tests,
+ )
+ from testtools.distutilscmd import (
+ TestCommand,
+ )
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 9, 34, 'final', 0)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
new file mode 100644
index 00000000000..2b25c13e081
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+"""Compatibility helpers that are valid syntax in Python 2.x.
+
+Only add things here if they *only* work in Python 2.x or are Python 2
+alternatives to things that *only* work in Python 3.x.
+"""
+
+__all__ = [
+ 'reraise',
+ ]
+
+
+def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
+ """Re-raise an exception received from sys.exc_info() or similar."""
+ raise exc_class, exc_obj, exc_tb
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py
new file mode 100644
index 00000000000..7a482c14b43
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+"""Compatibility helpers that are valid syntax in Python 3.x.
+
+Only add things here if they *only* work in Python 3.x or are Python 3
+alternatives to things that *only* work in Python 2.x.
+"""
+
+__all__ = [
+ 'reraise',
+ ]
+
+
+def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
+ """Re-raise an exception received from sys.exc_info() or similar."""
+ raise exc_obj.with_traceback(exc_tb)
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py
new file mode 100644
index 00000000000..baf455a5f94
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/_spinner.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Evil reactor-spinning logic for running Twisted tests.
+
+This code is highly experimental, liable to change and not to be trusted. If
+you couldn't write this yourself, you should not be using it.
+"""
+
+__all__ = [
+ 'DeferredNotFired',
+ 'extract_result',
+ 'NoResultError',
+ 'not_reentrant',
+ 'ReentryError',
+ 'Spinner',
+ 'StaleJunkError',
+ 'TimeoutError',
+ 'trap_unhandled_errors',
+ ]
+
+import signal
+
+from testtools.monkey import MonkeyPatcher
+
+from twisted.internet import defer
+from twisted.internet.base import DelayedCall
+from twisted.internet.interfaces import IReactorThreads
+from twisted.python.failure import Failure
+from twisted.python.util import mergeFunctionMetadata
+
+
+class ReentryError(Exception):
+ """Raised when we try to re-enter a function that forbids it."""
+
+ def __init__(self, function):
+ Exception.__init__(self,
+ "%r in not re-entrant but was called within a call to itself."
+ % (function,))
+
+
+def not_reentrant(function, _calls={}):
+ """Decorates a function as not being re-entrant.
+
+ The decorated function will raise an error if called from within itself.
+ """
+ def decorated(*args, **kwargs):
+ if _calls.get(function, False):
+ raise ReentryError(function)
+ _calls[function] = True
+ try:
+ return function(*args, **kwargs)
+ finally:
+ _calls[function] = False
+ return mergeFunctionMetadata(function, decorated)
+
+
+class DeferredNotFired(Exception):
+ """Raised when we extract a result from a Deferred that's not fired yet."""
+
+
+def extract_result(deferred):
+ """Extract the result from a fired deferred.
+
+ It can happen that you have an API that returns Deferreds for
+ compatibility with Twisted code, but is in fact synchronous, i.e. the
+ Deferreds it returns have always fired by the time it returns. In this
+ case, you can use this function to convert the result back into the usual
+ form for a synchronous API, i.e. the result itself or a raised exception.
+
+ It would be very bad form to use this as some way of checking if a
+ Deferred has fired.
+ """
+ failures = []
+ successes = []
+ deferred.addCallbacks(successes.append, failures.append)
+ if len(failures) == 1:
+ failures[0].raiseException()
+ elif len(successes) == 1:
+ return successes[0]
+ else:
+ raise DeferredNotFired("%r has not fired yet." % (deferred,))
+
+
+def trap_unhandled_errors(function, *args, **kwargs):
+ """Run a function, trapping any unhandled errors in Deferreds.
+
+ Assumes that 'function' will have handled any errors in Deferreds by the
+ time it is complete. This is almost never true of any Twisted code, since
+ you can never tell when someone has added an errback to a Deferred.
+
+ If 'function' raises, then don't bother doing any unhandled error
+ jiggery-pokery, since something horrible has probably happened anyway.
+
+ :return: A tuple of '(result, error)', where 'result' is the value
+ returned by 'function' and 'error' is a list of 'defer.DebugInfo'
+ objects that have unhandled errors in Deferreds.
+ """
+ real_DebugInfo = defer.DebugInfo
+ debug_infos = []
+ def DebugInfo():
+ info = real_DebugInfo()
+ debug_infos.append(info)
+ return info
+ defer.DebugInfo = DebugInfo
+ try:
+ result = function(*args, **kwargs)
+ finally:
+ defer.DebugInfo = real_DebugInfo
+ errors = []
+ for info in debug_infos:
+ if info.failResult is not None:
+ errors.append(info)
+ # Disable the destructor that logs to error. We are already
+ # catching the error here.
+ info.__del__ = lambda: None
+ return result, errors
+
+
+class TimeoutError(Exception):
+ """Raised when run_in_reactor takes too long to run a function."""
+
+ def __init__(self, function, timeout):
+ Exception.__init__(self,
+ "%r took longer than %s seconds" % (function, timeout))
+
+
+class NoResultError(Exception):
+ """Raised when the reactor has stopped but we don't have any result."""
+
+ def __init__(self):
+ Exception.__init__(self,
+ "Tried to get test's result from Deferred when no result is "
+ "available. Probably means we received SIGINT or similar.")
+
+
+class StaleJunkError(Exception):
+ """Raised when there's junk in the spinner from a previous run."""
+
+ def __init__(self, junk):
+ Exception.__init__(self,
+ "There was junk in the spinner from a previous run. "
+ "Use clear_junk() to clear it out: %r" % (junk,))
+
+
+class Spinner(object):
+ """Spin the reactor until a function is done.
+
+ This class emulates the behaviour of twisted.trial in that it grotesquely
+ and horribly spins the Twisted reactor while a function is running, and
+ then kills the reactor when that function is complete and all the
+ callbacks in its chains are done.
+ """
+
+ _UNSET = object()
+
+ # Signals that we save and restore for each spin.
+ _PRESERVED_SIGNALS = [
+ 'SIGINT',
+ 'SIGTERM',
+ 'SIGCHLD',
+ ]
+
+ # There are many APIs within Twisted itself where a Deferred fires but
+ # leaves cleanup work scheduled for the reactor to do. Arguably, many of
+ # these are bugs. As such, we provide a facility to iterate the reactor
+ # event loop a number of times after every call, in order to shake out
+ # these buggy-but-commonplace events. The default is 0, because that is
+ # the ideal, and it actually works for many cases.
+ _OBLIGATORY_REACTOR_ITERATIONS = 0
+
+ def __init__(self, reactor, debug=False):
+ """Construct a Spinner.
+
+ :param reactor: A Twisted reactor.
+ :param debug: Whether or not to enable Twisted's debugging. Defaults
+ to False.
+ """
+ self._reactor = reactor
+ self._timeout_call = None
+ self._success = self._UNSET
+ self._failure = self._UNSET
+ self._saved_signals = []
+ self._junk = []
+ self._debug = debug
+
+ def _cancel_timeout(self):
+ if self._timeout_call:
+ self._timeout_call.cancel()
+
+ def _get_result(self):
+ if self._failure is not self._UNSET:
+ self._failure.raiseException()
+ if self._success is not self._UNSET:
+ return self._success
+ raise NoResultError()
+
+ def _got_failure(self, result):
+ self._cancel_timeout()
+ self._failure = result
+
+ def _got_success(self, result):
+ self._cancel_timeout()
+ self._success = result
+
+ def _stop_reactor(self, ignored=None):
+ """Stop the reactor!"""
+ self._reactor.crash()
+
+ def _timed_out(self, function, timeout):
+ e = TimeoutError(function, timeout)
+ self._failure = Failure(e)
+ self._stop_reactor()
+
+ def _clean(self):
+ """Clean up any junk in the reactor.
+
+ Will always iterate the reactor a number of times equal to
+ ``Spinner._OBLIGATORY_REACTOR_ITERATIONS``. This is to work around
+ bugs in various Twisted APIs where a Deferred fires but still leaves
+ work (e.g. cancelling a call, actually closing a connection) for the
+ reactor to do.
+ """
+ for i in range(self._OBLIGATORY_REACTOR_ITERATIONS):
+ self._reactor.iterate(0)
+ junk = []
+ for delayed_call in self._reactor.getDelayedCalls():
+ delayed_call.cancel()
+ junk.append(delayed_call)
+ for selectable in self._reactor.removeAll():
+ # Twisted sends a 'KILL' signal to selectables that provide
+ # IProcessTransport. Since only _dumbwin32proc processes do this,
+ # we aren't going to bother.
+ junk.append(selectable)
+ if IReactorThreads.providedBy(self._reactor):
+ if self._reactor.threadpool is not None:
+ self._reactor._stopThreadPool()
+ self._junk.extend(junk)
+ return junk
+
+ def clear_junk(self):
+ """Clear out our recorded junk.
+
+ :return: Whatever junk was there before.
+ """
+ junk = self._junk
+ self._junk = []
+ return junk
+
+ def get_junk(self):
+ """Return any junk that has been found on the reactor."""
+ return self._junk
+
+ def _save_signals(self):
+ available_signals = [
+ getattr(signal, name, None) for name in self._PRESERVED_SIGNALS]
+ self._saved_signals = [
+ (sig, signal.getsignal(sig)) for sig in available_signals if sig]
+
+ def _restore_signals(self):
+ for sig, hdlr in self._saved_signals:
+ signal.signal(sig, hdlr)
+ self._saved_signals = []
+
+ @not_reentrant
+ def run(self, timeout, function, *args, **kwargs):
+ """Run 'function' in a reactor.
+
+ If 'function' returns a Deferred, the reactor will keep spinning until
+ the Deferred fires and its chain completes or until the timeout is
+ reached -- whichever comes first.
+
+ :raise TimeoutError: If 'timeout' is reached before the Deferred
+ returned by 'function' has completed its callback chain.
+ :raise NoResultError: If the reactor is somehow interrupted before
+ the Deferred returned by 'function' has completed its callback
+ chain.
+ :raise StaleJunkError: If there's junk in the spinner from a previous
+ run.
+ :return: Whatever is at the end of the function's callback chain. If
+ it's an error, then raise that.
+ """
+ debug = MonkeyPatcher()
+ if self._debug:
+ debug.add_patch(defer.Deferred, 'debug', True)
+ debug.add_patch(DelayedCall, 'debug', True)
+ debug.patch()
+ try:
+ junk = self.get_junk()
+ if junk:
+ raise StaleJunkError(junk)
+ self._save_signals()
+ self._timeout_call = self._reactor.callLater(
+ timeout, self._timed_out, function, timeout)
+ # Calling 'stop' on the reactor will make it impossible to
+ # re-start the reactor. Since the default signal handlers for
+ # TERM, BREAK and INT all call reactor.stop(), we'll patch it over
+ # with crash. XXX: It might be a better idea to either install
+ # custom signal handlers or to override the methods that are
+ # Twisted's signal handlers.
+ stop, self._reactor.stop = self._reactor.stop, self._reactor.crash
+ def run_function():
+ d = defer.maybeDeferred(function, *args, **kwargs)
+ d.addCallbacks(self._got_success, self._got_failure)
+ d.addBoth(self._stop_reactor)
+ try:
+ self._reactor.callWhenRunning(run_function)
+ self._reactor.run()
+ finally:
+ self._reactor.stop = stop
+ self._restore_signals()
+ try:
+ return self._get_result()
+ finally:
+ self._clean()
+ finally:
+ debug.restore()
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py
new file mode 100644
index 00000000000..5502e0c2161
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/compat.py
@@ -0,0 +1,415 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+"""Compatibility support for python 2 and 3."""
+
+__metaclass__ = type
+__all__ = [
+ '_b',
+ '_u',
+ 'advance_iterator',
+ 'all',
+ 'BytesIO',
+ 'classtypes',
+ 'isbaseexception',
+ 'istext',
+ 'str_is_unicode',
+ 'StringIO',
+ 'reraise',
+ 'unicode_output_stream',
+ ]
+
+import codecs
+import io
+import linecache
+import locale
+import os
+import re
+import sys
+import traceback
+import unicodedata
+
+from extras import try_imports
+
+BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO'])
+StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+try:
+ from testtools import _compat2x as _compat
+except (SyntaxError, ImportError):
+ from testtools import _compat3x as _compat
+
+reraise = _compat.reraise
+
+
+__u_doc = """A function version of the 'u' prefix.
+
+This is needed because the u prefix is not usable in Python 3 but is required
+in Python 2 to get a unicode object.
+
+To migrate code that was written as u'\u1234' in Python 2 to 2+3 change
+it to be _u('\u1234'). The Python 3 interpreter will decode it
+appropriately and the no-op _u for Python 3 lets it through, in Python
+2 we then call unicode-escape in the _u function.
+"""
+
+if sys.version_info > (3, 0):
+ import builtins
+ def _u(s):
+ return s
+ _r = ascii
+ def _b(s):
+ """A byte literal."""
+ return s.encode("latin-1")
+ advance_iterator = next
+ # GZ 2011-08-24: Seems istext() is easy to misuse and makes for bad code.
+ def istext(x):
+ return isinstance(x, str)
+ def classtypes():
+ return (type,)
+ str_is_unicode = True
+else:
+ import __builtin__ as builtins
+ def _u(s):
+ # The double replace mangling going on prepares the string for
+ # unicode-escape - \foo is preserved, \u and \U are decoded.
+ return (s.replace("\\", "\\\\").replace("\\\\u", "\\u")
+ .replace("\\\\U", "\\U").decode("unicode-escape"))
+ _r = repr
+ def _b(s):
+ return s
+ advance_iterator = lambda it: it.next()
+ def istext(x):
+ return isinstance(x, basestring)
+ def classtypes():
+ import types
+ return (type, types.ClassType)
+ str_is_unicode = sys.platform == "cli"
+
+_u.__doc__ = __u_doc
+
+
+if sys.version_info > (2, 5):
+ all = all
+ _error_repr = BaseException.__repr__
+ def isbaseexception(exception):
+ """Return whether exception inherits from BaseException only"""
+ return (isinstance(exception, BaseException)
+ and not isinstance(exception, Exception))
+else:
+ def all(iterable):
+ """If contents of iterable all evaluate as boolean True"""
+ for obj in iterable:
+ if not obj:
+ return False
+ return True
+ def _error_repr(exception):
+ """Format an exception instance as Python 2.5 and later do"""
+ return exception.__class__.__name__ + repr(exception.args)
+ def isbaseexception(exception):
+ """Return whether exception would inherit from BaseException only
+
+ This approximates the hierarchy in Python 2.5 and later, compare the
+ difference between the diagrams at the bottom of the pages:
+ <http://docs.python.org/release/2.4.4/lib/module-exceptions.html>
+ <http://docs.python.org/release/2.5.4/lib/module-exceptions.html>
+ """
+ return isinstance(exception, (KeyboardInterrupt, SystemExit))
+
+
+# GZ 2011-08-24: Using isinstance checks like this encourages bad interfaces,
+# there should be better ways to write code needing this.
+if not issubclass(getattr(builtins, "bytes", str), str):
+ def _isbytes(x):
+ return isinstance(x, bytes)
+else:
+ # Never return True on Pythons that provide the name but not the real type
+ def _isbytes(x):
+ return False
+
+
+def _slow_escape(text):
+ """Escape unicode ``text`` leaving printable characters unmodified
+
+ The behaviour emulates the Python 3 implementation of repr, see
+ unicode_repr in unicodeobject.c and isprintable definition.
+
+ Because this iterates over the input a codepoint at a time, it's slow, and
+ does not handle astral characters correctly on Python builds with 16 bit
+ rather than 32 bit unicode type.
+ """
+ output = []
+ for c in text:
+ o = ord(c)
+ if o < 256:
+ if o < 32 or 126 < o < 161:
+ output.append(c.encode("unicode-escape"))
+ elif o == 92:
+ # Separate due to bug in unicode-escape codec in Python 2.4
+ output.append("\\\\")
+ else:
+ output.append(c)
+ else:
+ # To get correct behaviour would need to pair up surrogates here
+ if unicodedata.category(c)[0] in "CZ":
+ output.append(c.encode("unicode-escape"))
+ else:
+ output.append(c)
+ return "".join(output)
+
+
+def text_repr(text, multiline=None):
+ """Rich repr for ``text`` returning unicode, triple quoted if ``multiline``.
+ """
+ is_py3k = sys.version_info > (3, 0)
+ nl = _isbytes(text) and bytes((0xA,)) or "\n"
+ if multiline is None:
+ multiline = nl in text
+ if not multiline and (is_py3k or not str_is_unicode and type(text) is str):
+ # Use normal repr for single line of unicode on Python 3 or bytes
+ return repr(text)
+ prefix = repr(text[:0])[:-2]
+ if multiline:
+ # To escape multiline strings, split and process each line in turn,
+ # making sure that quotes are not escaped.
+ if is_py3k:
+ offset = len(prefix) + 1
+ lines = []
+ for l in text.split(nl):
+ r = repr(l)
+ q = r[-1]
+ lines.append(r[offset:-1].replace("\\" + q, q))
+ elif not str_is_unicode and isinstance(text, str):
+ lines = [l.encode("string-escape").replace("\\'", "'")
+ for l in text.split("\n")]
+ else:
+ lines = [_slow_escape(l) for l in text.split("\n")]
+ # Combine the escaped lines and append two of the closing quotes,
+ # then iterate over the result to escape triple quotes correctly.
+ _semi_done = "\n".join(lines) + "''"
+ p = 0
+ while True:
+ p = _semi_done.find("'''", p)
+ if p == -1:
+ break
+ _semi_done = "\\".join([_semi_done[:p], _semi_done[p:]])
+ p += 2
+ return "".join([prefix, "'''\\\n", _semi_done, "'"])
+ escaped_text = _slow_escape(text)
+ # Determine which quote character to use and if one gets prefixed with a
+ # backslash following the same logic Python uses for repr() on strings
+ quote = "'"
+ if "'" in text:
+ if '"' in text:
+ escaped_text = escaped_text.replace("'", "\\'")
+ else:
+ quote = '"'
+ return "".join([prefix, quote, escaped_text, quote])
+
+
+def unicode_output_stream(stream):
+ """Get wrapper for given stream that writes any unicode without exception
+
+ Characters that can't be coerced to the encoding of the stream, or 'ascii'
+ if valid encoding is not found, will be replaced. The original stream may
+ be returned in situations where a wrapper is determined unneeded.
+
+ The wrapper only allows unicode to be written, not non-ascii bytestrings,
+ which is a good thing to ensure sanity and sanitation.
+ """
+ if (sys.platform == "cli" or
+ isinstance(stream, (io.TextIOWrapper, io.StringIO))):
+ # Best to never encode before writing in IronPython, or if it is
+        # already a TextIO (which in the io library has no encoding
+        # attribute).
+ return stream
+ try:
+ writer = codecs.getwriter(stream.encoding or "")
+ except (AttributeError, LookupError):
+ return codecs.getwriter("ascii")(stream, "replace")
+ if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
+ # The current stream has a unicode encoding so no error handler is needed
+ if sys.version_info > (3, 0):
+ return stream
+ return writer(stream)
+ if sys.version_info > (3, 0):
+ # Python 3 doesn't seem to make this easy, handle a common case
+ try:
+ return stream.__class__(stream.buffer, stream.encoding, "replace",
+ stream.newlines, stream.line_buffering)
+ except AttributeError:
+ pass
+ return writer(stream, "replace")
+
+
+# The default source encoding is actually "iso-8859-1" until Python 2.5 but
+# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to
+# treat all versions the same way
+_default_source_encoding = "ascii"
+
+# Pattern specified in <http://www.python.org/dev/peps/pep-0263/>
+_cookie_search = re.compile(r"coding[:=]\s*([-\w.]+)").search
+
+def _detect_encoding(lines):
+ """Get the encoding of a Python source file from a list of lines as bytes
+
+ This function does less than tokenize.detect_encoding added in Python 3 as
+ it does not attempt to raise a SyntaxError when the interpreter would, it
+ just wants the encoding of a source file Python has already compiled and
+ determined is valid.
+ """
+ if not lines:
+ return _default_source_encoding
+ if lines[0].startswith("\xef\xbb\xbf"):
+ # Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError
+ return "utf-8"
+ # Only the first two lines of the source file are examined
+ magic = _cookie_search("".join(lines[:2]))
+ if magic is None:
+ return _default_source_encoding
+ encoding = magic.group(1)
+ try:
+ codecs.lookup(encoding)
+ except LookupError:
+ # Some codecs raise something other than LookupError if they don't
+ # support the given error handler, but not the text ones that could
+ # actually be used for Python source code
+ return _default_source_encoding
+ return encoding
+
+
+class _EncodingTuple(tuple):
+ """A tuple type that can have an encoding attribute smuggled on"""
+
+
+def _get_source_encoding(filename):
+ """Detect, cache and return the encoding of Python source at filename"""
+ try:
+ return linecache.cache[filename].encoding
+ except (AttributeError, KeyError):
+ encoding = _detect_encoding(linecache.getlines(filename))
+ if filename in linecache.cache:
+ newtuple = _EncodingTuple(linecache.cache[filename])
+ newtuple.encoding = encoding
+ linecache.cache[filename] = newtuple
+ return encoding
+
+
+def _get_exception_encoding():
+ """Return the encoding we expect messages from the OS to be encoded in"""
+ if os.name == "nt":
+ # GZ 2010-05-24: Really want the codepage number instead, the error
+ # handling of standard codecs is more deterministic
+ return "mbcs"
+ # GZ 2010-05-23: We need this call to be after initialisation, but there's
+ # no benefit in asking more than once as it's a global
+ # setting that can change after the message is formatted.
+ return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii"
+
+
+def _exception_to_text(evalue):
+ """Try hard to get a sensible text value out of an exception instance"""
+ try:
+ return unicode(evalue)
+ except KeyboardInterrupt:
+ raise
+ except:
+ # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
+ pass
+ try:
+ return str(evalue).decode(_get_exception_encoding(), "replace")
+ except KeyboardInterrupt:
+ raise
+ except:
+ # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
+ pass
+ # Okay, out of ideas, let higher level handle it
+ return None
+
+
+def _format_stack_list(stack_lines):
+ """Format 'stack_lines' and return a list of unicode strings.
+
+ :param stack_lines: A list of filename, lineno, name, and line variables,
+ probably obtained by calling traceback.extract_tb or
+ traceback.extract_stack.
+ """
+ fs_enc = sys.getfilesystemencoding()
+ extracted_list = []
+ for filename, lineno, name, line in stack_lines:
+ extracted_list.append((
+ filename.decode(fs_enc, "replace"),
+ lineno,
+ name.decode("ascii", "replace"),
+ line and line.decode(
+ _get_source_encoding(filename), "replace")))
+ return traceback.format_list(extracted_list)
+
+
+def _format_exception_only(eclass, evalue):
+ """Format the excption part of a traceback.
+
+ :param eclass: The type of the exception being formatted.
+ :param evalue: The exception instance.
+ :returns: A list of unicode strings.
+ """
+ list = []
+ if evalue is None:
+ # Is a (deprecated) string exception
+ list.append((eclass + "\n").decode("ascii", "replace"))
+ return list
+ if isinstance(evalue, SyntaxError):
+ # Avoid duplicating the special formatting for SyntaxError here,
+ # instead create a new instance with unicode filename and line
+ # Potentially gives duff spacing, but that's a pre-existing issue
+ try:
+ msg, (filename, lineno, offset, line) = evalue
+ except (TypeError, ValueError):
+ pass # Strange exception instance, fall through to generic code
+ else:
+ # Errors during parsing give the line from buffer encoded as
+ # latin-1 or utf-8 or the encoding of the file depending on the
+ # coding and whether the patch for issue #1031213 is applied, so
+ # give up on trying to decode it and just read the file again
+ if line:
+ bytestr = linecache.getline(filename, lineno)
+ if bytestr:
+ if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"):
+ bytestr = bytestr[3:]
+ line = bytestr.decode(
+ _get_source_encoding(filename), "replace")
+ del linecache.cache[filename]
+ else:
+ line = line.decode("ascii", "replace")
+ if filename:
+ fs_enc = sys.getfilesystemencoding()
+ filename = filename.decode(fs_enc, "replace")
+ evalue = eclass(msg, (filename, lineno, offset, line))
+ list.extend(traceback.format_exception_only(eclass, evalue))
+ return list
+ sclass = eclass.__name__
+ svalue = _exception_to_text(evalue)
+ if svalue:
+ list.append("%s: %s\n" % (sclass, svalue))
+ elif svalue is None:
+ # GZ 2010-05-24: Not a great fallback message, but keep for the moment
+ list.append(_u("%s: <unprintable %s object>\n" % (sclass, sclass)))
+ else:
+ list.append(_u("%s\n" % sclass))
+ return list
+
+
+_TB_HEADER = _u('Traceback (most recent call last):\n')
+
+
+def _format_exc_info(eclass, evalue, tb, limit=None):
+ """Format a stack trace and the exception information as unicode
+
+ Compatibility function for Python 2 which ensures each component of a
+ traceback is correctly decoded according to its origins.
+
+ Based on traceback.format_exception and related functions.
+ """
+ return [_TB_HEADER] \
+ + _format_stack_list(traceback.extract_tb(tb, limit)) \
+ + _format_exception_only(eclass, evalue)
+
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py
new file mode 100644
index 00000000000..09f44844524
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content.py
@@ -0,0 +1,385 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Content - a MIME-like Content object."""
+
+__all__ = [
+ 'attach_file',
+ 'Content',
+ 'content_from_file',
+ 'content_from_stream',
+ 'json_content',
+ 'text_content',
+ 'TracebackContent',
+ ]
+
+import codecs
+import inspect
+import json
+import os
+import sys
+import traceback
+
+from extras import try_import
+
+from testtools.compat import (
+ _b,
+ _format_exception_only,
+ _format_stack_list,
+ _TB_HEADER,
+ _u,
+ str_is_unicode,
+)
+from testtools.content_type import ContentType, JSON, UTF8_TEXT
+
+
+functools = try_import('functools')
+
+_join_b = _b("").join
+
+
+DEFAULT_CHUNK_SIZE = 4096
+
+STDOUT_LINE = '\nStdout:\n%s'
+STDERR_LINE = '\nStderr:\n%s'
+
+
+def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
+ """Read 'stream' in chunks of 'chunk_size'.
+
+ :param stream: A file-like object to read from.
+ :param chunk_size: The size of each read from 'stream'.
+ :param seek_offset: If non-None, seek before iterating.
+ :param seek_whence: Pass through to the seek call, if seeking.
+ """
+ if seek_offset is not None:
+ stream.seek(seek_offset, seek_whence)
+ chunk = stream.read(chunk_size)
+ while chunk:
+ yield chunk
+ chunk = stream.read(chunk_size)
+
+
+class Content(object):
+ """A MIME-like Content object.
+
+ Content objects can be serialised to bytes using the iter_bytes method.
+ If the Content-Type is recognised by other code, they are welcome to
+    look for richer contents than mere byte serialisation - for example in
+ memory object graphs etc. However, such code MUST be prepared to receive
+ a generic Content object that has been reconstructed from a byte stream.
+
+ :ivar content_type: The content type of this Content.
+ """
+
+ def __init__(self, content_type, get_bytes):
+ """Create a ContentType."""
+ if None in (content_type, get_bytes):
+ raise ValueError("None not permitted in %r, %r" % (
+ content_type, get_bytes))
+ self.content_type = content_type
+ self._get_bytes = get_bytes
+
+ def __eq__(self, other):
+ return (self.content_type == other.content_type and
+ _join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
+
+ def as_text(self):
+ """Return all of the content as text.
+
+ This is only valid where ``iter_text`` is. It will load all of the
+ content into memory. Where this is a concern, use ``iter_text``
+ instead.
+ """
+ return _u('').join(self.iter_text())
+
+ def iter_bytes(self):
+ """Iterate over bytestrings of the serialised content."""
+ return self._get_bytes()
+
+ def iter_text(self):
+ """Iterate over the text of the serialised content.
+
+ This is only valid for text MIME types, and will use ISO-8859-1 if
+ no charset parameter is present in the MIME type. (This is somewhat
+        arbitrary, but consistent with RFC2616 3.7.1).
+
+ :raises ValueError: If the content type is not text/\*.
+ """
+ if self.content_type.type != "text":
+ raise ValueError("Not a text type %r" % self.content_type)
+ return self._iter_text()
+
+ def _iter_text(self):
+ """Worker for iter_text - does the decoding."""
+ encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
+ try:
+ # 2.5+
+ decoder = codecs.getincrementaldecoder(encoding)()
+ for bytes in self.iter_bytes():
+ yield decoder.decode(bytes)
+ final = decoder.decode(_b(''), True)
+ if final:
+ yield final
+ except AttributeError:
+ # < 2.5
+ bytes = ''.join(self.iter_bytes())
+ yield bytes.decode(encoding)
+
+ def __repr__(self):
+ return "<Content type=%r, value=%r>" % (
+ self.content_type, _join_b(self.iter_bytes()))
+
+
+class StackLinesContent(Content):
+ """Content object for stack lines.
+
+ This adapts a list of "preprocessed" stack lines into a content object.
+ The stack lines are most likely produced from ``traceback.extract_stack``
+ or ``traceback.extract_tb``.
+
+ text/x-traceback;language=python is used for the mime type, in order to
+ provide room for other languages to format their tracebacks differently.
+ """
+
+ # Whether or not to hide layers of the stack trace that are
+ # unittest/testtools internal code. Defaults to True since the
+ # system-under-test is rarely unittest or testtools.
+ HIDE_INTERNAL_STACK = True
+
+ def __init__(self, stack_lines, prefix_content="", postfix_content=""):
+ """Create a StackLinesContent for ``stack_lines``.
+
+ :param stack_lines: A list of preprocessed stack lines, probably
+ obtained by calling ``traceback.extract_stack`` or
+ ``traceback.extract_tb``.
+ :param prefix_content: If specified, a unicode string to prepend to the
+ text content.
+ :param postfix_content: If specified, a unicode string to append to the
+ text content.
+ """
+ content_type = ContentType('text', 'x-traceback',
+ {"language": "python", "charset": "utf8"})
+ value = prefix_content + \
+ self._stack_lines_to_unicode(stack_lines) + \
+ postfix_content
+ super(StackLinesContent, self).__init__(
+ content_type, lambda: [value.encode("utf8")])
+
+ def _stack_lines_to_unicode(self, stack_lines):
+ """Converts a list of pre-processed stack lines into a unicode string.
+ """
+
+ # testtools customization. When str is unicode (e.g. IronPython,
+ # Python 3), traceback.format_exception returns unicode. For Python 2,
+ # it returns bytes. We need to guarantee unicode.
+ if str_is_unicode:
+ format_stack_lines = traceback.format_list
+ else:
+ format_stack_lines = _format_stack_list
+
+ msg_lines = format_stack_lines(stack_lines)
+
+ return ''.join(msg_lines)
+
+
+def TracebackContent(err, test):
+ """Content object for tracebacks.
+
+ This adapts an exc_info tuple to the Content interface.
+ text/x-traceback;language=python is used for the mime type, in order to
+ provide room for other languages to format their tracebacks differently.
+ """
+ if err is None:
+ raise ValueError("err may not be None")
+
+ exctype, value, tb = err
+ # Skip test runner traceback levels
+ if StackLinesContent.HIDE_INTERNAL_STACK:
+ while tb and '__unittest' in tb.tb_frame.f_globals:
+ tb = tb.tb_next
+
+ # testtools customization. When str is unicode (e.g. IronPython,
+ # Python 3), traceback.format_exception_only returns unicode. For Python 2,
+ # it returns bytes. We need to guarantee unicode.
+ if str_is_unicode:
+ format_exception_only = traceback.format_exception_only
+ else:
+ format_exception_only = _format_exception_only
+
+ limit = None
+ # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
+ if (False
+ and StackLinesContent.HIDE_INTERNAL_STACK
+ and test.failureException
+ and isinstance(value, test.failureException)):
+ # Skip assert*() traceback levels
+ limit = 0
+ while tb and not self._is_relevant_tb_level(tb):
+ limit += 1
+ tb = tb.tb_next
+
+ prefix = _TB_HEADER
+ stack_lines = traceback.extract_tb(tb, limit)
+ postfix = ''.join(format_exception_only(exctype, value))
+
+ return StackLinesContent(stack_lines, prefix, postfix)
+
+
+def StacktraceContent(prefix_content="", postfix_content=""):
+ """Content object for stack traces.
+
+ This function will create and return a content object that contains a
+ stack trace.
+
+ The mime type is set to 'text/x-traceback;language=python', so other
+ languages can format their stack traces differently.
+
+ :param prefix_content: A unicode string to add before the stack lines.
+ :param postfix_content: A unicode string to add after the stack lines.
+ """
+ stack = inspect.stack()[1:]
+
+ if StackLinesContent.HIDE_INTERNAL_STACK:
+ limit = 1
+ while limit < len(stack) and '__unittest' not in stack[limit][0].f_globals:
+ limit += 1
+ else:
+ limit = -1
+
+ frames_only = [line[0] for line in stack[:limit]]
+ processed_stack = [ ]
+ for frame in reversed(frames_only):
+ filename, line, function, context, _ = inspect.getframeinfo(frame)
+ context = ''.join(context)
+ processed_stack.append((filename, line, function, context))
+ return StackLinesContent(processed_stack, prefix_content, postfix_content)
+
+
+def json_content(json_data):
+ """Create a JSON `Content` object from JSON-encodeable data."""
+ data = json.dumps(json_data)
+ if str_is_unicode:
+ # The json module perversely returns native str not bytes
+ data = data.encode('utf8')
+ return Content(JSON, lambda: [data])
+
+
+def text_content(text):
+ """Create a `Content` object from some text.
+
+ This is useful for adding details which are short strings.
+ """
+ return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
+
+
+def maybe_wrap(wrapper, func):
+ """Merge metadata for func into wrapper if functools is present."""
+ if functools is not None:
+ wrapper = functools.update_wrapper(wrapper, func)
+ return wrapper
+
+
+def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
+ buffer_now=False, seek_offset=None, seek_whence=0):
+ """Create a `Content` object from a file on disk.
+
+    Note that unless 'buffer_now' is explicitly passed in as True, the file
+    will only be read from when ``iter_bytes`` is called.
+
+ :param path: The path to the file to be used as content.
+ :param content_type: The type of content. If not specified, defaults
+ to UTF8-encoded text/plain.
+ :param chunk_size: The size of chunks to read from the file.
+ Defaults to ``DEFAULT_CHUNK_SIZE``.
+ :param buffer_now: If True, read the file from disk now and keep it in
+ memory. Otherwise, only read when the content is serialized.
+ :param seek_offset: If non-None, seek within the stream before reading it.
+ :param seek_whence: If supplied, pass to stream.seek() when seeking.
+ """
+ if content_type is None:
+ content_type = UTF8_TEXT
+ def reader():
+        # This should be try:finally:, but python2.4 makes that hard. When
+        # we drop older python support we can make this use a context manager
+ # for maximum simplicity.
+ stream = open(path, 'rb')
+ for chunk in _iter_chunks(stream, chunk_size, seek_offset, seek_whence):
+ yield chunk
+ stream.close()
+ return content_from_reader(reader, content_type, buffer_now)
+
+
+def content_from_stream(stream, content_type=None,
+ chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
+ seek_offset=None, seek_whence=0):
+ """Create a `Content` object from a file-like stream.
+
+ Note that the stream will only be read from when ``iter_bytes`` is
+ called.
+
+ :param stream: A file-like object to read the content from. The stream
+ is not closed by this function or the content object it returns.
+ :param content_type: The type of content. If not specified, defaults
+ to UTF8-encoded text/plain.
+ :param chunk_size: The size of chunks to read from the file.
+ Defaults to ``DEFAULT_CHUNK_SIZE``.
+ :param buffer_now: If True, reads from the stream right now. Otherwise,
+ only reads when the content is serialized. Defaults to False.
+ :param seek_offset: If non-None, seek within the stream before reading it.
+ :param seek_whence: If supplied, pass to stream.seek() when seeking.
+ """
+ if content_type is None:
+ content_type = UTF8_TEXT
+ reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
+ return content_from_reader(reader, content_type, buffer_now)
+
+
+def content_from_reader(reader, content_type, buffer_now):
+ """Create a Content object that will obtain the content from reader.
+
+ :param reader: A callback to read the content. Should return an iterable of
+ bytestrings.
+ :param content_type: The content type to create.
+ :param buffer_now: If True the reader is evaluated immediately and
+ buffered.
+ """
+ if content_type is None:
+ content_type = UTF8_TEXT
+ if buffer_now:
+ contents = list(reader())
+ reader = lambda: contents
+ return Content(content_type, reader)
+
+
+def attach_file(detailed, path, name=None, content_type=None,
+ chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
+ """Attach a file to this test as a detail.
+
+ This is a convenience method wrapping around ``addDetail``.
+
+    Note that unless 'buffer_now' is left as True, the file *must* still
+    exist when the test result is evaluated with the results of this test,
+    which may be after the test has been torn down.
+
+    :param detailed: An object with an ``addDetail`` method (typically a
+        test case).
+ :param path: The path to the file to attach.
+ :param name: The name to give to the detail for the attached file.
+ :param content_type: The content type of the file. If not provided,
+ defaults to UTF8-encoded text/plain.
+ :param chunk_size: The size of chunks to read from the file. Defaults
+ to something sensible.
+ :param buffer_now: If False the file content is read when the content
+ object is evaluated rather than when attach_file is called.
+ Note that this may be after any cleanups that obj_with_details has, so
+ if the file is a temporary file disabling buffer_now may cause the file
+ to be read after it is deleted. To handle those cases, using
+ attach_file as a cleanup is recommended because it guarantees a
+ sequence for when the attach_file call is made::
+
+        detailed.addCleanup(attach_file, detailed, 'foo.txt')
+ """
+ if name is None:
+ name = os.path.basename(path)
+ content_object = content_from_file(
+ path, content_type, chunk_size, buffer_now)
+ detailed.addDetail(name, content_object)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py
new file mode 100644
index 00000000000..bbf314b492e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/content_type.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""ContentType - a MIME Content Type."""
+
+
+class ContentType(object):
+ """A content type from http://www.iana.org/assignments/media-types/
+
+ :ivar type: The primary type, e.g. "text" or "application"
+ :ivar subtype: The subtype, e.g. "plain" or "octet-stream"
+ :ivar parameters: A dict of additional parameters specific to the
+ content type.
+ """
+
+ def __init__(self, primary_type, sub_type, parameters=None):
+ """Create a ContentType."""
+ if None in (primary_type, sub_type):
+ raise ValueError("None not permitted in %r, %r" % (
+ primary_type, sub_type))
+ self.type = primary_type
+ self.subtype = sub_type
+ self.parameters = parameters or {}
+
+ def __eq__(self, other):
+ if type(other) != ContentType:
+ return False
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ if self.parameters:
+ params = '; '
+ params += '; '.join(
+ sorted('%s="%s"' % (k, v) for k, v in self.parameters.items()))
+ else:
+ params = ''
+ return "%s/%s%s" % (self.type, self.subtype, params)
+
+
+JSON = ContentType('application', 'json')
+
+UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})
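+
+# A brief sketch of how ContentType renders (illustrative, not an upstream
+# doctest):
+#
+#   ContentType('text', 'plain', {'charset': 'utf8'})
+#   # repr() -> 'text/plain; charset="utf8"'
+#   repr(JSON)   # -> 'application/json'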
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py
new file mode 100644
index 00000000000..cf33c06e277
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py
@@ -0,0 +1,336 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Individual test case execution for tests that return Deferreds.
+
+This module is highly experimental and is liable to change in ways that cause
+subtle failures in tests. Use at your own peril.
+"""
+
+__all__ = [
+ 'assert_fails_with',
+ 'AsynchronousDeferredRunTest',
+ 'AsynchronousDeferredRunTestForBrokenTwisted',
+ 'SynchronousDeferredRunTest',
+ ]
+
+import sys
+
+from testtools.compat import StringIO
+from testtools.content import (
+ Content,
+ text_content,
+ )
+from testtools.content_type import UTF8_TEXT
+from testtools.runtest import RunTest
+from testtools._spinner import (
+ extract_result,
+ NoResultError,
+ Spinner,
+ TimeoutError,
+ trap_unhandled_errors,
+ )
+
+from twisted.internet import defer
+from twisted.python import log
+from twisted.trial.unittest import _LogObserver
+
+
+class _DeferredRunTest(RunTest):
+ """Base for tests that return Deferreds."""
+
+ def _got_user_failure(self, failure, tb_label='traceback'):
+ """We got a failure from user code."""
+ return self._got_user_exception(
+ (failure.type, failure.value, failure.getTracebackObject()),
+ tb_label=tb_label)
+
+
+class SynchronousDeferredRunTest(_DeferredRunTest):
+ """Runner for tests that return synchronous Deferreds."""
+
+ def _run_user(self, function, *args):
+ d = defer.maybeDeferred(function, *args)
+ d.addErrback(self._got_user_failure)
+ result = extract_result(d)
+ return result
+
+
+def run_with_log_observers(observers, function, *args, **kwargs):
+ """Run 'function' with the given Twisted log observers."""
+ real_observers = list(log.theLogPublisher.observers)
+ for observer in real_observers:
+ log.theLogPublisher.removeObserver(observer)
+ for observer in observers:
+ log.theLogPublisher.addObserver(observer)
+ try:
+ return function(*args, **kwargs)
+ finally:
+ for observer in observers:
+ log.theLogPublisher.removeObserver(observer)
+ for observer in real_observers:
+ log.theLogPublisher.addObserver(observer)
+
+
+# Observer of the Twisted log that we install during tests.
+_log_observer = _LogObserver()
+
+
+
+class AsynchronousDeferredRunTest(_DeferredRunTest):
+ """Runner for tests that return Deferreds that fire asynchronously.
+
+ That is, this test runner assumes that the Deferreds will only fire if the
+ reactor is left to spin for a while.
+
+ Do not rely too heavily on the nuances of the behaviour of this class.
+ What it does to the reactor is black magic, and if we can find nicer ways
+ of doing it we will gladly break backwards compatibility.
+
+ This is highly experimental code. Use at your own risk.
+ """
+
+ def __init__(self, case, handlers=None, reactor=None, timeout=0.005,
+ debug=False):
+ """Construct an `AsynchronousDeferredRunTest`.
+
+ :param case: The `TestCase` to run.
+ :param handlers: A list of exception handlers (ExceptionType, handler)
+ where 'handler' is a callable that takes a `TestCase`, a
+ ``testtools.TestResult`` and the exception raised.
+ :param reactor: The Twisted reactor to use. If not given, we use the
+ default reactor.
+ :param timeout: The maximum time allowed for running a test. The
+ default is 0.005s.
+ :param debug: Whether or not to enable Twisted's debugging. Use this
+ to get information about unhandled Deferreds and left-over
+ DelayedCalls. Defaults to False.
+ """
+ super(AsynchronousDeferredRunTest, self).__init__(case, handlers)
+ if reactor is None:
+ from twisted.internet import reactor
+ self._reactor = reactor
+ self._timeout = timeout
+ self._debug = debug
+
+ @classmethod
+ def make_factory(cls, reactor=None, timeout=0.005, debug=False):
+ """Make a factory that conforms to the RunTest factory interface."""
+ # This is horrible, but it means that the return value of the method
+ # will be able to be assigned to a class variable *and* also be
+ # invoked directly.
+ class AsynchronousDeferredRunTestFactory:
+ def __call__(self, case, handlers=None):
+ return cls(case, handlers, reactor, timeout, debug)
+ return AsynchronousDeferredRunTestFactory()
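+
+ # A hypothetical sketch of the factory in use (the test class below is an
+ # assumption, not part of testtools):
+ #
+ #   class SomeTwistedTest(testtools.TestCase):
+ #       run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=2.0)
+ #
+ #       def test_fires(self):
+ #           return defer.succeed(None)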
+
+ @defer.deferredGenerator
+ def _run_cleanups(self):
+ """Run the cleanups on the test case.
+
+ We expect that the cleanups on the test case can also return
+ asynchronous Deferreds. As such, we take the responsibility for
+ running the cleanups, rather than letting TestCase do it.
+ """
+ while self.case._cleanups:
+ f, args, kwargs = self.case._cleanups.pop()
+ d = defer.maybeDeferred(f, *args, **kwargs)
+ thing = defer.waitForDeferred(d)
+ yield thing
+ try:
+ thing.getResult()
+ except Exception:
+ exc_info = sys.exc_info()
+ self.case._report_traceback(exc_info)
+ last_exception = exc_info[1]
+ yield last_exception
+
+ def _make_spinner(self):
+ """Make the `Spinner` to be used to run the tests."""
+ return Spinner(self._reactor, debug=self._debug)
+
+ def _run_deferred(self):
+ """Run the test, assuming everything in it is Deferred-returning.
+
+ This should return a Deferred that fires with True if the test was
+ successful and False if the test was not successful. It should *not*
+ call addSuccess on the result, because there's reactor clean up that
+ we need to do afterwards.
+ """
+ fails = []
+
+ def fail_if_exception_caught(exception_caught):
+ if self.exception_caught == exception_caught:
+ fails.append(None)
+
+ def clean_up(ignored=None):
+ """Run the cleanups."""
+ d = self._run_cleanups()
+ def clean_up_done(result):
+ if result is not None:
+ self._exceptions.append(result)
+ fails.append(None)
+ return d.addCallback(clean_up_done)
+
+ def set_up_done(exception_caught):
+ """Set up is done, either clean up or run the test."""
+ if self.exception_caught == exception_caught:
+ fails.append(None)
+ return clean_up()
+ else:
+ d = self._run_user(self.case._run_test_method, self.result)
+ d.addCallback(fail_if_exception_caught)
+ d.addBoth(tear_down)
+ return d
+
+ def tear_down(ignored):
+ d = self._run_user(self.case._run_teardown, self.result)
+ d.addCallback(fail_if_exception_caught)
+ d.addBoth(clean_up)
+ return d
+
+ d = self._run_user(self.case._run_setup, self.result)
+ d.addCallback(set_up_done)
+ d.addBoth(lambda ignored: len(fails) == 0)
+ return d
+
+ def _log_user_exception(self, e):
+ """Raise 'e' and report it as a user exception."""
+ try:
+ raise e
+ except e.__class__:
+ self._got_user_exception(sys.exc_info())
+
+ def _blocking_run_deferred(self, spinner):
+ try:
+ return trap_unhandled_errors(
+ spinner.run, self._timeout, self._run_deferred)
+ except NoResultError:
+ # We didn't get a result at all! This could be for any number of
+ # reasons, but most likely someone hit Ctrl-C during the test.
+ raise KeyboardInterrupt
+ except TimeoutError:
+ # The function took too long to run.
+ self._log_user_exception(TimeoutError(self.case, self._timeout))
+ return False, []
+
+ def _run_core(self):
+ # Add an observer to trap all logged errors.
+ self.case.reactor = self._reactor
+ error_observer = _log_observer
+ full_log = StringIO()
+ full_observer = log.FileLogObserver(full_log)
+ spinner = self._make_spinner()
+ successful, unhandled = run_with_log_observers(
+ [error_observer.gotEvent, full_observer.emit],
+ self._blocking_run_deferred, spinner)
+
+ self.case.addDetail(
+ 'twisted-log', Content(UTF8_TEXT, full_log.readlines))
+
+ logged_errors = error_observer.flushErrors()
+ for logged_error in logged_errors:
+ successful = False
+ self._got_user_failure(logged_error, tb_label='logged-error')
+
+ if unhandled:
+ successful = False
+ for debug_info in unhandled:
+ f = debug_info.failResult
+ info = debug_info._getDebugTracebacks()
+ if info:
+ self.case.addDetail(
+ 'unhandled-error-in-deferred-debug',
+ text_content(info))
+ self._got_user_failure(f, 'unhandled-error-in-deferred')
+
+ junk = spinner.clear_junk()
+ if junk:
+ successful = False
+ self._log_user_exception(UncleanReactorError(junk))
+
+ if successful:
+ self.result.addSuccess(self.case, details=self.case.getDetails())
+
+ def _run_user(self, function, *args):
+ """Run a user-supplied function.
+
+ This just makes sure that it returns a Deferred, regardless of how the
+ user wrote it.
+ """
+ d = defer.maybeDeferred(function, *args)
+ return d.addErrback(self._got_user_failure)
+
+
+class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest):
+ """Test runner that works around Twisted brokenness re reactor junk.
+
+ There are many APIs within Twisted itself where a Deferred fires but
+ leaves cleanup work scheduled for the reactor to do. Arguably, many of
+ these are bugs. This runner iterates the reactor event loop a number of
+ times after every test, in order to shake out these buggy-but-commonplace
+ events.
+ """
+
+ def _make_spinner(self):
+ spinner = super(
+ AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner()
+ spinner._OBLIGATORY_REACTOR_ITERATIONS = 2
+ return spinner
+
+
+def assert_fails_with(d, *exc_types, **kwargs):
+ """Assert that 'd' will fail with one of 'exc_types'.
+
+ The normal way to use this is to return the result of 'assert_fails_with'
+ from your unit test.
+
+ Note that this function is experimental and unstable. Use at your own
+ peril; expect the API to change.
+
+ :param d: A Deferred that is expected to fail.
+ :param exc_types: The exception types that the Deferred is expected to
+ fail with.
+ :param failureException: An optional keyword argument. If provided, will
+ raise that exception instead of
+ ``testtools.TestCase.failureException``.
+ :return: A Deferred that will fail with an ``AssertionError`` if 'd' does
+ not fail with one of the exception types.
+ """
+ failureException = kwargs.pop('failureException', None)
+ if failureException is None:
+ # Avoid circular imports.
+ from testtools import TestCase
+ failureException = TestCase.failureException
+ expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)
+ def got_success(result):
+ raise failureException(
+ "%s not raised (%r returned)" % (expected_names, result))
+ def got_failure(failure):
+ if failure.check(*exc_types):
+ return failure.value
+ raise failureException("%s raised instead of %s:\n %s" % (
+ failure.type.__name__, expected_names, failure.getTraceback()))
+ return d.addCallbacks(got_success, got_failure)
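+
+# A minimal sketch of assert_fails_with in a test method (assumes the test is
+# run with one of the Deferred-aware runners above; the method name and
+# exception are illustrative):
+#
+#   def test_raises_value_error(self):
+#       d = defer.fail(ValueError('bad value'))
+#       return assert_fails_with(d, ValueError)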
+
+
+def flush_logged_errors(*error_types):
+ return _log_observer.flushErrors(*error_types)
+
+
+class UncleanReactorError(Exception):
+ """Raised when the reactor has junk in it."""
+
+ def __init__(self, junk):
+ Exception.__init__(self,
+ "The reactor still thinks it needs to do things. Close all "
+ "connections, kill all processes and make sure all delayed "
+ "calls have either fired or been cancelled:\n%s"
+ % ''.join(map(self._get_junk_info, junk)))
+
+ def _get_junk_info(self, junk):
+ from twisted.internet.base import DelayedCall
+ if isinstance(junk, DelayedCall):
+ ret = str(junk)
+ else:
+ ret = repr(junk)
+ return ' %s\n' % (ret,)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py
new file mode 100644
index 00000000000..91e14ca504f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+"""Extensions to the standard Python unittest library."""
+
+import sys
+
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+
+from testtools.run import TestProgram, TestToolsTestRunner
+
+
+class TestCommand(Command):
+ """Command to run unit tests with testtools"""
+
+ description = "run unit tests with testtools"
+
+ user_options = [
+ ('catch', 'c', "Catch ctrl-C and display results so far"),
+ ('buffer', 'b', "Buffer stdout and stderr during tests"),
+ ('failfast', 'f', "Stop on first fail or error"),
+ ('test-module=','m', "Run 'test_suite' in specified module"),
+ ('test-suite=','s',
+ "Test suite to run (e.g. 'some_module.test_suite')")
+ ]
+
+ def __init__(self, dist):
+ Command.__init__(self, dist)
+ self.runner = TestToolsTestRunner(sys.stdout)
+
+
+ def initialize_options(self):
+ self.test_suite = None
+ self.test_module = None
+ self.catch = None
+ self.buffer = None
+ self.failfast = None
+
+ def finalize_options(self):
+ if self.test_suite is None:
+ if self.test_module is None:
+ raise DistutilsOptionError(
+ "You must specify a module or a suite to run tests from")
+ else:
+ self.test_suite = self.test_module+".test_suite"
+ elif self.test_module:
+ raise DistutilsOptionError(
+ "You may specify a module or a suite, but not both")
+ self.test_args = [self.test_suite]
+ if self.verbose:
+ self.test_args.insert(0, '--verbose')
+ if self.buffer:
+ self.test_args.insert(0, '--buffer')
+ if self.catch:
+ self.test_args.insert(0, '--catch')
+ if self.failfast:
+ self.test_args.insert(0, '--failfast')
+
+ def run(self):
+ self.program = TestProgram(
+ argv=self.test_args, testRunner=self.runner, stdout=sys.stdout,
+ exit=False)
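+
+# A hypothetical setup.py wiring for this command (the project name and test
+# module are assumptions):
+#
+#   from distutils.core import setup
+#   from testtools.distutilscmd import TestCommand
+#
+#   setup(name='example', cmdclass={'test': TestCommand})
+#
+# after which tests can be run with:
+#
+#   python setup.py test -m example.tests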
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py
new file mode 100644
index 00000000000..401d2cc10ed
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/helpers.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'safe_hasattr',
+ 'try_import',
+ 'try_imports',
+ ]
+
+import sys
+
+# Compat - removal announced in 0.9.25.
+from extras import (
+ safe_hasattr,
+ try_import,
+ try_imports,
+ )
+
+
+def map_values(function, dictionary):
+ """Map ``function`` across the values of ``dictionary``.
+
+ :return: A dict with the same keys as ``dictionary``, where the value
+ of each key ``k`` is ``function(dictionary[k])``.
+ """
+ return dict((k, function(dictionary[k])) for k in dictionary)
+
+
+def filter_values(function, dictionary):
+ """Filter ``dictionary`` by its values using ``function``."""
+ return dict((k, v) for k, v in dictionary.items() if function(v))
+
+
+def dict_subtract(a, b):
+ """Return the part of ``a`` that's not in ``b``."""
+ return dict((k, a[k]) for k in set(a) - set(b))
+
+
+def list_subtract(a, b):
+ """Return a list ``a`` without the elements of ``b``.
+
+ If a particular value is in ``a`` twice and ``b`` once then that value
+ will appear once in the returned list.
+ """
+ a_only = list(a)
+ for x in b:
+ if x in a_only:
+ a_only.remove(x)
+ return a_only
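+
+# A few quick sketches of the helpers above (values chosen for illustration):
+#
+#   map_values(len, {'a': [1, 2], 'b': []})     # -> {'a': 2, 'b': 0}
+#   filter_values(bool, {'a': 1, 'b': 0})       # -> {'a': 1}
+#   dict_subtract({'a': 1, 'b': 2}, {'b': 2})   # -> {'a': 1}
+#   list_subtract([1, 1, 2, 3], [1, 3])         # -> [1, 2]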
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py
new file mode 100644
index 00000000000..771d8142b32
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""All the matchers.
+
+Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
+"""
+
+__all__ = [
+ 'AfterPreprocessing',
+ 'AllMatch',
+ 'Annotate',
+ 'AnyMatch',
+ 'Contains',
+ 'ContainsAll',
+ 'ContainedByDict',
+ 'ContainsDict',
+ 'DirContains',
+ 'DirExists',
+ 'DocTestMatches',
+ 'EndsWith',
+ 'Equals',
+ 'FileContains',
+ 'FileExists',
+ 'GreaterThan',
+ 'HasLength',
+ 'HasPermissions',
+ 'Is',
+ 'IsInstance',
+ 'KeysEqual',
+ 'LessThan',
+ 'MatchesAll',
+ 'MatchesAny',
+ 'MatchesDict',
+ 'MatchesException',
+ 'MatchesListwise',
+ 'MatchesPredicate',
+ 'MatchesPredicateWithParams',
+ 'MatchesRegex',
+ 'MatchesSetwise',
+ 'MatchesStructure',
+ 'NotEquals',
+ 'Not',
+ 'PathExists',
+ 'Raises',
+ 'raises',
+ 'SamePath',
+ 'StartsWith',
+ 'TarballContains',
+ ]
+
+from ._basic import (
+ Contains,
+ EndsWith,
+ Equals,
+ GreaterThan,
+ HasLength,
+ Is,
+ IsInstance,
+ LessThan,
+ MatchesRegex,
+ NotEquals,
+ StartsWith,
+ )
+from ._datastructures import (
+ ContainsAll,
+ MatchesListwise,
+ MatchesSetwise,
+ MatchesStructure,
+ )
+from ._dict import (
+ ContainedByDict,
+ ContainsDict,
+ KeysEqual,
+ MatchesDict,
+ )
+from ._doctest import (
+ DocTestMatches,
+ )
+from ._exception import (
+ MatchesException,
+ Raises,
+ raises,
+ )
+from ._filesystem import (
+ DirContains,
+ DirExists,
+ FileContains,
+ FileExists,
+ HasPermissions,
+ PathExists,
+ SamePath,
+ TarballContains,
+ )
+from ._higherorder import (
+ AfterPreprocessing,
+ AllMatch,
+ Annotate,
+ AnyMatch,
+ MatchesAll,
+ MatchesAny,
+ MatchesPredicate,
+ MatchesPredicateWithParams,
+ Not,
+ )
+
+# XXX: These are not explicitly included in __all__. It's unclear how much of
+# the public interface they really are.
+from ._impl import (
+ Matcher,
+ Mismatch,
+ MismatchError,
+ )
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py
new file mode 100644
index 00000000000..2d9f143f10e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py
@@ -0,0 +1,326 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'Contains',
+ 'EndsWith',
+ 'Equals',
+ 'GreaterThan',
+ 'HasLength',
+ 'Is',
+ 'IsInstance',
+ 'LessThan',
+ 'MatchesRegex',
+ 'NotEquals',
+ 'StartsWith',
+ ]
+
+import operator
+from pprint import pformat
+import re
+
+from ..compat import (
+ _isbytes,
+ istext,
+ str_is_unicode,
+ text_repr,
+ )
+from ..helpers import list_subtract
+from ._higherorder import (
+ MatchesPredicateWithParams,
+ PostfixedMismatch,
+ )
+from ._impl import (
+ Matcher,
+ Mismatch,
+ )
+
+
+def _format(thing):
+ """
+ Blocks of text with newlines are formatted as triple-quote
+ strings. Everything else is pretty-printed.
+ """
+ if istext(thing) or _isbytes(thing):
+ return text_repr(thing)
+ return pformat(thing)
+
+
+class _BinaryComparison(object):
+ """Matcher that compares an object to another object."""
+
+ def __init__(self, expected):
+ self.expected = expected
+
+ def __str__(self):
+ return "%s(%r)" % (self.__class__.__name__, self.expected)
+
+ def match(self, other):
+ if self.comparator(other, self.expected):
+ return None
+ return _BinaryMismatch(self.expected, self.mismatch_string, other)
+
+ def comparator(self, expected, other):
+ raise NotImplementedError(self.comparator)
+
+
+class _BinaryMismatch(Mismatch):
+ """Two things did not match."""
+
+ def __init__(self, expected, mismatch_string, other):
+ self.expected = expected
+ self._mismatch_string = mismatch_string
+ self.other = other
+
+ def describe(self):
+ left = repr(self.expected)
+ right = repr(self.other)
+ if len(left) + len(right) > 70:
+ return "%s:\nreference = %s\nactual = %s\n" % (
+ self._mismatch_string, _format(self.expected),
+ _format(self.other))
+ else:
+ return "%s %s %s" % (left, self._mismatch_string, right)
+
+
+class Equals(_BinaryComparison):
+ """Matches if the items are equal."""
+
+ comparator = operator.eq
+ mismatch_string = '!='
+
+
+class NotEquals(_BinaryComparison):
+ """Matches if the items are not equal.
+
+ In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
+ only matters when testing ``__ne__`` implementations.
+ """
+
+ comparator = operator.ne
+ mismatch_string = '=='
+
+
+class Is(_BinaryComparison):
+ """Matches if the items are identical."""
+
+ comparator = operator.is_
+ mismatch_string = 'is not'
+
+
+class LessThan(_BinaryComparison):
+ """Matches if the item is less than the matchers reference object."""
+
+ comparator = operator.__lt__
+ mismatch_string = 'is not >'
+
+
+class GreaterThan(_BinaryComparison):
+ """Matches if the item is greater than the matchers reference object."""
+
+ comparator = operator.__gt__
+ mismatch_string = 'is not <'
+
+
+class SameMembers(Matcher):
+ """Matches if two iterators have the same members.
+
+ This is not the same as set equivalence. The two iterators must be of the
+ same length and have the same repetitions.
+ """
+
+ def __init__(self, expected):
+ super(SameMembers, self).__init__()
+ self.expected = expected
+
+ def __str__(self):
+ return '%s(%r)' % (self.__class__.__name__, self.expected)
+
+ def match(self, observed):
+ expected_only = list_subtract(self.expected, observed)
+ observed_only = list_subtract(observed, self.expected)
+ if expected_only == observed_only == []:
+ return
+ return PostfixedMismatch(
+ "\nmissing: %s\nextra: %s" % (
+ _format(expected_only), _format(observed_only)),
+ _BinaryMismatch(self.expected, 'elements differ', observed))
+
+
+class DoesNotStartWith(Mismatch):
+
+ def __init__(self, matchee, expected):
+ """Create a DoesNotStartWith Mismatch.
+
+ :param matchee: the string that did not match.
+ :param expected: the string that 'matchee' was expected to start with.
+ """
+ self.matchee = matchee
+ self.expected = expected
+
+ def describe(self):
+ return "%s does not start with %s." % (
+ text_repr(self.matchee), text_repr(self.expected))
+
+
+class StartsWith(Matcher):
+ """Checks whether one string starts with another."""
+
+ def __init__(self, expected):
+ """Create a StartsWith Matcher.
+
+ :param expected: the string that matchees should start with.
+ """
+ self.expected = expected
+
+ def __str__(self):
+ return "StartsWith(%r)" % (self.expected,)
+
+ def match(self, matchee):
+ if not matchee.startswith(self.expected):
+ return DoesNotStartWith(matchee, self.expected)
+ return None
+
+
+class DoesNotEndWith(Mismatch):
+
+ def __init__(self, matchee, expected):
+ """Create a DoesNotEndWith Mismatch.
+
+ :param matchee: the string that did not match.
+ :param expected: the string that 'matchee' was expected to end with.
+ """
+ self.matchee = matchee
+ self.expected = expected
+
+ def describe(self):
+ return "%s does not end with %s." % (
+ text_repr(self.matchee), text_repr(self.expected))
+
+
+class EndsWith(Matcher):
+ """Checks whether one string ends with another."""
+
+ def __init__(self, expected):
+ """Create a EndsWith Matcher.
+
+ :param expected: the string that matchees should end with.
+ """
+ self.expected = expected
+
+ def __str__(self):
+ return "EndsWith(%r)" % (self.expected,)
+
+ def match(self, matchee):
+ if not matchee.endswith(self.expected):
+ return DoesNotEndWith(matchee, self.expected)
+ return None
+
+
+class IsInstance(object):
+ """Matcher that wraps isinstance."""
+
+ def __init__(self, *types):
+ self.types = tuple(types)
+
+ def __str__(self):
+ return "%s(%s)" % (self.__class__.__name__,
+ ', '.join(type.__name__ for type in self.types))
+
+ def match(self, other):
+ if isinstance(other, self.types):
+ return None
+ return NotAnInstance(other, self.types)
+
+
+class NotAnInstance(Mismatch):
+
+ def __init__(self, matchee, types):
+ """Create a NotAnInstance Mismatch.
+
+ :param matchee: the thing which is not an instance of any of types.
+ :param types: A tuple of the types which were expected.
+ """
+ self.matchee = matchee
+ self.types = types
+
+ def describe(self):
+ if len(self.types) == 1:
+ typestr = self.types[0].__name__
+ else:
+ typestr = 'any of (%s)' % ', '.join(type.__name__ for type in
+ self.types)
+ return "'%s' is not an instance of %s" % (self.matchee, typestr)
+
+
+class DoesNotContain(Mismatch):
+
+ def __init__(self, matchee, needle):
+ """Create a DoesNotContain Mismatch.
+
+ :param matchee: the object that did not contain needle.
+ :param needle: the needle that 'matchee' was expected to contain.
+ """
+ self.matchee = matchee
+ self.needle = needle
+
+ def describe(self):
+ return "%r not in %r" % (self.needle, self.matchee)
+
+
+class Contains(Matcher):
+ """Checks whether something is contained in another thing."""
+
+ def __init__(self, needle):
+ """Create a Contains Matcher.
+
+ :param needle: the thing that needs to be contained by matchees.
+ """
+ self.needle = needle
+
+ def __str__(self):
+ return "Contains(%r)" % (self.needle,)
+
+ def match(self, matchee):
+ try:
+ if self.needle not in matchee:
+ return DoesNotContain(matchee, self.needle)
+ except TypeError:
+ # e.g. 1 in 2 will raise TypeError
+ return DoesNotContain(matchee, self.needle)
+ return None
+
+
+class MatchesRegex(object):
+ """Matches if the matchee is matched by a regular expression."""
+
+ def __init__(self, pattern, flags=0):
+ self.pattern = pattern
+ self.flags = flags
+
+ def __str__(self):
+ args = ['%r' % self.pattern]
+ flag_arg = []
+ # dir() sorts the attributes for us, so we don't need to do it again.
+ for flag in dir(re):
+ if len(flag) == 1:
+ if self.flags & getattr(re, flag):
+ flag_arg.append('re.%s' % flag)
+ if flag_arg:
+ args.append('|'.join(flag_arg))
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
+
+ def match(self, value):
+ if not re.match(self.pattern, value, self.flags):
+ pattern = self.pattern
+ if not isinstance(pattern, str_is_unicode and str or unicode):
+ pattern = pattern.decode("latin1")
+ pattern = pattern.encode("unicode_escape").decode("ascii")
+ return Mismatch("%r does not match /%s/" % (
+ value, pattern.replace("\\\\", "\\")))
+
+
+def has_len(x, y):
+ return len(x) == y
+
+
+HasLength = MatchesPredicateWithParams(has_len, "len({0}) != {1}", "HasLength")
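+
+# A short sketch of these matchers in use (assumes a testtools.TestCase for
+# assertThat; values are illustrative):
+#
+#   self.assertThat('foo bar', MatchesRegex('foo'))
+#   self.assertThat([1, 2, 3], HasLength(3))
+#   Equals(4).match(5).describe()          # -> '4 != 5'
+#   Contains(2).match([1, 3]).describe()   # -> '2 not in [1, 3]'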
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py
new file mode 100644
index 00000000000..70de790738a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'ContainsAll',
+ 'MatchesListwise',
+ 'MatchesSetwise',
+ 'MatchesStructure',
+ ]
+
+"""Matchers that operate with knowledge of Python data structures."""
+
+from ..helpers import map_values
+from ._higherorder import (
+ Annotate,
+ MatchesAll,
+ MismatchesAll,
+ )
+from ._impl import Mismatch
+
+
+def ContainsAll(items):
+ """Make a matcher that checks whether a list of things is contained
+ in another thing.
+
+ The matcher effectively checks that the provided sequence is a subset of
+ the matchee.
+ """
+ from ._basic import Contains
+ return MatchesAll(*map(Contains, items), first_only=False)
+
+
+class MatchesListwise(object):
+ """Matches if each matcher matches the corresponding value.
+
+ More easily explained by example than in words:
+
+ >>> from ._basic import Equals
+ >>> MatchesListwise([Equals(1)]).match([1])
+ >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
+ >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
+ Differences: [
+ 1 != 2
+ 2 != 1
+ ]
+ >>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True)
+ >>> print (matcher.match([3, 4]).describe())
+ 1 != 3
+ """
+
+ def __init__(self, matchers, first_only=False):
+ """Construct a MatchesListwise matcher.
+
+ :param matchers: A list of matchers that the matched values must match.
+ :param first_only: If True, then only report the first mismatch,
+ otherwise report all of them. Defaults to False.
+ """
+ self.matchers = matchers
+ self.first_only = first_only
+
+ def match(self, values):
+ from ._basic import Equals
+ mismatches = []
+ length_mismatch = Annotate(
+ "Length mismatch", Equals(len(self.matchers))).match(len(values))
+ if length_mismatch:
+ mismatches.append(length_mismatch)
+ for matcher, value in zip(self.matchers, values):
+ mismatch = matcher.match(value)
+ if mismatch:
+ if self.first_only:
+ return mismatch
+ mismatches.append(mismatch)
+ if mismatches:
+ return MismatchesAll(mismatches)
+
+
+class MatchesStructure(object):
+ """Matcher that matches an object structurally.
+
+ 'Structurally' here means that attributes of the object being matched are
+ compared against given matchers.
+
+ `fromExample` allows the creation of a matcher from a prototype object and
+ then modified versions can be created with `update`.
+
+ `byEquality` creates a matcher in much the same way as the constructor,
+ except that the matcher for each of the attributes is assumed to be
+ `Equals`.
+
+ `byMatcher` creates a similar matcher to `byEquality`, but you get to pick
+ the matcher, rather than just using `Equals`.
+ """
+
+ def __init__(self, **kwargs):
+ """Construct a `MatchesStructure`.
+
+ :param kwargs: A mapping of attributes to matchers.
+ """
+ self.kws = kwargs
+
+ @classmethod
+ def byEquality(cls, **kwargs):
+ """Matches an object where the attributes equal the keyword values.
+
+ Similar to the constructor, except that the matcher is assumed to be
+ Equals.
+ """
+ from ._basic import Equals
+ return cls.byMatcher(Equals, **kwargs)
+
+ @classmethod
+ def byMatcher(cls, matcher, **kwargs):
+ """Matches an object where the attributes match the keyword values.
+
+ Similar to the constructor, except that the provided matcher is used
+ to match all of the values.
+ """
+ return cls(**map_values(matcher, kwargs))
+
+ @classmethod
+ def fromExample(cls, example, *attributes):
+ from ._basic import Equals
+ kwargs = {}
+ for attr in attributes:
+ kwargs[attr] = Equals(getattr(example, attr))
+ return cls(**kwargs)
+
+ def update(self, **kws):
+ new_kws = self.kws.copy()
+ for attr, matcher in kws.items():
+ if matcher is None:
+ new_kws.pop(attr, None)
+ else:
+ new_kws[attr] = matcher
+ return type(self)(**new_kws)
+
+ def __str__(self):
+ kws = []
+ for attr, matcher in sorted(self.kws.items()):
+ kws.append("%s=%s" % (attr, matcher))
+ return "%s(%s)" % (self.__class__.__name__, ', '.join(kws))
+
+ def match(self, value):
+ matchers = []
+ values = []
+ for attr, matcher in sorted(self.kws.items()):
+ matchers.append(Annotate(attr, matcher))
+ values.append(getattr(value, attr))
+ return MatchesListwise(matchers).match(values)
+
+
+class MatchesSetwise(object):
+ """Matches if all the matchers match elements of the value being matched.
+
+ That is, each element in the 'observed' set must match exactly one matcher
+ from the set of matchers, with no matchers left over.
+
+ The difference compared to `MatchesListwise` is that the order of the
+ matchings does not matter.
+ """
+
+ def __init__(self, *matchers):
+ self.matchers = matchers
+
+ def match(self, observed):
+ remaining_matchers = set(self.matchers)
+ not_matched = []
+ for value in observed:
+ for matcher in remaining_matchers:
+ if matcher.match(value) is None:
+ remaining_matchers.remove(matcher)
+ break
+ else:
+ not_matched.append(value)
+ if not_matched or remaining_matchers:
+ remaining_matchers = list(remaining_matchers)
+ # There are various cases that all should be reported somewhat
+ # differently.
+
+ # There are two trivial cases:
+ # 1) There are just some matchers left over.
+ # 2) There are just some values left over.
+
+ # Then there are three more interesting cases:
+ # 3) There are the same number of matchers and values left over.
+ # 4) There are more matchers left over than values.
+ # 5) There are more values left over than matchers.
+
+ if len(not_matched) == 0:
+ if len(remaining_matchers) > 1:
+ msg = "There were %s matchers left over: " % (
+ len(remaining_matchers),)
+ else:
+ msg = "There was 1 matcher left over: "
+ msg += ', '.join(map(str, remaining_matchers))
+ return Mismatch(msg)
+ elif len(remaining_matchers) == 0:
+ if len(not_matched) > 1:
+ return Mismatch(
+ "There were %s values left over: %s" % (
+ len(not_matched), not_matched))
+ else:
+ return Mismatch(
+ "There was 1 value left over: %s" % (
+ not_matched, ))
+ else:
+ common_length = min(len(remaining_matchers), len(not_matched))
+ if common_length == 0:
+ raise AssertionError("common_length can't be 0 here")
+ if common_length > 1:
+ msg = "There were %s mismatches" % (common_length,)
+ else:
+ msg = "There was 1 mismatch"
+ if len(remaining_matchers) > len(not_matched):
+ extra_matchers = remaining_matchers[common_length:]
+ msg += " and %s extra matcher" % (len(extra_matchers), )
+ if len(extra_matchers) > 1:
+ msg += "s"
+ msg += ': ' + ', '.join(map(str, extra_matchers))
+ elif len(not_matched) > len(remaining_matchers):
+ extra_values = not_matched[common_length:]
+ msg += " and %s extra value" % (len(extra_values), )
+ if len(extra_values) > 1:
+ msg += "s"
+ msg += ': ' + str(extra_values)
+ return Annotate(
+ msg, MatchesListwise(remaining_matchers[:common_length])
+ ).match(not_matched[:common_length])
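+
+# A minimal sketch of these matchers (the Point class is an assumption made
+# for the example; Equals comes from testtools.matchers):
+#
+#   from testtools.matchers import Equals
+#
+#   class Point(object):
+#       def __init__(self, x, y):
+#           self.x, self.y = x, y
+#
+#   MatchesStructure.byEquality(x=1, y=2).match(Point(1, 2))   # -> None
+#   MatchesSetwise(Equals(1), Equals(2)).match([2, 1])         # -> None
+#   MatchesListwise([Equals(1), Equals(2)]).match([2, 1])      # -> mismatch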
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py
new file mode 100644
index 00000000000..b1ec9151b24
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'KeysEqual',
+ ]
+
+from ..helpers import (
+ dict_subtract,
+ filter_values,
+ map_values,
+ )
+from ._higherorder import (
+ AnnotatedMismatch,
+ PrefixedMismatch,
+ MismatchesAll,
+ )
+from ._impl import Matcher, Mismatch
+
+
+def LabelledMismatches(mismatches, details=None):
+ """A collection of mismatches, each labelled."""
+ return MismatchesAll(
+ (PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())),
+ wrap=False)
+
+
+class MatchesAllDict(Matcher):
+ """Matches if all of the matchers it is created with match.
+
+ A lot like ``MatchesAll``, but takes a dict of Matchers and labels any
+ mismatches with the key of the dictionary.
+ """
+
+ def __init__(self, matchers):
+ super(MatchesAllDict, self).__init__()
+ self.matchers = matchers
+
+ def __str__(self):
+ return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),)
+
+ def match(self, observed):
+ mismatches = {}
+ for label in self.matchers:
+ mismatches[label] = self.matchers[label].match(observed)
+ return _dict_to_mismatch(
+ mismatches, result_mismatch=LabelledMismatches)
+
+
+class DictMismatches(Mismatch):
+ """A mismatch with a dict of child mismatches."""
+
+ def __init__(self, mismatches, details=None):
+ super(DictMismatches, self).__init__(None, details=details)
+ self.mismatches = mismatches
+
+ def describe(self):
+ lines = ['{']
+ lines.extend(
+ [' %r: %s,' % (key, mismatch.describe())
+ for (key, mismatch) in sorted(self.mismatches.items())])
+ lines.append('}')
+ return '\n'.join(lines)
+
+
+def _dict_to_mismatch(data, to_mismatch=None,
+ result_mismatch=DictMismatches):
+ if to_mismatch:
+ data = map_values(to_mismatch, data)
+ mismatches = filter_values(bool, data)
+ if mismatches:
+ return result_mismatch(mismatches)
+
+
+class _MatchCommonKeys(Matcher):
+ """Match on keys in a dictionary.
+
+ Given a dictionary where the values are matchers, this will look for
+ common keys in the matched dictionary and match if and only if all common
+ keys match the given matchers.
+
+ Thus::
+
+ >>> structure = {'a': Equals('x'), 'b': Equals('y')}
+ >>> _MatchCommonKeys(structure).match({'a': 'x', 'c': 'z'})
+ None
+ """
+
+ def __init__(self, dict_of_matchers):
+ super(_MatchCommonKeys, self).__init__()
+ self._matchers = dict_of_matchers
+
+ def _compare_dicts(self, expected, observed):
+ common_keys = set(expected.keys()) & set(observed.keys())
+ mismatches = {}
+ for key in common_keys:
+ mismatch = expected[key].match(observed[key])
+ if mismatch:
+ mismatches[key] = mismatch
+ return mismatches
+
+ def match(self, observed):
+ mismatches = self._compare_dicts(self._matchers, observed)
+ if mismatches:
+ return DictMismatches(mismatches)
+
+
+class _SubDictOf(Matcher):
+ """Matches if the matched dict only has keys that are in given dict."""
+
+ def __init__(self, super_dict, format_value=repr):
+ super(_SubDictOf, self).__init__()
+ self.super_dict = super_dict
+ self.format_value = format_value
+
+ def match(self, observed):
+ excess = dict_subtract(observed, self.super_dict)
+ return _dict_to_mismatch(
+ excess, lambda v: Mismatch(self.format_value(v)))
+
+
+class _SuperDictOf(Matcher):
+ """Matches if all of the keys in the given dict are in the matched dict.
+ """
+
+ def __init__(self, sub_dict, format_value=repr):
+ super(_SuperDictOf, self).__init__()
+ self.sub_dict = sub_dict
+ self.format_value = format_value
+
+ def match(self, super_dict):
+ return _SubDictOf(super_dict, self.format_value).match(self.sub_dict)
+
+
+def _format_matcher_dict(matchers):
+ return '{%s}' % (
+ ', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items())))
+
+
+class _CombinedMatcher(Matcher):
+ """Many matchers labelled and combined into one uber-matcher.
+
+ Subclass this and then specify a dict of matcher factories that take a
+ single 'expected' value and return a matcher. The subclass will match
+ only if all of the matchers made from factories match.
+
+ Not **entirely** dissimilar from ``MatchesAll``.
+ """
+
+ matcher_factories = {}
+
+ def __init__(self, expected):
+ super(_CombinedMatcher, self).__init__()
+ self._expected = expected
+
+ def format_expected(self, expected):
+ return repr(expected)
+
+ def __str__(self):
+ return '%s(%s)' % (
+ self.__class__.__name__, self.format_expected(self._expected))
+
+ def match(self, observed):
+ matchers = dict(
+ (k, v(self._expected)) for k, v in self.matcher_factories.items())
+ return MatchesAllDict(matchers).match(observed)
+
+
+class MatchesDict(_CombinedMatcher):
+ """Match a dictionary exactly, by its keys.
+
+ Specify a dictionary mapping keys (often strings) to matchers. This is
+ the 'expected' dict. Any dictionary that matches this must have exactly
+ the same keys, and the values must match the corresponding matchers in the
+ expected dict.
+ """
+
+ matcher_factories = {
+ 'Extra': _SubDictOf,
+ 'Missing': lambda m: _SuperDictOf(m, format_value=str),
+ 'Differences': _MatchCommonKeys,
+ }
+
+ format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class ContainsDict(_CombinedMatcher):
+ """Match a dictionary for that contains a specified sub-dictionary.
+
+ Specify a dictionary mapping keys (often strings) to matchers. This is
+ the 'expected' dict. Any dictionary that matches this must have **at
+ least** these keys, and the values must match the corresponding matchers
+ in the expected dict. Dictionaries that have more keys will also match.
+
+ In other words, any matching dictionary must contain the dictionary given
+ to the constructor.
+
+ Does not check for strict sub-dictionary. That is, equal dictionaries
+ match.
+ """
+
+ matcher_factories = {
+ 'Missing': lambda m: _SuperDictOf(m, format_value=str),
+ 'Differences': _MatchCommonKeys,
+ }
+
+ format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class ContainedByDict(_CombinedMatcher):
+ """Match a dictionary for which this is a super-dictionary.
+
+ Specify a dictionary mapping keys (often strings) to matchers. This is
+ the 'expected' dict. Any dictionary that matches this must have **only**
+ these keys, and the values must match the corresponding matchers in the
+ expected dict. Dictionaries that have fewer keys can also match.
+
+ In other words, any matching dictionary must be contained by the
+ dictionary given to the constructor.
+
+ Does not check for strict super-dictionary. That is, equal dictionaries
+ match.
+ """
+
+ matcher_factories = {
+ 'Extra': _SubDictOf,
+ 'Differences': _MatchCommonKeys,
+ }
+
+ format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class KeysEqual(Matcher):
+ """Checks whether a dict has particular keys."""
+
+ def __init__(self, *expected):
+ """Create a `KeysEqual` Matcher.
+
+ :param expected: The keys the dict is expected to have. If a dict,
+ then we use the keys of that dict, if a collection, we assume it
+ is a collection of expected keys.
+ """
+ super(KeysEqual, self).__init__()
+ try:
+ self.expected = expected[0].keys()
+ except AttributeError:
+ self.expected = list(expected)
+
+ def __str__(self):
+ return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
+
+ def match(self, matchee):
+ from ._basic import _BinaryMismatch, Equals
+ expected = sorted(self.expected)
+ matched = Equals(expected).match(sorted(matchee.keys()))
+ if matched:
+ return AnnotatedMismatch(
+ 'Keys not equal',
+ _BinaryMismatch(expected, 'does not match', matchee))
+ return None
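+
+# A quick sketch of the dict matchers (values illustrative; Equals comes from
+# testtools.matchers):
+#
+#   from testtools.matchers import Equals
+#
+#   MatchesDict({'a': Equals(1)}).match({'a': 1})            # -> None
+#   ContainsDict({'a': Equals(1)}).match({'a': 1, 'b': 2})   # -> None
+#   ContainedByDict({'a': Equals(1)}).match({})              # -> None
+#   KeysEqual('a', 'b').match({'a': 1, 'b': 2})              # -> None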
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py
new file mode 100644
index 00000000000..41f3c003e53
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'DocTestMatches',
+ ]
+
+import doctest
+import re
+
+from ..compat import str_is_unicode
+from ._impl import Mismatch
+
+
+class _NonManglingOutputChecker(doctest.OutputChecker):
+ """Doctest checker that works with unicode rather than mangling strings
+
+ This is needed because current Python versions have tried to fix string
+ encoding related problems, but regressed the default behaviour with
+ unicode inputs in the process.
+
+ In Python 2.6 and 2.7 ``OutputChecker.output_difference`` was changed
+ to return a bytestring encoded as per ``sys.stdout.encoding``, or utf-8 if
+ that can't be determined. Worse, that encoding process happens in the
+ innocent looking `_indent` global function. Because the
+ `DocTestMismatch.describe` result may well not be destined for printing to
+ stdout, this is no good for us. To get a unicode return as before, the
+ method is monkey patched if ``doctest._encoding`` exists.
+
+ Python 3 has a different problem. For some reason both inputs are encoded
+ to ascii with 'backslashreplace', making an escaped string match its
+ unescaped form. Overriding the offending ``OutputChecker._toAscii`` method
+ is sufficient to revert this.
+ """
+
+ def _toAscii(self, s):
+ """Return ``s`` unchanged rather than mangling it to ascii"""
+ return s
+
+ # Only do this overriding hackery if doctest has a broken _input function
+ if getattr(doctest, "_encoding", None) is not None:
+ from types import FunctionType as __F
+ __f = doctest.OutputChecker.output_difference.im_func
+ __g = dict(__f.func_globals)
+ def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)):
+ """Prepend non-empty lines in ``s`` with ``indent`` number of spaces"""
+ return _pattern.sub(indent*" ", s)
+ __g["_indent"] = _indent
+ output_difference = __F(__f.func_code, __g, "output_difference")
+ del __F, __f, __g, _indent
+
+
+class DocTestMatches(object):
+ """See if a string matches a doctest example."""
+
+ def __init__(self, example, flags=0):
+ """Create a DocTestMatches to match example.
+
+ :param example: The example to match e.g. 'foo bar baz'
+ :param flags: doctest comparison flags to match on. e.g.
+ doctest.ELLIPSIS.
+ """
+ if not example.endswith('\n'):
+ example += '\n'
+ self.want = example # variable name required by doctest.
+ self.flags = flags
+ self._checker = _NonManglingOutputChecker()
+
+ def __str__(self):
+ if self.flags:
+ flagstr = ", flags=%d" % self.flags
+ else:
+ flagstr = ""
+ return 'DocTestMatches(%r%s)' % (self.want, flagstr)
+
+ def _with_nl(self, actual):
+ result = self.want.__class__(actual)
+ if not result.endswith('\n'):
+ result += '\n'
+ return result
+
+ def match(self, actual):
+ with_nl = self._with_nl(actual)
+ if self._checker.check_output(self.want, with_nl, self.flags):
+ return None
+ return DocTestMismatch(self, with_nl)
+
+ def _describe_difference(self, with_nl):
+ return self._checker.output_difference(self, with_nl, self.flags)
+
+
+class DocTestMismatch(Mismatch):
+ """Mismatch object for DocTestMatches."""
+
+ def __init__(self, matcher, with_nl):
+ self.matcher = matcher
+ self.with_nl = with_nl
+
+ def describe(self):
+ s = self.matcher._describe_difference(self.with_nl)
+ if str_is_unicode or isinstance(s, unicode):
+ return s
+ # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
+ # be escaped, in addition to non-ascii bytes.
+ return s.decode("latin1").encode("ascii", "backslashreplace")
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py
new file mode 100644
index 00000000000..1938f152b78
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'MatchesException',
+ 'Raises',
+ 'raises',
+ ]
+
+import sys
+
+from testtools.compat import (
+ classtypes,
+ _error_repr,
+ isbaseexception,
+ istext,
+ )
+from ._basic import MatchesRegex
+from ._higherorder import AfterPreprocessing
+from ._impl import (
+ Matcher,
+ Mismatch,
+ )
+
+
+class MatchesException(Matcher):
+ """Match an exc_info tuple against an exception instance or type."""
+
+ def __init__(self, exception, value_re=None):
+ """Create a MatchesException that will match exc_info's for exception.
+
+ :param exception: Either an exception instance or type.
+ If an instance is given, the type and arguments of the exception
+ are checked. If a type is given only the type of the exception is
+ checked. If a tuple is given, then as with isinstance, any of the
+ types in the tuple matching is sufficient to match.
+ :param value_re: If 'exception' is a type, and the matchee exception
+ is of the right type, then match against this. If value_re is a
+ string, then assume value_re is a regular expression and match
+ the str() of the exception against it. Otherwise, assume value_re
+ is a matcher, and match the exception against it.
+ """
+ Matcher.__init__(self)
+ self.expected = exception
+ if istext(value_re):
+ value_re = AfterPreprocessing(str, MatchesRegex(value_re), False)
+ self.value_re = value_re
+ expected_type = type(self.expected)
+ self._is_instance = not any(issubclass(expected_type, class_type)
+ for class_type in classtypes() + (tuple,))
+
+ def match(self, other):
+ if type(other) != tuple:
+ return Mismatch('%r is not an exc_info tuple' % other)
+ expected_class = self.expected
+ if self._is_instance:
+ expected_class = expected_class.__class__
+ if not issubclass(other[0], expected_class):
+ return Mismatch('%r is not a %r' % (other[0], expected_class))
+ if self._is_instance:
+ if other[1].args != self.expected.args:
+ return Mismatch('%s has different arguments to %s.' % (
+ _error_repr(other[1]), _error_repr(self.expected)))
+ elif self.value_re is not None:
+ return self.value_re.match(other[1])
+
+ def __str__(self):
+ if self._is_instance:
+ return "MatchesException(%s)" % _error_repr(self.expected)
+ return "MatchesException(%s)" % repr(self.expected)
+
+
+class Raises(Matcher):
+ """Match if the matchee raises an exception when called.
+
+ Exceptions which are not subclasses of Exception propagate out of the
+ Raises.match call unless they are explicitly matched.
+ """
+
+ def __init__(self, exception_matcher=None):
+ """Create a Raises matcher.
+
+ :param exception_matcher: Optional validator for the exception raised
+ by matchee. If supplied the exc_info tuple for the exception raised
+ is passed into that matcher. If no exception_matcher is supplied
+ then the simple fact of raising an exception is considered enough
+ to match on.
+ """
+ self.exception_matcher = exception_matcher
+
+ def match(self, matchee):
+ try:
+ result = matchee()
+ return Mismatch('%r returned %r' % (matchee, result))
+ # Catch all exceptions: Raises() should be able to match a
+ # KeyboardInterrupt or SystemExit.
+ except:
+ exc_info = sys.exc_info()
+ if self.exception_matcher:
+ mismatch = self.exception_matcher.match(exc_info)
+ if not mismatch:
+ del exc_info
+ return
+ else:
+ mismatch = None
+ # The exception did not match, or no explicit matching logic was
+ # performed. If the exception is a non-user exception (that is, not
+ # a subclass of Exception on Python 2.5+) then propagate it.
+ if isbaseexception(exc_info[1]):
+ del exc_info
+ raise
+ return mismatch
+
+ def __str__(self):
+ return 'Raises()'
+
+
+def raises(exception):
+ """Make a matcher that checks that a callable raises an exception.
+
+ This is a convenience function, exactly equivalent to::
+
+ return Raises(MatchesException(exception))
+
+ See `Raises` and `MatchesException` for more information.
+ """
+ return Raises(MatchesException(exception))
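+
+# A short sketch of these matchers (assumes a testtools.TestCase for
+# assertThat; the callables are illustrative):
+#
+#   self.assertThat(lambda: {}['missing'], raises(KeyError))
+#   self.assertThat(
+#       lambda: int('not a number'),
+#       Raises(MatchesException(ValueError, 'invalid literal.*')))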
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py
new file mode 100644
index 00000000000..54f749b1359
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers for things related to the filesystem."""
+
+__all__ = [
+ 'FileContains',
+ 'DirExists',
+ 'FileExists',
+ 'HasPermissions',
+ 'PathExists',
+ 'SamePath',
+ 'TarballContains',
+ ]
+
+import os
+import tarfile
+
+from ._basic import Equals
+from ._higherorder import (
+ MatchesAll,
+ MatchesPredicate,
+ )
+from ._impl import (
+ Matcher,
+ )
+
+
+def PathExists():
+ """Matches if the given path exists.
+
+ Use like this::
+
+ assertThat('/some/path', PathExists())
+ """
+ return MatchesPredicate(os.path.exists, "%s does not exist.")
+
+
+def DirExists():
+ """Matches if the path exists and is a directory."""
+ return MatchesAll(
+ PathExists(),
+ MatchesPredicate(os.path.isdir, "%s is not a directory."),
+ first_only=True)
+
+
+def FileExists():
+ """Matches if the given path exists and is a file."""
+ return MatchesAll(
+ PathExists(),
+ MatchesPredicate(os.path.isfile, "%s is not a file."),
+ first_only=True)
+
+
+class DirContains(Matcher):
+ """Matches if the given directory contains files with the given names.
+
+ That is, is the directory listing exactly equal to the given files?
+ """
+
+ def __init__(self, filenames=None, matcher=None):
+ """Construct a ``DirContains`` matcher.
+
+ Can be used in a basic mode where the whole directory listing is
+ matched against an expected directory listing (by passing
+ ``filenames``). Can also be used in a more advanced way where the
+ whole directory listing is matched against an arbitrary matcher (by
+ passing ``matcher`` instead).
+
+ :param filenames: If specified, match the sorted directory listing
+ against this list of filenames, sorted.
+ :param matcher: If specified, match the sorted directory listing
+ against this matcher.
+ """
+ if filenames == matcher == None:
+ raise AssertionError(
+ "Must provide one of `filenames` or `matcher`.")
+ if None not in (filenames, matcher):
+ raise AssertionError(
+ "Must provide either `filenames` or `matcher`, not both.")
+ if filenames is None:
+ self.matcher = matcher
+ else:
+ self.matcher = Equals(sorted(filenames))
+
+ def match(self, path):
+ mismatch = DirExists().match(path)
+ if mismatch is not None:
+ return mismatch
+ return self.matcher.match(sorted(os.listdir(path)))
+
+
+class FileContains(Matcher):
+ """Matches if the given file has the specified contents."""
+
+ def __init__(self, contents=None, matcher=None):
+ """Construct a ``FileContains`` matcher.
+
+ Can be used in a basic mode where the file contents are compared for
+ equality against the expected file contents (by passing ``contents``).
+ Can also be used in a more advanced way where the file contents are
+ matched against an arbitrary matcher (by passing ``matcher`` instead).
+
+ :param contents: If specified, match the contents of the file with
+ these contents.
+ :param matcher: If specified, match the contents of the file against
+ this matcher.
+ """
+ if contents == matcher == None:
+ raise AssertionError(
+ "Must provide one of `contents` or `matcher`.")
+ if None not in (contents, matcher):
+ raise AssertionError(
+ "Must provide either `contents` or `matcher`, not both.")
+ if matcher is None:
+ self.matcher = Equals(contents)
+ else:
+ self.matcher = matcher
+
+ def match(self, path):
+ mismatch = PathExists().match(path)
+ if mismatch is not None:
+ return mismatch
+ f = open(path)
+ try:
+ actual_contents = f.read()
+ return self.matcher.match(actual_contents)
+ finally:
+ f.close()
+
+ def __str__(self):
+ return "File at path exists and contains %s" % self.contents
+
+
+class HasPermissions(Matcher):
+ """Matches if a file has the given permissions.
+
+ Permissions are specified and matched as a four-digit octal string.
+ """
+
+ def __init__(self, octal_permissions):
+ """Construct a HasPermissions matcher.
+
+ :param octal_permissions: A four digit octal string, representing the
+ intended access permissions. e.g. '0775' for rwxrwxr-x.
+ """
+ super(HasPermissions, self).__init__()
+ self.octal_permissions = octal_permissions
+
+ def match(self, filename):
+ permissions = oct(os.stat(filename).st_mode)[-4:]
+ return Equals(self.octal_permissions).match(permissions)
+
+
+class SamePath(Matcher):
+ """Matches if two paths are the same.
+
+ That is, the paths are equal, or they point to the same file but in
+ different ways. The paths do not have to exist.
+ """
+
+ def __init__(self, path):
+ super(SamePath, self).__init__()
+ self.path = path
+
+ def match(self, other_path):
+ f = lambda x: os.path.abspath(os.path.realpath(x))
+ return Equals(f(self.path)).match(f(other_path))
+
+
+class TarballContains(Matcher):
+ """Matches if the given tarball contains the given paths.
+
+ Uses TarFile.getnames() to get the paths out of the tarball.
+ """
+
+ def __init__(self, paths):
+ super(TarballContains, self).__init__()
+ self.paths = paths
+ self.path_matcher = Equals(sorted(self.paths))
+
+ def match(self, tarball_path):
+ # Open underlying file first to ensure it's always closed:
+ # <http://bugs.python.org/issue10233>
+ f = open(tarball_path, "rb")
+ try:
+ tarball = tarfile.open(tarball_path, fileobj=f)
+ try:
+ return self.path_matcher.match(sorted(tarball.getnames()))
+ finally:
+ tarball.close()
+ finally:
+ f.close()
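+
+# A quick sketch of the filesystem matchers (assumes a testtools.TestCase for
+# assertThat; the paths below are hypothetical):
+#
+#   self.assertThat('/tmp', DirExists())
+#   self.assertThat(config_path, FileContains('enabled = true\n'))
+#   self.assertThat(script_path, HasPermissions('0755'))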
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py
new file mode 100644
index 00000000000..3570f573747
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py
@@ -0,0 +1,368 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'AfterPreprocessing',
+ 'AllMatch',
+ 'Annotate',
+ 'AnyMatch',
+ 'MatchesAny',
+ 'MatchesAll',
+ 'Not',
+ ]
+
+import types
+
+from ._impl import (
+ Matcher,
+ Mismatch,
+ MismatchDecorator,
+ )
+
+
+class MatchesAny(object):
+ """Matches if any of the matchers it is created with match."""
+
+ def __init__(self, *matchers):
+ self.matchers = matchers
+
+ def match(self, matchee):
+ results = []
+ for matcher in self.matchers:
+ mismatch = matcher.match(matchee)
+ if mismatch is None:
+ return None
+ results.append(mismatch)
+ return MismatchesAll(results)
+
+ def __str__(self):
+ return "MatchesAny(%s)" % ', '.join([
+ str(matcher) for matcher in self.matchers])
+
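+# Usage sketch (illustrative only, assuming Equals from testtools.matchers):
+# the first component matcher that succeeds wins; otherwise all of the
+# collected mismatches are reported together via MismatchesAll.
+#
+#   matcher = MatchesAny(Equals(1), Equals(2))
+#   matcher.match(2)   # None -> matched
+#   matcher.match(3)   # MismatchesAll describing both failed comparisons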
+
+class MatchesAll(object):
+ """Matches if all of the matchers it is created with match."""
+
+ def __init__(self, *matchers, **options):
+ """Construct a MatchesAll matcher.
+
+ Just list the component matchers as arguments in the ``*args``
+        style. If you want only the first mismatch to be reported, pass in
+ first_only=True as a keyword argument. By default, all mismatches are
+ reported.
+ """
+ self.matchers = matchers
+ self.first_only = options.get('first_only', False)
+
+ def __str__(self):
+ return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
+
+ def match(self, matchee):
+ results = []
+ for matcher in self.matchers:
+ mismatch = matcher.match(matchee)
+ if mismatch is not None:
+ if self.first_only:
+ return mismatch
+ results.append(mismatch)
+ if results:
+ return MismatchesAll(results)
+ else:
+ return None
+
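+# Usage sketch (illustrative only, assuming Equals and Contains from
+# testtools.matchers): every component matcher must succeed, and
+# first_only=True reports just the first mismatch rather than all of them.
+#
+#   matcher = MatchesAll(Contains('a'), Not(Equals('a')), first_only=True)
+#   matcher.match('ab')   # None -> matched
+#   matcher.match('a')    # only the Not(Equals('a')) mismatch is returned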
+
+class MismatchesAll(Mismatch):
+ """A mismatch with many child mismatches."""
+
+ def __init__(self, mismatches, wrap=True):
+ self.mismatches = mismatches
+ self._wrap = wrap
+
+ def describe(self):
+ descriptions = []
+ if self._wrap:
+ descriptions = ["Differences: ["]
+ for mismatch in self.mismatches:
+ descriptions.append(mismatch.describe())
+ if self._wrap:
+ descriptions.append("]")
+ return '\n'.join(descriptions)
+
+
+class Not(object):
+ """Inverts a matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'Not(%s)' % (self.matcher,)
+
+ def match(self, other):
+ mismatch = self.matcher.match(other)
+ if mismatch is None:
+ return MatchedUnexpectedly(self.matcher, other)
+ else:
+ return None
+
+
+class MatchedUnexpectedly(Mismatch):
+ """A thing matched when it wasn't supposed to."""
+
+ def __init__(self, matcher, other):
+ self.matcher = matcher
+ self.other = other
+
+ def describe(self):
+ return "%r matches %s" % (self.other, self.matcher)
+
+
+class Annotate(object):
+ """Annotates a matcher with a descriptive string.
+
+ Mismatches are then described as '<mismatch>: <annotation>'.
+ """
+
+ def __init__(self, annotation, matcher):
+ self.annotation = annotation
+ self.matcher = matcher
+
+ @classmethod
+ def if_message(cls, annotation, matcher):
+ """Annotate ``matcher`` only if ``annotation`` is non-empty."""
+ if not annotation:
+ return matcher
+ return cls(annotation, matcher)
+
+ def __str__(self):
+ return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
+
+ def match(self, other):
+ mismatch = self.matcher.match(other)
+ if mismatch is not None:
+ return AnnotatedMismatch(self.annotation, mismatch)
+
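+# Usage sketch (illustrative only, assuming Equals from testtools.matchers):
+# the annotation is appended to the wrapped matcher's mismatch description,
+# and if_message() skips the wrapper entirely when the annotation is empty.
+#
+#   mismatch = Annotate("port must be the default", Equals(8080)).match(80)
+#   mismatch.describe()   # something like "8080 != 80: port must be the default"
+#   Annotate.if_message("", Equals(8080))   # returns Equals(8080) unchanged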
+
+class PostfixedMismatch(MismatchDecorator):
+ """A mismatch annotated with a descriptive string."""
+
+ def __init__(self, annotation, mismatch):
+ super(PostfixedMismatch, self).__init__(mismatch)
+ self.annotation = annotation
+ self.mismatch = mismatch
+
+ def describe(self):
+ return '%s: %s' % (self.original.describe(), self.annotation)
+
+
+AnnotatedMismatch = PostfixedMismatch
+
+
+class PrefixedMismatch(MismatchDecorator):
+
+ def __init__(self, prefix, mismatch):
+ super(PrefixedMismatch, self).__init__(mismatch)
+ self.prefix = prefix
+
+ def describe(self):
+ return '%s: %s' % (self.prefix, self.original.describe())
+
+
+class AfterPreprocessing(object):
+ """Matches if the value matches after passing through a function.
+
+ This can be used to aid in creating trivial matchers as functions, for
+ example::
+
+ def PathHasFileContent(content):
+ def _read(path):
+ return open(path).read()
+ return AfterPreprocessing(_read, Equals(content))
+ """
+
+ def __init__(self, preprocessor, matcher, annotate=True):
+ """Create an AfterPreprocessing matcher.
+
+ :param preprocessor: A function called with the matchee before
+ matching.
+ :param matcher: What to match the preprocessed matchee against.
+ :param annotate: Whether or not to annotate the matcher with
+ something explaining how we transformed the matchee. Defaults
+ to True.
+ """
+ self.preprocessor = preprocessor
+ self.matcher = matcher
+ self.annotate = annotate
+
+ def _str_preprocessor(self):
+ if isinstance(self.preprocessor, types.FunctionType):
+ return '<function %s>' % self.preprocessor.__name__
+ return str(self.preprocessor)
+
+ def __str__(self):
+ return "AfterPreprocessing(%s, %s)" % (
+ self._str_preprocessor(), self.matcher)
+
+ def match(self, value):
+ after = self.preprocessor(value)
+ if self.annotate:
+ matcher = Annotate(
+ "after %s on %r" % (self._str_preprocessor(), value),
+ self.matcher)
+ else:
+ matcher = self.matcher
+ return matcher.match(after)
+
+
+# This is the old, deprecated spelling of the name, kept for backwards
+# compatibility.
+AfterPreproccessing = AfterPreprocessing
+
+
+class AllMatch(object):
+ """Matches if all provided values match the given matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'AllMatch(%s)' % (self.matcher,)
+
+ def match(self, values):
+ mismatches = []
+ for value in values:
+ mismatch = self.matcher.match(value)
+ if mismatch:
+ mismatches.append(mismatch)
+ if mismatches:
+ return MismatchesAll(mismatches)
+
+
+class AnyMatch(object):
+ """Matches if any of the provided values match the given matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'AnyMatch(%s)' % (self.matcher,)
+
+ def match(self, values):
+ mismatches = []
+ for value in values:
+ mismatch = self.matcher.match(value)
+ if mismatch:
+ mismatches.append(mismatch)
+ else:
+ return None
+ return MismatchesAll(mismatches)
+
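+# Usage sketch (illustrative only, assuming LessThan from testtools.matchers):
+# AllMatch requires every value to satisfy the matcher, AnyMatch requires at
+# least one of them to.
+#
+#   AllMatch(LessThan(10)).match([1, 5, 9])     # None -> all matched
+#   AnyMatch(LessThan(10)).match([99, 3, 42])   # None -> at least one matched
+#   AllMatch(LessThan(10)).match([1, 99])       # MismatchesAll for 99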
+
+class MatchesPredicate(Matcher):
+ """Match if a given function returns True.
+
+ It is reasonably common to want to make a very simple matcher based on a
+ function that you already have that returns True or False given a single
+ argument (i.e. a predicate function). This matcher makes it very easy to
+ do so. e.g.::
+
+ IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even')
+ self.assertThat(4, IsEven)
+ """
+
+ def __init__(self, predicate, message):
+ """Create a ``MatchesPredicate`` matcher.
+
+ :param predicate: A function that takes a single argument and returns
+ a value that will be interpreted as a boolean.
+ :param message: A message to describe a mismatch. It will be formatted
+ with '%' and be given whatever was passed to ``match()``. Thus, it
+ needs to contain exactly one thing like '%s', '%d' or '%f'.
+ """
+ self.predicate = predicate
+ self.message = message
+
+ def __str__(self):
+ return '%s(%r, %r)' % (
+ self.__class__.__name__, self.predicate, self.message)
+
+ def match(self, x):
+ if not self.predicate(x):
+ return Mismatch(self.message % x)
+
+
+def MatchesPredicateWithParams(predicate, message, name=None):
+ """Match if a given parameterised function returns True.
+
+ It is reasonably common to want to make a very simple matcher based on a
+ function that you already have that returns True or False given some
+ arguments. This matcher makes it very easy to do so. e.g.::
+
+        HasLength = MatchesPredicateWithParams(
+ lambda x, y: len(x) == y, 'len({0}) is not {1}')
+ # This assertion will fail, as 'len([1, 2]) == 3' is False.
+ self.assertThat([1, 2], HasLength(3))
+
+    Note that unlike MatchesPredicate, MatchesPredicateWithParams returns a
+    factory: calling it with the extra parameters constructs the actual
+    matcher.
+
+ The predicate function should take the object to match as its first
+ parameter. Any additional parameters supplied when constructing a matcher
+ are supplied to the predicate as additional parameters when checking for a
+ match.
+
+ :param predicate: The predicate function.
+    :param message: A format string for describing mismatches.
+ :param name: Optional replacement name for the matcher.
+ """
+ def construct_matcher(*args, **kwargs):
+ return _MatchesPredicateWithParams(
+ predicate, message, name, *args, **kwargs)
+ return construct_matcher
+
+
+class _MatchesPredicateWithParams(Matcher):
+
+ def __init__(self, predicate, message, name, *args, **kwargs):
+ """Create a ``MatchesPredicateWithParams`` matcher.
+
+ :param predicate: A function that takes an object to match and
+ additional params as given in ``*args`` and ``**kwargs``. The
+ result of the function will be interpreted as a boolean to
+ determine a match.
+ :param message: A message to describe a mismatch. It will be formatted
+            with .format(), receiving whatever was passed to ``match()``
+            followed by ``*args`` as its positional arguments, and whatever
+            was passed in ``**kwargs`` as its keyword arguments.
+
+ For instance, to format a single parameter::
+
+ "{0} is not a {1}"
+
+ To format a keyword arg::
+
+ "{0} is not a {type_to_check}"
+ :param name: What name to use for the matcher class. Pass None to use
+ the default.
+ """
+ self.predicate = predicate
+ self.message = message
+ self.name = name
+ self.args = args
+ self.kwargs = kwargs
+
+ def __str__(self):
+ args = [str(arg) for arg in self.args]
+ kwargs = ["%s=%s" % item for item in self.kwargs.items()]
+ args = ", ".join(args + kwargs)
+ if self.name is None:
+ name = 'MatchesPredicateWithParams(%r, %r)' % (
+ self.predicate, self.message)
+ else:
+ name = self.name
+ return '%s(%s)' % (name, args)
+
+ def match(self, x):
+ if not self.predicate(x, *self.args, **self.kwargs):
+ return Mismatch(
+ self.message.format(*((x,) + self.args), **self.kwargs))
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py
new file mode 100644
index 00000000000..36e5ee02218
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
+"""
+
+__all__ = [
+ 'Matcher',
+ 'Mismatch',
+ 'MismatchDecorator',
+ 'MismatchError',
+ ]
+
+from testtools.compat import (
+ _isbytes,
+ istext,
+ str_is_unicode,
+ text_repr
+ )
+
+
+class Matcher(object):
+ """A pattern matcher.
+
+ A Matcher must implement match and __str__ to be used by
+ testtools.TestCase.assertThat. Matcher.match(thing) returns None when
+ thing is completely matched, and a Mismatch object otherwise.
+
+ Matchers can be useful outside of test cases, as they are simply a
+ pattern matching language expressed as objects.
+
+ testtools.matchers is inspired by hamcrest, but is pythonic rather than
+ a Java transcription.
+ """
+
+ def match(self, something):
+ """Return None if this matcher matches something, a Mismatch otherwise.
+ """
+ raise NotImplementedError(self.match)
+
+ def __str__(self):
+ """Get a sensible human representation of the matcher.
+
+ This should include the parameters given to the matcher and any
+ state that would affect the matches operation.
+ """
+ raise NotImplementedError(self.__str__)
+
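+# A sketch of a custom matcher built on this protocol (illustrative only, not
+# part of upstream testtools): match() returns None on success and a Mismatch
+# describing the failure otherwise.
+#
+#   class IsPositive(Matcher):
+#       def match(self, value):
+#           if value > 0:
+#               return None
+#           return Mismatch("%r is not positive" % (value,))
+#
+#       def __str__(self):
+#           return "IsPositive()"
+#
+#   IsPositive().match(3)    # None
+#   IsPositive().match(-1)   # Mismatch whose describe() is "-1 is not positive"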
+
+class Mismatch(object):
+ """An object describing a mismatch detected by a Matcher."""
+
+ def __init__(self, description=None, details=None):
+ """Construct a `Mismatch`.
+
+ :param description: A description to use. If not provided,
+ `Mismatch.describe` must be implemented.
+ :param details: Extra details about the mismatch. Defaults
+ to the empty dict.
+ """
+ if description:
+ self._description = description
+ if details is None:
+ details = {}
+ self._details = details
+
+ def describe(self):
+ """Describe the mismatch.
+
+ This should be either a human-readable string or castable to a string.
+        In particular, it should either be plain ascii or unicode on Python 2,
+ and care should be taken to escape control characters.
+ """
+ try:
+ return self._description
+ except AttributeError:
+ raise NotImplementedError(self.describe)
+
+ def get_details(self):
+ """Get extra details about the mismatch.
+
+ This allows the mismatch to provide extra information beyond the basic
+ description, including large text or binary files, or debugging internals
+ without having to force it to fit in the output of 'describe'.
+
+ The testtools assertion assertThat will query get_details and attach
+ all its values to the test, permitting them to be reported in whatever
+ manner the test environment chooses.
+
+ :return: a dict mapping names to Content objects. name is a string to
+ name the detail, and the Content object is the detail to add
+ to the result. For more information see the API to which items from
+            this dict are passed: testtools.TestCase.addDetail.
+ """
+ return getattr(self, '_details', {})
+
+ def __repr__(self):
+ return "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
+ id(self), self.__dict__)
+
+
+class MismatchError(AssertionError):
+ """Raised when a mismatch occurs."""
+
+ # This class exists to work around
+ # <https://bugs.launchpad.net/testtools/+bug/804127>. It provides a
+ # guaranteed way of getting a readable exception, no matter what crazy
+ # characters are in the matchee, matcher or mismatch.
+
+ def __init__(self, matchee, matcher, mismatch, verbose=False):
+ # Have to use old-style upcalling for Python 2.4 and 2.5
+ # compatibility.
+ AssertionError.__init__(self)
+ self.matchee = matchee
+ self.matcher = matcher
+ self.mismatch = mismatch
+ self.verbose = verbose
+
+ def __str__(self):
+ difference = self.mismatch.describe()
+ if self.verbose:
+ # GZ 2011-08-24: Smelly API? Better to take any object and special
+ # case text inside?
+ if istext(self.matchee) or _isbytes(self.matchee):
+ matchee = text_repr(self.matchee, multiline=False)
+ else:
+ matchee = repr(self.matchee)
+ return (
+ 'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
+ % (matchee, self.matcher, difference))
+ else:
+ return difference
+
+ if not str_is_unicode:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return self.__unicode__().encode("ascii", "backslashreplace")
+
+
+class MismatchDecorator(object):
+ """Decorate a ``Mismatch``.
+
+ Forwards all messages to the original mismatch object. Probably the best
+ way to use this is inherit from this class and then provide your own
+ custom decoration logic.
+ """
+
+ def __init__(self, original):
+ """Construct a `MismatchDecorator`.
+
+ :param original: A `Mismatch` object to decorate.
+ """
+ self.original = original
+
+ def __repr__(self):
+ return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,)
+
+ def describe(self):
+ return self.original.describe()
+
+ def get_details(self):
+ return self.original.get_details()
+
+
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py
new file mode 100644
index 00000000000..ba0ac8fd8bf
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/monkey.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Helpers for monkey-patching Python code."""
+
+__all__ = [
+ 'MonkeyPatcher',
+ 'patch',
+ ]
+
+
+class MonkeyPatcher(object):
+ """A set of monkey-patches that can be applied and removed all together.
+
+ Use this to cover up attributes with new objects. Particularly useful for
+ testing difficult code.
+ """
+
+ # Marker used to indicate that the patched attribute did not exist on the
+ # object before we patched it.
+ _NO_SUCH_ATTRIBUTE = object()
+
+ def __init__(self, *patches):
+ """Construct a `MonkeyPatcher`.
+
+ :param patches: The patches to apply, each should be (obj, name,
+ new_value). Providing patches here is equivalent to calling
+ `add_patch`.
+ """
+ # List of patches to apply in (obj, name, value).
+ self._patches_to_apply = []
+ # List of the original values for things that have been patched.
+ # (obj, name, value) format.
+ self._originals = []
+ for patch in patches:
+ self.add_patch(*patch)
+
+ def add_patch(self, obj, name, value):
+ """Add a patch to overwrite 'name' on 'obj' with 'value'.
+
+        The attribute `name` on `obj` will be assigned to `value` when
+        `patch` is called or during `run_with_patches`.
+
+ You can restore the original values with a call to restore().
+ """
+ self._patches_to_apply.append((obj, name, value))
+
+ def patch(self):
+ """Apply all of the patches that have been specified with `add_patch`.
+
+        Reverse this operation using `restore`.
+ """
+ for obj, name, value in self._patches_to_apply:
+ original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE)
+ self._originals.append((obj, name, original_value))
+ setattr(obj, name, value)
+
+ def restore(self):
+ """Restore all original values to any patched objects.
+
+ If the patched attribute did not exist on an object before it was
+ patched, `restore` will delete the attribute so as to return the
+ object to its original state.
+ """
+ while self._originals:
+ obj, name, value = self._originals.pop()
+ if value is self._NO_SUCH_ATTRIBUTE:
+ delattr(obj, name)
+ else:
+ setattr(obj, name, value)
+
+ def run_with_patches(self, f, *args, **kw):
+ """Run 'f' with the given args and kwargs with all patches applied.
+
+ Restores all objects to their original state when finished.
+ """
+ self.patch()
+ try:
+ return f(*args, **kw)
+ finally:
+ self.restore()
+
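+# Usage sketch (illustrative only): patches are applied together and restored
+# together, including deleting attributes that did not exist before patching.
+#
+#   import os
+#   patcher = MonkeyPatcher((os, 'getcwd', lambda: '/fake'))
+#   patcher.patch()
+#   os.getcwd()        # '/fake'
+#   patcher.restore()  # os.getcwd is back to the real implementation
+#
+# run_with_patches(f, *args, **kw) wraps the same apply/restore pair around a
+# single call to f.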
+
+def patch(obj, attribute, value):
+ """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.
+
+ If 'attribute' is not set on 'obj' already, then the returned callable
+ will delete the attribute when called.
+
+ :param obj: An object to monkey-patch.
+ :param attribute: The name of the attribute to patch.
+ :param value: The value to set 'obj.attribute' to.
+ :return: A nullary callable that, when run, will restore 'obj' to its
+ original state.
+ """
+ patcher = MonkeyPatcher((obj, attribute, value))
+ patcher.patch()
+ return patcher.restore
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py
new file mode 100755
index 00000000000..466da76a7d4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/run.py
@@ -0,0 +1,399 @@
+# Copyright (c) 2009 testtools developers. See LICENSE for details.
+
+"""python -m testtools.run testspec [testspec...]
+
+Run some tests with the testtools extended API.
+
+For instance, to run the testtools test suite:
+ $ python -m testtools.run testtools.tests.test_suite
+"""
+
+from functools import partial
+import os
+import unittest
+import sys
+
+from extras import safe_hasattr
+
+from testtools import TextTestResult
+from testtools.compat import classtypes, istext, unicode_output_stream
+from testtools.testsuite import filter_by_ids, iterate_tests, sorted_tests
+
+
+defaultTestLoader = unittest.defaultTestLoader
+defaultTestLoaderCls = unittest.TestLoader
+
+if getattr(defaultTestLoader, 'discover', None) is None:
+ try:
+ import discover
+ defaultTestLoader = discover.DiscoveringTestLoader()
+ defaultTestLoaderCls = discover.DiscoveringTestLoader
+ have_discover = True
+ except ImportError:
+ have_discover = False
+else:
+ have_discover = True
+
+
+def list_test(test):
+ """Return the test ids that would be run if test() was run.
+
+ When things fail to import they can be represented as well, though
+ we use an ugly hack (see http://bugs.python.org/issue19746 for details)
+ to determine that. The difference matters because if a user is
+ filtering tests to run on the returned ids, a failed import can reduce
+ the visible tests but it can be impossible to tell that the selected
+ test would have been one of the imported ones.
+
+ :return: A tuple of test ids that would run and error strings
+ describing things that failed to import.
+ """
+ unittest_import_str = 'unittest.loader.ModuleImportFailure.'
+ test_ids = []
+ errors = []
+ for test in iterate_tests(test):
+        # We resort to this ugly id-prefix check; see the docstring above.
+ if test.id().startswith(unittest_import_str):
+ errors.append(test.id()[len(unittest_import_str):])
+ else:
+ test_ids.append(test.id())
+ return test_ids, errors
+
+
+class TestToolsTestRunner(object):
+ """ A thunk object to support unittest.TestProgram."""
+
+ def __init__(self, verbosity=None, failfast=None, buffer=None,
+ stdout=None):
+ """Create a TestToolsTestRunner.
+
+ :param verbosity: Ignored.
+ :param failfast: Stop running tests at the first failure.
+ :param buffer: Ignored.
+ :param stdout: Stream to use for stdout.
+ """
+ self.failfast = failfast
+ self.stdout = stdout
+
+ def list(self, test):
+ """List the tests that would be run if test() was run."""
+ test_ids, errors = list_test(test)
+ for test_id in test_ids:
+ self.stdout.write('%s\n' % test_id)
+ if errors:
+ self.stdout.write('Failed to import\n')
+ for test_id in errors:
+ self.stdout.write('%s\n' % test_id)
+ sys.exit(2)
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = TextTestResult(
+ unicode_output_stream(sys.stdout), failfast=self.failfast)
+ result.startTestRun()
+ try:
+ return test.run(result)
+ finally:
+ result.stopTestRun()
+
+
+####################
+# Taken from python 2.7 and slightly modified for compatibility with
+# older versions. Delete when 2.7 is the oldest supported version.
+# Modifications:
+# - Use have_discover to raise an error if the user tries to use
+# discovery on an old version and doesn't have discover installed.
+# - If --catch is given check that installHandler is available, as
+# it won't be on old python versions.
+# - print calls have been made single-source python3 compatible.
+# - exception handling likewise.
+# - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE
+# removed.
+# - A tweak has been added to detect 'python -m *.run' and use a
+# better progName in that case.
+# - self.module is more comprehensively set to None when being invoked from
+# the commandline - __name__ is used as a sentinel value.
+# - --list has been added which can list tests (should be upstreamed).
+# - --load-list has been added which can reduce the tests used (should be
+# upstreamed).
+# - The limitation of using getopt is declared to the user.
+# - http://bugs.python.org/issue16709 is worked around, by sorting tests when
+# discover is used.
+
+FAILFAST = " -f, --failfast Stop on first failure\n"
+CATCHBREAK = " -c, --catch Catch control-C and display results\n"
+BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
+
+USAGE_AS_MAIN = """\
+Usage: %(progName)s [options] [tests]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+ -l, --list List tests rather than executing them.
+ --load-list Specifies a file containing test ids, only tests matching
+ those ids are executed.
+%(failfast)s%(catchbreak)s%(buffer)s
+Examples:
+ %(progName)s test_module - run tests from test_module
+ %(progName)s module.TestClass - run tests from module.TestClass
+ %(progName)s module.Class.test_method - run specified test method
+
+All options must come before [tests]. [tests] can be a list of any number of
+test modules, classes and test methods.
+
+Alternative Usage: %(progName)s discover [options]
+
+Options:
+ -v, --verbose Verbose output
+%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+ -l, --list List tests rather than executing them.
+ --load-list Specifies a file containing test ids, only tests matching
+ those ids are executed.
+
+For test discovery all test modules must be importable from the top
+level directory of the project.
+"""
+
+
+class TestProgram(object):
+ """A command-line program that runs a set of tests; this is primarily
+ for making test modules conveniently executable.
+ """
+ USAGE = USAGE_AS_MAIN
+
+ # defaults for testing
+ failfast = catchbreak = buffer = progName = None
+
+ def __init__(self, module=__name__, defaultTest=None, argv=None,
+ testRunner=None, testLoader=defaultTestLoader,
+ exit=True, verbosity=1, failfast=None, catchbreak=None,
+ buffer=None, stdout=None):
+ if module == __name__:
+ self.module = None
+ elif istext(module):
+ self.module = __import__(module)
+ for part in module.split('.')[1:]:
+ self.module = getattr(self.module, part)
+ else:
+ self.module = module
+ if argv is None:
+ argv = sys.argv
+ if stdout is None:
+ stdout = sys.stdout
+
+ self.exit = exit
+ self.failfast = failfast
+ self.catchbreak = catchbreak
+ self.verbosity = verbosity
+ self.buffer = buffer
+ self.defaultTest = defaultTest
+ self.listtests = False
+ self.load_list = None
+ self.testRunner = testRunner
+ self.testLoader = testLoader
+ progName = argv[0]
+ if progName.endswith('%srun.py' % os.path.sep):
+ elements = progName.split(os.path.sep)
+ progName = '%s.run' % elements[-2]
+ else:
+ progName = os.path.basename(argv[0])
+ self.progName = progName
+ self.parseArgs(argv)
+ if self.load_list:
+ # TODO: preserve existing suites (like testresources does in
+ # OptimisingTestSuite.add, but with a standard protocol).
+ # This is needed because the load_tests hook allows arbitrary
+ # suites, even if that is rarely used.
+ source = open(self.load_list, 'rb')
+ try:
+ lines = source.readlines()
+ finally:
+ source.close()
+ test_ids = set(line.strip().decode('utf-8') for line in lines)
+ self.test = filter_by_ids(self.test, test_ids)
+ if not self.listtests:
+ self.runTests()
+ else:
+ runner = self._get_runner()
+ if safe_hasattr(runner, 'list'):
+ runner.list(self.test)
+ else:
+ for test in iterate_tests(self.test):
+ stdout.write('%s\n' % test.id())
+
+ def usageExit(self, msg=None):
+ if msg:
+ print(msg)
+ usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+ 'buffer': ''}
+ if self.failfast != False:
+ usage['failfast'] = FAILFAST
+ if self.catchbreak != False:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer != False:
+ usage['buffer'] = BUFFEROUTPUT
+ print(self.USAGE % usage)
+ sys.exit(2)
+
+ def parseArgs(self, argv):
+ if len(argv) > 1 and argv[1].lower() == 'discover':
+ self._do_discovery(argv[2:])
+ return
+
+ import getopt
+ long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer',
+ 'list', 'load-list=']
+ try:
+ options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts)
+ for opt, value in options:
+ if opt in ('-h','-H','--help'):
+ self.usageExit()
+ if opt in ('-q','--quiet'):
+ self.verbosity = 0
+ if opt in ('-v','--verbose'):
+ self.verbosity = 2
+ if opt in ('-f','--failfast'):
+ if self.failfast is None:
+ self.failfast = True
+ # Should this raise an exception if -f is not valid?
+ if opt in ('-c','--catch'):
+ if self.catchbreak is None:
+ self.catchbreak = True
+ # Should this raise an exception if -c is not valid?
+ if opt in ('-b','--buffer'):
+ if self.buffer is None:
+ self.buffer = True
+ # Should this raise an exception if -b is not valid?
+ if opt in ('-l', '--list'):
+ self.listtests = True
+ if opt == '--load-list':
+ self.load_list = value
+ if len(args) == 0 and self.defaultTest is None:
+ # createTests will load tests from self.module
+ self.testNames = None
+ elif len(args) > 0:
+ self.testNames = args
+ else:
+ self.testNames = (self.defaultTest,)
+ self.createTests()
+ except getopt.error:
+ self.usageExit(sys.exc_info()[1])
+
+ def createTests(self):
+ if self.testNames is None:
+ self.test = self.testLoader.loadTestsFromModule(self.module)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames,
+ self.module)
+
+ def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
+ # handle command line args for test discovery
+ if not have_discover:
+ raise AssertionError("Unable to use discovery, must use python 2.7 "
+ "or greater, or install the discover package.")
+ self.progName = '%s discover' % self.progName
+ import optparse
+ parser = optparse.OptionParser()
+ parser.prog = self.progName
+ parser.add_option('-v', '--verbose', dest='verbose', default=False,
+ help='Verbose output', action='store_true')
+ if self.failfast != False:
+ parser.add_option('-f', '--failfast', dest='failfast', default=False,
+ help='Stop on first fail or error',
+ action='store_true')
+ if self.catchbreak != False:
+ parser.add_option('-c', '--catch', dest='catchbreak', default=False,
+ help='Catch ctrl-C and display results so far',
+ action='store_true')
+ if self.buffer != False:
+ parser.add_option('-b', '--buffer', dest='buffer', default=False,
+ help='Buffer stdout and stderr during tests',
+ action='store_true')
+ parser.add_option('-s', '--start-directory', dest='start', default='.',
+ help="Directory to start discovery ('.' default)")
+ parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
+ help="Pattern to match tests ('test*.py' default)")
+ parser.add_option('-t', '--top-level-directory', dest='top', default=None,
+ help='Top level directory of project (defaults to start directory)')
+ parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
+ help='List tests rather than running them.')
+ parser.add_option('--load-list', dest='load_list', default=None,
+ help='Specify a filename containing the test ids to use.')
+
+ options, args = parser.parse_args(argv)
+ if len(args) > 3:
+ self.usageExit()
+
+ for name, value in zip(('start', 'pattern', 'top'), args):
+ setattr(options, name, value)
+
+ # only set options from the parsing here
+ # if they weren't set explicitly in the constructor
+ if self.failfast is None:
+ self.failfast = options.failfast
+ if self.catchbreak is None:
+ self.catchbreak = options.catchbreak
+ if self.buffer is None:
+ self.buffer = options.buffer
+ self.listtests = options.listtests
+ self.load_list = options.load_list
+
+ if options.verbose:
+ self.verbosity = 2
+
+ start_dir = options.start
+ pattern = options.pattern
+ top_level_dir = options.top
+
+ loader = Loader()
+ # See http://bugs.python.org/issue16709
+        # While sorting here is intrusive, it's better than being random.
+ # Rules for the sort:
+ # - standard suites are flattened, and the resulting tests sorted by
+ # id.
+ # - non-standard suites are preserved as-is, and sorted into position
+ # by the first test found by iterating the suite.
+ # We do this by a DSU process: flatten and grab a key, sort, strip the
+ # keys.
+ loaded = loader.discover(start_dir, pattern, top_level_dir)
+ self.test = sorted_tests(loaded)
+
+ def runTests(self):
+ if (self.catchbreak
+ and getattr(unittest, 'installHandler', None) is not None):
+ unittest.installHandler()
+ testRunner = self._get_runner()
+ self.result = testRunner.run(self.test)
+ if self.exit:
+ sys.exit(not self.result.wasSuccessful())
+
+ def _get_runner(self):
+ if self.testRunner is None:
+ self.testRunner = TestToolsTestRunner
+ try:
+ testRunner = self.testRunner(verbosity=self.verbosity,
+ failfast=self.failfast,
+ buffer=self.buffer)
+ except TypeError:
+ # didn't accept the verbosity, buffer or failfast arguments
+ try:
+ testRunner = self.testRunner()
+ except TypeError:
+ # it is assumed to be a TestRunner instance
+ testRunner = self.testRunner
+ return testRunner
+
+
+################
+
+def main(argv, stdout):
+ program = TestProgram(argv=argv, testRunner=partial(TestToolsTestRunner, stdout=stdout),
+ stdout=stdout)
+
+if __name__ == '__main__':
+ main(sys.argv, sys.stdout)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/runtest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/runtest.py
new file mode 100644
index 00000000000..26ae387211b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/runtest.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
+
+"""Individual test case execution."""
+
+__all__ = [
+ 'MultipleExceptions',
+ 'RunTest',
+ ]
+
+import sys
+
+from testtools.testresult import ExtendedToOriginalDecorator
+
+
+class MultipleExceptions(Exception):
+ """Represents many exceptions raised from some operation.
+
+ :ivar args: The sys.exc_info() tuples for each exception.
+ """
+
+
+class RunTest(object):
+ """An object to run a test.
+
+ RunTest objects are used to implement the internal logic involved in
+ running a test. TestCase.__init__ stores _RunTest as the class of RunTest
+ to execute. Passing the runTest= parameter to TestCase.__init__ allows a
+ different RunTest class to be used to execute the test.
+
+ Subclassing or replacing RunTest can be useful to add functionality to the
+ way that tests are run in a given project.
+
+ :ivar case: The test case that is to be run.
+ :ivar result: The result object a case is reporting to.
+ :ivar handlers: A list of (ExceptionClass, handler_function) for
+ exceptions that should be caught if raised from the user
+ code. Exceptions that are caught are checked against this list in
+ first to last order. There is a catch-all of 'Exception' at the end
+ of the list, so to add a new exception to the list, insert it at the
+ front (which ensures that it will be checked before any existing base
+        classes in the list). If you add multiple exceptions, some of which are
+ subclasses of each other, add the most specific exceptions last (so
+ they come before their parent classes in the list).
+ :ivar exception_caught: An object returned when _run_user catches an
+ exception.
+ :ivar _exceptions: A list of caught exceptions, used to do the single
+ reporting of error/failure/skip etc.
+ """
+
+ def __init__(self, case, handlers=None):
+ """Create a RunTest to run a case.
+
+ :param case: A testtools.TestCase test case object.
+ :param handlers: Exception handlers for this RunTest. These are stored
+ in self.handlers and can be modified later if needed.
+ """
+ self.case = case
+ self.handlers = handlers or []
+ self.exception_caught = object()
+ self._exceptions = []
+
+ def run(self, result=None):
+ """Run self.case reporting activity to result.
+
+ :param result: Optional testtools.TestResult to report activity to.
+ :return: The result object the test was run against.
+ """
+ if result is None:
+ actual_result = self.case.defaultTestResult()
+ actual_result.startTestRun()
+ else:
+ actual_result = result
+ try:
+ return self._run_one(actual_result)
+ finally:
+ if result is None:
+ actual_result.stopTestRun()
+
+ def _run_one(self, result):
+ """Run one test reporting to result.
+
+ :param result: A testtools.TestResult to report activity to.
+ This result object is decorated with an ExtendedToOriginalDecorator
+ to ensure that the latest TestResult API can be used with
+ confidence by client code.
+ :return: The result object the test was run against.
+ """
+ return self._run_prepared_result(ExtendedToOriginalDecorator(result))
+
+ def _run_prepared_result(self, result):
+ """Run one test reporting to result.
+
+ :param result: A testtools.TestResult to report activity to.
+ :return: The result object the test was run against.
+ """
+ result.startTest(self.case)
+ self.result = result
+ try:
+ self._exceptions = []
+ self._run_core()
+ if self._exceptions:
+ # One or more caught exceptions, now trigger the test's
+ # reporting method for just one.
+ e = self._exceptions.pop()
+ for exc_class, handler in self.handlers:
+ if isinstance(e, exc_class):
+ handler(self.case, self.result, e)
+ break
+ finally:
+ result.stopTest(self.case)
+ return result
+
+ def _run_core(self):
+ """Run the user supplied test code."""
+ if self.exception_caught == self._run_user(self.case._run_setup,
+ self.result):
+ # Don't run the test method if we failed getting here.
+ self._run_cleanups(self.result)
+ return
+ # Run everything from here on in. If any of the methods raise an
+ # exception we'll have failed.
+ failed = False
+ try:
+ if self.exception_caught == self._run_user(
+ self.case._run_test_method, self.result):
+ failed = True
+ finally:
+ try:
+ if self.exception_caught == self._run_user(
+ self.case._run_teardown, self.result):
+ failed = True
+ finally:
+ try:
+ if self.exception_caught == self._run_user(
+ self._run_cleanups, self.result):
+ failed = True
+ finally:
+ if getattr(self.case, 'force_failure', None):
+ self._run_user(_raise_force_fail_error)
+ failed = True
+ if not failed:
+ self.result.addSuccess(self.case,
+ details=self.case.getDetails())
+
+ def _run_cleanups(self, result):
+ """Run the cleanups that have been added with addCleanup.
+
+ See the docstring for addCleanup for more information.
+
+ :return: None if all cleanups ran without error,
+ ``exception_caught`` if there was an error.
+ """
+ failing = False
+ while self.case._cleanups:
+ function, arguments, keywordArguments = self.case._cleanups.pop()
+ got_exception = self._run_user(
+ function, *arguments, **keywordArguments)
+ if got_exception == self.exception_caught:
+ failing = True
+ if failing:
+ return self.exception_caught
+
+ def _run_user(self, fn, *args, **kwargs):
+ """Run a user supplied function.
+
+ Exceptions are processed by `_got_user_exception`.
+
+ :return: Either whatever 'fn' returns or ``exception_caught`` if
+ 'fn' raised an exception.
+ """
+ try:
+ return fn(*args, **kwargs)
+ except KeyboardInterrupt:
+ raise
+ except:
+ return self._got_user_exception(sys.exc_info())
+
+ def _got_user_exception(self, exc_info, tb_label='traceback'):
+ """Called when user code raises an exception.
+
+ If 'exc_info' is a `MultipleExceptions`, then we recurse into it
+ unpacking the errors that it's made up from.
+
+ :param exc_info: A sys.exc_info() tuple for the user error.
+ :param tb_label: An optional string label for the error. If
+ not specified, will default to 'traceback'.
+ :return: 'exception_caught' if we catch one of the exceptions that
+ have handlers in 'handlers', otherwise raise the error.
+ """
+ if exc_info[0] is MultipleExceptions:
+ for sub_exc_info in exc_info[1].args:
+ self._got_user_exception(sub_exc_info, tb_label)
+ return self.exception_caught
+ try:
+ e = exc_info[1]
+ self.case.onException(exc_info, tb_label=tb_label)
+ finally:
+ del exc_info
+ for exc_class, handler in self.handlers:
+ if isinstance(e, exc_class):
+ self._exceptions.append(e)
+ return self.exception_caught
+ raise e
+
+
+def _raise_force_fail_error():
+ raise AssertionError("Forced Test Failure")
+
+
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tags.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tags.py
new file mode 100644
index 00000000000..b55bd38667b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tags.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Tag support."""
+
+
+class TagContext(object):
+ """A tag context."""
+
+ def __init__(self, parent=None):
+ """Create a new TagContext.
+
+ :param parent: If provided, uses this as the parent context. Any tags
+ that are current on the parent at the time of construction are
+ current in this context.
+ """
+ self.parent = parent
+ self._tags = set()
+ if parent:
+ self._tags.update(parent.get_current_tags())
+
+ def get_current_tags(self):
+ """Return any current tags."""
+ return set(self._tags)
+
+ def change_tags(self, new_tags, gone_tags):
+ """Change the tags on this context.
+
+ :param new_tags: A set of tags to add to this context.
+ :param gone_tags: A set of tags to remove from this context.
+ :return: The tags now current on this context.
+ """
+ self._tags.update(new_tags)
+ self._tags.difference_update(gone_tags)
+ return self.get_current_tags()
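+
+# Usage sketch (illustrative only): a child context starts with its parent's
+# tags and can then diverge without affecting the parent.
+#
+#   parent = TagContext()
+#   parent.change_tags(set(['slow']), set())        # {'slow'}
+#   child = TagContext(parent)
+#   child.change_tags(set(['net']), set(['slow']))  # {'net'}
+#   parent.get_current_tags()                       # still {'slow'}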
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testcase.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testcase.py
new file mode 100644
index 00000000000..59ea2052a9a
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testcase.py
@@ -0,0 +1,942 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+"""Test case related stuff."""
+
+__metaclass__ = type
+__all__ = [
+ 'attr',
+ 'clone_test_with_new_id',
+ 'ExpectedException',
+ 'gather_details',
+ 'run_test_with',
+ 'skip',
+ 'skipIf',
+ 'skipUnless',
+ 'TestCase',
+ ]
+
+import copy
+import itertools
+import sys
+import types
+import unittest
+
+from extras import (
+ safe_hasattr,
+ try_import,
+ )
+
+from testtools import (
+ content,
+ )
+from testtools.compat import (
+ advance_iterator,
+ reraise,
+ )
+from testtools.matchers import (
+ Annotate,
+ Contains,
+ Equals,
+ MatchesAll,
+ MatchesException,
+ MismatchError,
+ Is,
+ IsInstance,
+ Not,
+ Raises,
+ )
+from testtools.monkey import patch
+from testtools.runtest import RunTest
+from testtools.testresult import (
+ ExtendedToOriginalDecorator,
+ TestResult,
+ )
+
+wraps = try_import('functools.wraps')
+
+class TestSkipped(Exception):
+ """Raised within TestCase.run() when a test is skipped."""
+TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
+TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
+
+
+class _UnexpectedSuccess(Exception):
+ """An unexpected success was raised.
+
+ Note that this exception is private plumbing in testtools' testcase
+ module.
+ """
+_UnexpectedSuccess = try_import(
+ 'unittest2.case._UnexpectedSuccess', _UnexpectedSuccess)
+_UnexpectedSuccess = try_import(
+ 'unittest.case._UnexpectedSuccess', _UnexpectedSuccess)
+
+class _ExpectedFailure(Exception):
+    """An expected failure occurred.
+
+ Note that this exception is private plumbing in testtools' testcase
+ module.
+ """
+_ExpectedFailure = try_import(
+ 'unittest2.case._ExpectedFailure', _ExpectedFailure)
+_ExpectedFailure = try_import(
+ 'unittest.case._ExpectedFailure', _ExpectedFailure)
+
+
+def run_test_with(test_runner, **kwargs):
+ """Decorate a test as using a specific ``RunTest``.
+
+ e.g.::
+
+ @run_test_with(CustomRunner, timeout=42)
+ def test_foo(self):
+ self.assertTrue(True)
+
+ The returned decorator works by setting an attribute on the decorated
+ function. `TestCase.__init__` looks for this attribute when deciding on a
+ ``RunTest`` factory. If you wish to use multiple decorators on a test
+ method, then you must either make this one the top-most decorator, or you
+ must write your decorators so that they update the wrapping function with
+ the attributes of the wrapped function. The latter is recommended style
+    anyway. ``functools.wraps``, ``functools.update_wrapper`` and
+ ``twisted.python.util.mergeFunctionMetadata`` can help you do this.
+
+ :param test_runner: A ``RunTest`` factory that takes a test case and an
+ optional list of exception handlers. See ``RunTest``.
+ :param kwargs: Keyword arguments to pass on as extra arguments to
+ 'test_runner'.
+ :return: A decorator to be used for marking a test as needing a special
+ runner.
+ """
+ def decorator(function):
+ # Set an attribute on 'function' which will inform TestCase how to
+ # make the runner.
+ function._run_test_with = (
+ lambda case, handlers=None:
+ test_runner(case, handlers=handlers, **kwargs))
+ return function
+ return decorator
+
+
+def _copy_content(content_object):
+ """Make a copy of the given content object.
+
+ The content within ``content_object`` is iterated and saved. This is
+ useful when the source of the content is volatile, a log file in a
+ temporary directory for example.
+
+ :param content_object: A `content.Content` instance.
+ :return: A `content.Content` instance with the same mime-type as
+ ``content_object`` and a non-volatile copy of its content.
+ """
+ content_bytes = list(content_object.iter_bytes())
+ content_callback = lambda: content_bytes
+ return content.Content(content_object.content_type, content_callback)
+
+
+def gather_details(source_dict, target_dict):
+ """Merge the details from ``source_dict`` into ``target_dict``.
+
+    :param source_dict: A dictionary of details to be gathered into
+        ``target_dict``.
+ :param target_dict: A dictionary into which details will be gathered.
+ """
+ for name, content_object in source_dict.items():
+ new_name = name
+ disambiguator = itertools.count(1)
+ while new_name in target_dict:
+ new_name = '%s-%d' % (name, advance_iterator(disambiguator))
+ name = new_name
+ target_dict[name] = _copy_content(content_object)
+
+
+class TestCase(unittest.TestCase):
+ """Extensions to the basic TestCase.
+
+ :ivar exception_handlers: Exceptions to catch from setUp, runTest and
+ tearDown. This list is able to be modified at any time and consists of
+ (exception_class, handler(case, result, exception_value)) pairs.
+ :ivar force_failure: Force testtools.RunTest to fail the test after the
+ test has completed.
+ :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
+ Defaults to ``RunTest``. The factory is expected to take a test case
+ and an optional list of exception handlers.
+ """
+
+ skipException = TestSkipped
+
+ run_tests_with = RunTest
+
+ def __init__(self, *args, **kwargs):
+ """Construct a TestCase.
+
+ :param testMethod: The name of the method to run.
+ :keyword runTest: Optional class to use to execute the test. If not
+ supplied ``RunTest`` is used. The instance to be used is created
+ when run() is invoked, so will be fresh each time. Overrides
+ ``TestCase.run_tests_with`` if given.
+ """
+ runTest = kwargs.pop('runTest', None)
+ super(TestCase, self).__init__(*args, **kwargs)
+ self._cleanups = []
+ self._unique_id_gen = itertools.count(1)
+ # Generators to ensure unique traceback ids. Maps traceback label to
+ # iterators.
+ self._traceback_id_gens = {}
+ self.__setup_called = False
+ self.__teardown_called = False
+ # __details is lazy-initialized so that a constructed-but-not-run
+ # TestCase is safe to use with clone_test_with_new_id.
+ self.__details = None
+ test_method = self._get_test_method()
+ if runTest is None:
+ runTest = getattr(
+ test_method, '_run_test_with', self.run_tests_with)
+ self.__RunTest = runTest
+ self.__exception_handlers = []
+ self.exception_handlers = [
+ (self.skipException, self._report_skip),
+ (self.failureException, self._report_failure),
+ (_ExpectedFailure, self._report_expected_failure),
+ (_UnexpectedSuccess, self._report_unexpected_success),
+ (Exception, self._report_error),
+ ]
+ if sys.version_info < (2, 6):
+ # Catch old-style string exceptions with None as the instance
+ self.exception_handlers.append((type(None), self._report_error))
+
+ def __eq__(self, other):
+ eq = getattr(unittest.TestCase, '__eq__', None)
+ if eq is not None and not unittest.TestCase.__eq__(self, other):
+ return False
+ return self.__dict__ == other.__dict__
+
+ def __repr__(self):
+ # We add id to the repr because it makes testing testtools easier.
+ return "<%s id=0x%0x>" % (self.id(), id(self))
+
+ def addDetail(self, name, content_object):
+ """Add a detail to be reported with this test's outcome.
+
+ For more details see pydoc testtools.TestResult.
+
+ :param name: The name to give this detail.
+ :param content_object: The content object for this detail. See
+ testtools.content for more detail.
+ """
+ if self.__details is None:
+ self.__details = {}
+ self.__details[name] = content_object
+
+ def getDetails(self):
+ """Get the details dict that will be reported with this test's outcome.
+
+ For more details see pydoc testtools.TestResult.
+ """
+ if self.__details is None:
+ self.__details = {}
+ return self.__details
+
+ def patch(self, obj, attribute, value):
+ """Monkey-patch 'obj.attribute' to 'value' while the test is running.
+
+        If 'obj' has no such attribute, then the monkey-patch will still go ahead,
+ and the attribute will be deleted instead of restored to its original
+ value.
+
+ :param obj: The object to patch. Can be anything.
+ :param attribute: The attribute on 'obj' to patch.
+ :param value: The value to set 'obj.attribute' to.
+ """
+ self.addCleanup(patch(obj, attribute, value))
+
+ def shortDescription(self):
+ return self.id()
+
+ def skipTest(self, reason):
+ """Cause this test to be skipped.
+
+ This raises self.skipException(reason). skipException is raised
+ to permit a skip to be triggered at any point (during setUp or the
+ testMethod itself). The run() method catches skipException and
+        translates that into a call to the result object's addSkip method.
+
+ :param reason: The reason why the test is being skipped. This must
+ support being cast into a unicode string for reporting.
+ """
+ raise self.skipException(reason)
+
+ # skipTest is how python2.7 spells this. Sometime in the future
+ # This should be given a deprecation decorator - RBC 20100611.
+ skip = skipTest
+
+ def _formatTypes(self, classOrIterable):
+ """Format a class or a bunch of classes for display in an error."""
+ className = getattr(classOrIterable, '__name__', None)
+ if className is None:
+ className = ', '.join(klass.__name__ for klass in classOrIterable)
+ return className
+
+ def addCleanup(self, function, *arguments, **keywordArguments):
+ """Add a cleanup function to be called after tearDown.
+
+ Functions added with addCleanup will be called in reverse order of
+ adding after tearDown, or after setUp if setUp raises an exception.
+
+ If a function added with addCleanup raises an exception, the error
+ will be recorded as a test error, and the next cleanup will then be
+ run.
+
+ Cleanup functions are always called before a test finishes running,
+ even if setUp is aborted by an exception.
+ """
+ self._cleanups.append((function, arguments, keywordArguments))
+
+ def addOnException(self, handler):
+ """Add a handler to be called when an exception occurs in test code.
+
+ This handler cannot affect what result methods are called, and is
+ called before any outcome is called on the result object. An example
+ use for it is to add some diagnostic state to the test details dict
+ which is expensive to calculate and not interesting for reporting in
+ the success case.
+
+ Handlers are called before the outcome (such as addFailure) that
+ the exception has caused.
+
+ Handlers are called in first-added, first-called order, and if they
+        raise an exception, that will propagate out of the test running
+ machinery, halting test processing. As a result, do not call code that
+ may unreasonably fail.
+ """
+ self.__exception_handlers.append(handler)
+
+ def _add_reason(self, reason):
+ self.addDetail('reason', content.text_content(reason))
+
+ def assertEqual(self, expected, observed, message=''):
+ """Assert that 'expected' is equal to 'observed'.
+
+ :param expected: The expected value.
+ :param observed: The observed value.
+ :param message: An optional message to include in the error.
+ """
+ matcher = Equals(expected)
+ self.assertThat(observed, matcher, message)
+
+ failUnlessEqual = assertEquals = assertEqual
+
+ def assertIn(self, needle, haystack):
+ """Assert that needle is in haystack."""
+ self.assertThat(haystack, Contains(needle))
+
+ def assertIsNone(self, observed, message=''):
+ """Assert that 'observed' is equal to None.
+
+ :param observed: The observed value.
+ :param message: An optional message describing the error.
+ """
+ matcher = Is(None)
+ self.assertThat(observed, matcher, message)
+
+ def assertIsNotNone(self, observed, message=''):
+ """Assert that 'observed' is not equal to None.
+
+ :param observed: The observed value.
+ :param message: An optional message describing the error.
+ """
+ matcher = Not(Is(None))
+ self.assertThat(observed, matcher, message)
+
+ def assertIs(self, expected, observed, message=''):
+ """Assert that 'expected' is 'observed'.
+
+ :param expected: The expected value.
+ :param observed: The observed value.
+ :param message: An optional message describing the error.
+ """
+ matcher = Is(expected)
+ self.assertThat(observed, matcher, message)
+
+ def assertIsNot(self, expected, observed, message=''):
+ """Assert that 'expected' is not 'observed'."""
+ matcher = Not(Is(expected))
+ self.assertThat(observed, matcher, message)
+
+ def assertNotIn(self, needle, haystack):
+ """Assert that needle is not in haystack."""
+ matcher = Not(Contains(needle))
+ self.assertThat(haystack, matcher)
+
+ def assertIsInstance(self, obj, klass, msg=None):
+ if isinstance(klass, tuple):
+ matcher = IsInstance(*klass)
+ else:
+ matcher = IsInstance(klass)
+ self.assertThat(obj, matcher, msg)
+
+ def assertRaises(self, excClass, callableObj, *args, **kwargs):
+ """Fail unless an exception of class excClass is thrown
+ by callableObj when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+ """
+ class ReRaiseOtherTypes(object):
+ def match(self, matchee):
+ if not issubclass(matchee[0], excClass):
+ reraise(*matchee)
+ class CaptureMatchee(object):
+ def match(self, matchee):
+ self.matchee = matchee[1]
+ capture = CaptureMatchee()
+ matcher = Raises(MatchesAll(ReRaiseOtherTypes(),
+ MatchesException(excClass), capture))
+ our_callable = Nullary(callableObj, *args, **kwargs)
+ self.assertThat(our_callable, matcher)
+ return capture.matchee
+ failUnlessRaises = assertRaises
+
+ def assertThat(self, matchee, matcher, message='', verbose=False):
+ """Assert that matchee is matched by matcher.
+
+ :param matchee: An object to match with matcher.
+ :param matcher: An object meeting the testtools.Matcher protocol.
+        :raises MismatchError: When matcher does not match matchee.
+ """
+ matcher = Annotate.if_message(message, matcher)
+ mismatch = matcher.match(matchee)
+ if not mismatch:
+ return
+ existing_details = self.getDetails()
+ for (name, content) in mismatch.get_details().items():
+ self.addDetailUniqueName(name, content)
+ raise MismatchError(matchee, matcher, mismatch, verbose)
+
+ def addDetailUniqueName(self, name, content_object):
+        """Add a detail to the test, but ensure its name is unique.
+
+ This method checks whether ``name`` conflicts with a detail that has
+ already been added to the test. If it does, it will modify ``name`` to
+ avoid the conflict.
+
+ For more details see pydoc testtools.TestResult.
+
+ :param name: The name to give this detail.
+ :param content_object: The content object for this detail. See
+ testtools.content for more detail.
+ """
+ existing_details = self.getDetails()
+ full_name = name
+ suffix = 1
+ while full_name in existing_details:
+ full_name = "%s-%d" % (name, suffix)
+ suffix += 1
+ self.addDetail(full_name, content_object)
+
+ def defaultTestResult(self):
+ return TestResult()
+
+ def expectFailure(self, reason, predicate, *args, **kwargs):
+ """Check that a test fails in a particular way.
+
+ If the test fails in the expected way, a KnownFailure is caused. If it
+        succeeds, an UnexpectedSuccess is caused.
+
+ The expected use of expectFailure is as a barrier at the point in a
+ test where the test would fail. For example:
+ >>> def test_foo(self):
+ >>> self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
+ >>> self.assertEqual(1, 0)
+
+ If in the future 1 were to equal 0, the expectFailure call can simply
+ be removed. This separation preserves the original intent of the test
+ while it is in the expectFailure mode.
+ """
+ # TODO: implement with matchers.
+ self._add_reason(reason)
+ try:
+ predicate(*args, **kwargs)
+ except self.failureException:
+ # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new
+ # unittest _ExpectedFailure wants old traceback
+ exc_info = sys.exc_info()
+ try:
+ self._report_traceback(exc_info)
+ raise _ExpectedFailure(exc_info)
+ finally:
+ del exc_info
+ else:
+ raise _UnexpectedSuccess(reason)
+
+ def getUniqueInteger(self):
+ """Get an integer unique to this test.
+
+ Returns an integer that is guaranteed to be unique to this instance.
+ Use this when you need an arbitrary integer in your test, or as a
+ helper for custom anonymous factory methods.
+ """
+ return advance_iterator(self._unique_id_gen)
+
+ def getUniqueString(self, prefix=None):
+ """Get a string unique to this test.
+
+ Returns a string that is guaranteed to be unique to this instance. Use
+ this when you need an arbitrary string in your test, or as a helper
+ for custom anonymous factory methods.
+
+ :param prefix: The prefix of the string. If not provided, defaults
+ to the id of the test.
+ :return: A bytestring of '<prefix>-<unique_int>'.
+ """
+ if prefix is None:
+ prefix = self.id()
+ return '%s-%d' % (prefix, self.getUniqueInteger())
+
+ def onException(self, exc_info, tb_label='traceback'):
+ """Called when an exception propogates from test code.
+
+ :seealso: addOnException
+ """
+ if exc_info[0] not in [
+ TestSkipped, _UnexpectedSuccess, _ExpectedFailure]:
+ self._report_traceback(exc_info, tb_label=tb_label)
+ for handler in self.__exception_handlers:
+ handler(exc_info)
+
+ @staticmethod
+ def _report_error(self, result, err):
+ result.addError(self, details=self.getDetails())
+
+ @staticmethod
+ def _report_expected_failure(self, result, err):
+ result.addExpectedFailure(self, details=self.getDetails())
+
+ @staticmethod
+ def _report_failure(self, result, err):
+ result.addFailure(self, details=self.getDetails())
+
+ @staticmethod
+ def _report_skip(self, result, err):
+ if err.args:
+ reason = err.args[0]
+ else:
+ reason = "no reason given."
+ self._add_reason(reason)
+ result.addSkip(self, details=self.getDetails())
+
+ def _report_traceback(self, exc_info, tb_label='traceback'):
+ id_gen = self._traceback_id_gens.setdefault(
+ tb_label, itertools.count(0))
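+ # The first traceback keeps the plain label; later ones get a numeric
+ # suffix (traceback-1, traceback-2, ...) so each detail name stays unique.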
+ while True:
+ tb_id = advance_iterator(id_gen)
+ if tb_id:
+ tb_label = '%s-%d' % (tb_label, tb_id)
+ if tb_label not in self.getDetails():
+ break
+ self.addDetail(tb_label, content.TracebackContent(exc_info, self))
+
+ @staticmethod
+ def _report_unexpected_success(self, result, err):
+ result.addUnexpectedSuccess(self, details=self.getDetails())
+
+ def run(self, result=None):
+ return self.__RunTest(self, self.exception_handlers).run(result)
+
+ def _run_setup(self, result):
+ """Run the setUp function for this test.
+
+ :param result: A testtools.TestResult to report activity to.
+ :raises ValueError: If the base class setUp is not called, a
+ ValueError is raised.
+ """
+ ret = self.setUp()
+ if not self.__setup_called:
+ raise ValueError(
+ "In File: %s\n"
+ "TestCase.setUp was not called. Have you upcalled all the "
+ "way up the hierarchy from your setUp? e.g. Call "
+ "super(%s, self).setUp() from your setUp()."
+ % (sys.modules[self.__class__.__module__].__file__,
+ self.__class__.__name__))
+ return ret
+
+ def _run_teardown(self, result):
+ """Run the tearDown function for this test.
+
+ :param result: A testtools.TestResult to report activity to.
+ :raises ValueError: If the base class tearDown is not called, a
+ ValueError is raised.
+ """
+ ret = self.tearDown()
+ if not self.__teardown_called:
+ raise ValueError(
+ "In File: %s\n"
+ "TestCase.tearDown was not called. Have you upcalled all the "
+ "way up the hierarchy from your tearDown? e.g. Call "
+ "super(%s, self).tearDown() from your tearDown()."
+ % (sys.modules[self.__class__.__module__].__file__,
+ self.__class__.__name__))
+ return ret
+
+ def _get_test_method(self):
+ absent_attr = object()
+ # Python 2.5+
+ method_name = getattr(self, '_testMethodName', absent_attr)
+ if method_name is absent_attr:
+ # Python 2.4
+ method_name = getattr(self, '_TestCase__testMethodName')
+ return getattr(self, method_name)
+
+ def _run_test_method(self, result):
+ """Run the test method for this test.
+
+ :param result: A testtools.TestResult to report activity to.
+ :return: None.
+ """
+ return self._get_test_method()()
+
+ def useFixture(self, fixture):
+ """Use fixture in a test case.
+
+ The fixture's setUp is called, and self.addCleanup(fixture.cleanUp) scheduled.
+
+ :param fixture: The fixture to use.
+ :return: The fixture, after setting it up and scheduling a cleanup for
+ it.
+ """
+ try:
+ fixture.setUp()
+ except:
+ gather_details(fixture.getDetails(), self.getDetails())
+ raise
+ else:
+ self.addCleanup(fixture.cleanUp)
+ self.addCleanup(
+ gather_details, fixture.getDetails(), self.getDetails())
+ return fixture
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+ self.__setup_called = True
+
+ def tearDown(self):
+ super(TestCase, self).tearDown()
+ unittest.TestCase.tearDown(self)
+ self.__teardown_called = True
+
+
+class PlaceHolder(object):
+ """A placeholder test.
+
+ `PlaceHolder` implements much of the same interface as TestCase and is
+ particularly suitable for being added to TestResults.
+ """
+
+ failureException = None
+
+ def __init__(self, test_id, short_description=None, details=None,
+ outcome='addSuccess', error=None, tags=None, timestamps=(None, None)):
+ """Construct a `PlaceHolder`.
+
+ :param test_id: The id of the placeholder test.
+ :param short_description: The short description of the place holder
+ test. If not provided, the id will be used instead.
+ :param details: Outcome details as accepted by addSuccess etc.
+ :param outcome: The outcome to call. Defaults to 'addSuccess'.
+ :param error: An exc_info tuple. If supplied, it is added to details
+ under the name 'traceback'.
+ :param tags: Tags to report for the test.
+ :param timestamps: A two-tuple of timestamps for the test start and
+ finish. Each timestamp may be None to indicate it is not known.
+ """
+ self._test_id = test_id
+ self._short_description = short_description
+ self._details = details or {}
+ self._outcome = outcome
+ if error is not None:
+ self._details['traceback'] = content.TracebackContent(error, self)
+ tags = tags or frozenset()
+ self._tags = frozenset(tags)
+ self._timestamps = timestamps
+
+ def __call__(self, result=None):
+ return self.run(result=result)
+
+ def __repr__(self):
+ internal = [self._outcome, self._test_id, self._details]
+ if self._short_description is not None:
+ internal.append(self._short_description)
+ return "<%s.%s(%s)>" % (
+ self.__class__.__module__,
+ self.__class__.__name__,
+ ", ".join(map(repr, internal)))
+
+ def __str__(self):
+ return self.id()
+
+ def countTestCases(self):
+ return 1
+
+ def debug(self):
+ pass
+
+ def id(self):
+ return self._test_id
+
+ def _result(self, result):
+ if result is None:
+ return TestResult()
+ else:
+ return ExtendedToOriginalDecorator(result)
+
+ def run(self, result=None):
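+ # Set the placeholder's tags and timestamps around a single
+ # startTest / outcome / stopTest sequence on the wrapped result.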
+ result = self._result(result)
+ if self._timestamps[0] is not None:
+ result.time(self._timestamps[0])
+ result.tags(self._tags, set())
+ result.startTest(self)
+ if self._timestamps[1] is not None:
+ result.time(self._timestamps[1])
+ outcome = getattr(result, self._outcome)
+ outcome(self, details=self._details)
+ result.stopTest(self)
+ result.tags(set(), self._tags)
+
+ def shortDescription(self):
+ if self._short_description is None:
+ return self.id()
+ else:
+ return self._short_description
+
+
+def ErrorHolder(test_id, error, short_description=None, details=None):
+ """Construct an `ErrorHolder`.
+
+ :param test_id: The id of the test.
+ :param error: The exc info tuple that will be used as the test's error.
+ This is inserted into the details as 'traceback' - any existing key
+ will be overridden.
+ :param short_description: An optional short description of the test.
+ :param details: Outcome details as accepted by addSuccess etc.
+ """
+ return PlaceHolder(test_id, short_description=short_description,
+ details=details, outcome='addError', error=error)
+
+
+def _clone_test_id_callback(test, callback):
+ """Copy a `TestCase`, and make it call callback for its id().
+
+ This is only expected to be used on tests that have been constructed but
+ not executed.
+
+ :param test: A TestCase instance.
+ :param callback: A callable that takes no parameters and returns a string.
+ :return: A copy.copy of the test with id=callback.
+ """
+ newTest = copy.copy(test)
+ newTest.id = callback
+ return newTest
+
+
+def clone_test_with_new_id(test, new_id):
+ """Copy a `TestCase`, and give the copied test a new id.
+
+ This is only expected to be used on tests that have been constructed but
+ not executed.
+ """
+ return _clone_test_id_callback(test, lambda: new_id)
+
+
+def attr(*args):
+ """Decorator for adding attributes to WithAttributes.
+
+ :param args: The name of attributes to add.
+ :return: A callable that when applied to a WithAttributes will
+ alter its id to enumerate the added attributes.
+ """
+ def decorate(fn):
+ if not safe_hasattr(fn, '__testtools_attrs'):
+ fn.__testtools_attrs = set()
+ fn.__testtools_attrs.update(args)
+ return fn
+ return decorate
+
+
+class WithAttributes(object):
+ """A mix-in class for modifying test id by attributes.
+
+ e.g.
+ >>> class MyTest(WithAttributes, TestCase):
+ ... @attr('foo')
+ ... def test_bar(self):
+ ... pass
+ >>> MyTest('test_bar').id()
+ testtools.testcase.MyTest/test_bar[foo]
+ """
+
+ def id(self):
+ orig = super(WithAttributes, self).id()
+ # Depends on testtools.TestCase._get_test_method, be nice to support
+ # plain unittest.
+ fn = self._get_test_method()
+ attributes = getattr(fn, '__testtools_attrs', None)
+ if not attributes:
+ return orig
+ return orig + '[' + ','.join(sorted(attributes)) + ']'
+
+
+def skip(reason):
+ """A decorator to skip unit tests.
+
+ This is just syntactic sugar so users don't have to change any of their
+ unit tests in order to migrate to python 2.7, which provides the
+ @unittest.skip decorator.
+ """
+ def decorator(test_item):
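+ # 'wraps' may be None (e.g. when functools.wraps is unavailable); fall
+ # back to a wrapper that calls the test's own skip() method instead.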
+ if wraps is not None:
+ @wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise TestCase.skipException(reason)
+ else:
+ def skip_wrapper(test_item):
+ test_item.skip(reason)
+ return skip_wrapper
+ return decorator
+
+
+def skipIf(condition, reason):
+ """A decorator to skip a test if the condition is true."""
+ if condition:
+ return skip(reason)
+ def _id(obj):
+ return obj
+ return _id
+
+
+def skipUnless(condition, reason):
+ """A decorator to skip a test unless the condition is true."""
+ if not condition:
+ return skip(reason)
+ def _id(obj):
+ return obj
+ return _id
+
+
+class ExpectedException:
+ """A context manager to handle expected exceptions.
+
+ In Python 2.5 or later::
+
+ def test_foo(self):
+ with ExpectedException(ValueError, 'fo.*'):
+ raise ValueError('foo')
+
+ will pass. If the raised exception has a type other than the specified
+ type, it will be re-raised. If it has a 'str()' that does not match the
+ given regular expression, an AssertionError will be raised. If no
+ exception is raised, an AssertionError will be raised.
+ """
+
+ def __init__(self, exc_type, value_re=None, msg=None):
+ """Construct an `ExpectedException`.
+
+ :param exc_type: The type of exception to expect.
+ :param value_re: A regular expression to match against the
+ 'str()' of the raised exception.
+ :param msg: An optional message explaining the failure.
+ """
+ self.exc_type = exc_type
+ self.value_re = value_re
+ self.msg = msg
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
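+ # Returning False re-raises exceptions of an unexpected type; returning
+ # True swallows the expected exception.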
+ if exc_type is None:
+ error_msg = '%s not raised.' % self.exc_type.__name__
+ if self.msg:
+ error_msg = error_msg + ' : ' + self.msg
+ raise AssertionError(error_msg)
+ if exc_type != self.exc_type:
+ return False
+ if self.value_re:
+ matcher = MatchesException(self.exc_type, self.value_re)
+ if self.msg:
+ matcher = Annotate(self.msg, matcher)
+ mismatch = matcher.match((exc_type, exc_value, traceback))
+ if mismatch:
+ raise AssertionError(mismatch.describe())
+ return True
+
+
+class Nullary(object):
+ """Turn a callable into a nullary callable.
+
+ The advantage of this over ``lambda: f(*args, **kwargs)`` is that it
+ preserves the ``repr()`` of ``f``.
+ """
+
+ def __init__(self, callable_object, *args, **kwargs):
+ self._callable_object = callable_object
+ self._args = args
+ self._kwargs = kwargs
+
+ def __call__(self):
+ return self._callable_object(*self._args, **self._kwargs)
+
+ def __repr__(self):
+ return repr(self._callable_object)
+
+
+class DecorateTestCaseResult(object):
+ """Decorate a TestCase and permit customisation of the result for runs."""
+
+ def __init__(self, case, callout, before_run=None, after_run=None):
+ """Construct a DecorateTestCaseResult.
+
+ :param case: The case to decorate.
+ :param callout: A callback to call when run/__call__/debug is called.
+ Must take a result parameter and return a result object to be used.
+ For instance: lambda result: result.
+ :param before_run: If set, call this with the decorated result before
+ calling into the decorated run/__call__ method.
+ :param after_run: If set, call this with the decorated result after
+ calling into the decorated run/__call__ method.
+ """
+ self.decorated = case
+ self.callout = callout
+ self.before_run = before_run
+ self.after_run = after_run
+
+ def _run(self, result, run_method):
+ result = self.callout(result)
+ if self.before_run:
+ self.before_run(result)
+ try:
+ return run_method(result)
+ finally:
+ if self.after_run:
+ self.after_run(result)
+
+ def run(self, result=None):
+ self._run(result, self.decorated.run)
+
+ def __call__(self, result=None):
+ self._run(result, self.decorated)
+
+ def __getattr__(self, name):
+ return getattr(self.decorated, name)
+
+ def __delattr__(self, name):
+ delattr(self.decorated, name)
+
+ def __setattr__(self, name, value):
+ if name in ('decorated', 'callout', 'before_run', 'after_run'):
+ self.__dict__[name] = value
+ return
+ setattr(self.decorated, name, value)
+
+
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py
new file mode 100644
index 00000000000..5bf8f9c673c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Test result objects."""
+
+__all__ = [
+ 'CopyStreamResult',
+ 'ExtendedToOriginalDecorator',
+ 'ExtendedToStreamDecorator',
+ 'MultiTestResult',
+ 'StreamFailFast',
+ 'StreamResult',
+ 'StreamResultRouter',
+ 'StreamSummary',
+ 'StreamTagger',
+ 'StreamToDict',
+ 'StreamToExtendedDecorator',
+ 'StreamToQueue',
+ 'Tagger',
+ 'TestByTestResult',
+ 'TestControl',
+ 'TestResult',
+ 'TestResultDecorator',
+ 'TextTestResult',
+ 'ThreadsafeForwardingResult',
+ 'TimestampingStreamResult',
+ ]
+
+from testtools.testresult.real import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestByTestResult,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ )
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py
new file mode 100644
index 00000000000..d86f7fae2c1
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
+
+"""Doubles of test result objects, useful for testing unittest code."""
+
+__all__ = [
+ 'Python26TestResult',
+ 'Python27TestResult',
+ 'ExtendedTestResult',
+ 'StreamResult',
+ ]
+
+
+from testtools.tags import TagContext
+
+
+class LoggingBase(object):
+ """Basic support for logging of results."""
+
+ def __init__(self):
+ self._events = []
+ self.shouldStop = False
+ self._was_successful = True
+ self.testsRun = 0
+
+
+class Python26TestResult(LoggingBase):
+ """A precisely python 2.6 like test result, that logs."""
+
+ def addError(self, test, err):
+ self._was_successful = False
+ self._events.append(('addError', test, err))
+
+ def addFailure(self, test, err):
+ self._was_successful = False
+ self._events.append(('addFailure', test, err))
+
+ def addSuccess(self, test):
+ self._events.append(('addSuccess', test))
+
+ def startTest(self, test):
+ self._events.append(('startTest', test))
+ self.testsRun += 1
+
+ def stop(self):
+ self.shouldStop = True
+
+ def stopTest(self, test):
+ self._events.append(('stopTest', test))
+
+ def wasSuccessful(self):
+ return self._was_successful
+
+
+class Python27TestResult(Python26TestResult):
+ """A precisely python 2.7 like test result, that logs."""
+
+ def __init__(self):
+ super(Python27TestResult, self).__init__()
+ self.failfast = False
+
+ def addError(self, test, err):
+ super(Python27TestResult, self).addError(test, err)
+ if self.failfast:
+ self.stop()
+
+ def addFailure(self, test, err):
+ super(Python27TestResult, self).addFailure(test, err)
+ if self.failfast:
+ self.stop()
+
+ def addExpectedFailure(self, test, err):
+ self._events.append(('addExpectedFailure', test, err))
+
+ def addSkip(self, test, reason):
+ self._events.append(('addSkip', test, reason))
+
+ def addUnexpectedSuccess(self, test):
+ self._events.append(('addUnexpectedSuccess', test))
+ if self.failfast:
+ self.stop()
+
+ def startTestRun(self):
+ self._events.append(('startTestRun',))
+
+ def stopTestRun(self):
+ self._events.append(('stopTestRun',))
+
+
+class ExtendedTestResult(Python27TestResult):
+ """A test result like the proposed extended unittest result API."""
+
+ def __init__(self):
+ super(ExtendedTestResult, self).__init__()
+ self._tags = TagContext()
+
+ def addError(self, test, err=None, details=None):
+ self._was_successful = False
+ self._events.append(('addError', test, err or details))
+
+ def addFailure(self, test, err=None, details=None):
+ self._was_successful = False
+ self._events.append(('addFailure', test, err or details))
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._events.append(('addExpectedFailure', test, err or details))
+
+ def addSkip(self, test, reason=None, details=None):
+ self._events.append(('addSkip', test, reason or details))
+
+ def addSuccess(self, test, details=None):
+ if details:
+ self._events.append(('addSuccess', test, details))
+ else:
+ self._events.append(('addSuccess', test))
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._was_successful = False
+ if details is not None:
+ self._events.append(('addUnexpectedSuccess', test, details))
+ else:
+ self._events.append(('addUnexpectedSuccess', test))
+
+ def progress(self, offset, whence):
+ self._events.append(('progress', offset, whence))
+
+ def startTestRun(self):
+ super(ExtendedTestResult, self).startTestRun()
+ self._was_successful = True
+ self._tags = TagContext()
+
+ def startTest(self, test):
+ super(ExtendedTestResult, self).startTest(test)
+ self._tags = TagContext(self._tags)
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+ super(ExtendedTestResult, self).stopTest(test)
+
+ @property
+ def current_tags(self):
+ return self._tags.get_current_tags()
+
+ def tags(self, new_tags, gone_tags):
+ self._tags.change_tags(new_tags, gone_tags)
+ self._events.append(('tags', new_tags, gone_tags))
+
+ def time(self, time):
+ self._events.append(('time', time))
+
+ def wasSuccessful(self):
+ return self._was_successful
+
+
+class StreamResult(object):
+ """A StreamResult implementation for testing.
+
+ All events are logged to _events.
+ """
+
+ def __init__(self):
+ self._events = []
+
+ def startTestRun(self):
+ self._events.append(('startTestRun',))
+
+ def stopTestRun(self):
+ self._events.append(('stopTestRun',))
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ self._events.append(('status', test_id, test_status, test_tags,
+ runnable, file_name, file_bytes, eof, mime_type, route_code,
+ timestamp))
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py
new file mode 100644
index 00000000000..e8d70b399d7
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py
@@ -0,0 +1,1776 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test results and related things."""
+
+__metaclass__ = type
+__all__ = [
+ 'ExtendedToOriginalDecorator',
+ 'ExtendedToStreamDecorator',
+ 'MultiTestResult',
+ 'StreamFailFast',
+ 'StreamResult',
+ 'StreamSummary',
+ 'StreamTagger',
+ 'StreamToDict',
+ 'StreamToExtendedDecorator',
+ 'StreamToQueue',
+ 'Tagger',
+ 'TestControl',
+ 'TestResult',
+ 'TestResultDecorator',
+ 'ThreadsafeForwardingResult',
+ 'TimestampingStreamResult',
+ ]
+
+import datetime
+from operator import methodcaller
+import sys
+import unittest
+
+from extras import safe_hasattr, try_import, try_imports
+parse_mime_type = try_import('mimeparse.parse_mime_type')
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools.compat import all, str_is_unicode, _u, _b
+from testtools.content import (
+ Content,
+ text_content,
+ TracebackContent,
+ )
+from testtools.content_type import ContentType
+from testtools.tags import TagContext
+# circular import
+# from testtools.testcase import PlaceHolder
+PlaceHolder = None
+
+# From http://docs.python.org/library/datetime.html
+_ZERO = datetime.timedelta(0)
+
+# A UTC class.
+
+class UTC(datetime.tzinfo):
+ """UTC"""
+
+ def utcoffset(self, dt):
+ return _ZERO
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return _ZERO
+
+utc = UTC()
+
+
+class TestResult(unittest.TestResult):
+ """Subclass of unittest.TestResult extending the protocol for flexability.
+
+ This test result supports an experimental protocol for providing additional
+ data in test outcomes. All the outcome methods take an optional dict
+ 'details'. If supplied, any other detail parameters like 'err' or 'reason'
+ should not be provided. The details dict is a mapping from names to
+ MIME content objects (see testtools.content). This permits attaching
+ tracebacks, log files, or even large objects like databases that were
+ part of the test fixture. Until this API is accepted into upstream
+ Python it is considered experimental: it may be replaced at any point
+ by a newer version more in line with upstream Python. Compatibility would
+ be aimed for in this case, but may not be possible.
+
+ :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
+ """
+
+ def __init__(self, failfast=False):
+ # startTestRun resets all attributes, and older clients don't know to
+ # call startTestRun, so it is called once here.
+ # Because subclasses may reasonably not expect this, we call the
+ # specific version we want to run.
+ self.failfast = failfast
+ TestResult.startTestRun(self)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ """Called when a test has failed in an expected manner.
+
+ Like with addSuccess and addError, testStopped should still be called.
+
+ :param test: The test that has been skipped.
+ :param err: The exc_info of the error that was raised.
+ :return: None
+ """
+ # This is the python 2.7 implementation
+ self.expectedFailures.append(
+ (test, self._err_details_to_string(test, err, details)))
+
+ def addError(self, test, err=None, details=None):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ """
+ self.errors.append((test,
+ self._err_details_to_string(test, err, details)))
+ if self.failfast:
+ self.stop()
+
+ def addFailure(self, test, err=None, details=None):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ """
+ self.failures.append((test,
+ self._err_details_to_string(test, err, details)))
+ if self.failfast:
+ self.stop()
+
+ def addSkip(self, test, reason=None, details=None):
+ """Called when a test has been skipped rather than running.
+
+ Like with addSuccess and addError, testStopped should still be called.
+
+ This must be called by the TestCase. 'addError' and 'addFailure' will
+ not call addSkip, since they have no assumptions about the kind of
+ errors that a test can raise.
+
+ :param test: The test that has been skipped.
+ :param reason: The reason for the test being skipped. For instance,
+ u"pyGL is not available".
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ :return: None
+ """
+ if reason is None:
+ reason = details.get('reason')
+ if reason is None:
+ reason = 'No reason given'
+ else:
+ reason = reason.as_text()
+ skip_list = self.skip_reasons.setdefault(reason, [])
+ skip_list.append(test)
+
+ def addSuccess(self, test, details=None):
+ """Called when a test succeeded."""
+
+ def addUnexpectedSuccess(self, test, details=None):
+ """Called when a test was expected to fail, but succeed."""
+ self.unexpectedSuccesses.append(test)
+ if self.failfast:
+ self.stop()
+
+ def wasSuccessful(self):
+ """Has this result been successful so far?
+
+ If there have been any errors, failures or unexpected successes,
+ return False. Otherwise, return True.
+
+ Note: This differs from standard unittest in that we consider
+ unexpected successes to be equivalent to failures, rather than
+ successes.
+ """
+ return not (self.errors or self.failures or self.unexpectedSuccesses)
+
+ def _err_details_to_string(self, test, err=None, details=None):
+ """Convert an error in exc_info form or a contents dict to a string."""
+ if err is not None:
+ return TracebackContent(err, test).as_text()
+ return _details_to_str(details, special='traceback')
+
+ def _exc_info_to_unicode(self, err, test):
+ # Deprecated. Only present because subunit upcalls to it. See
+ # <https://bugs.launchpad.net/testtools/+bug/929063>.
+ return TracebackContent(err, test).as_text()
+
+ def _now(self):
+ """Return the current 'test time'.
+
+ If the time() method has not been called, this is equivalent to
+ datetime.now(), otherwise it's the last datestamp supplied to the
+ time() method.
+ """
+ if self.__now is None:
+ return datetime.datetime.now(utc)
+ else:
+ return self.__now
+
+ def startTestRun(self):
+ """Called before a test run starts.
+
+ New in Python 2.7. The testtools version resets the result to a
+ pristine condition ready for use in another test run. Note that this
+ is different from Python 2.7's startTestRun, which does nothing.
+ """
+ # failfast is reset by the super __init__, so stash it.
+ failfast = self.failfast
+ super(TestResult, self).__init__()
+ self.skip_reasons = {}
+ self.__now = None
+ self._tags = TagContext()
+ # -- Start: As per python 2.7 --
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+ self.failfast = failfast
+ # -- End: As per python 2.7 --
+
+ def stopTestRun(self):
+ """Called after a test run completes
+
+ New in python 2.7
+ """
+
+ def startTest(self, test):
+ super(TestResult, self).startTest(test)
+ self._tags = TagContext(self._tags)
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+ super(TestResult, self).stopTest(test)
+
+ @property
+ def current_tags(self):
+ """The currently set tags."""
+ return self._tags.get_current_tags()
+
+ def tags(self, new_tags, gone_tags):
+ """Add and remove tags from the test.
+
+ :param new_tags: A set of tags to be added to the stream.
+ :param gone_tags: A set of tags to be removed from the stream.
+ """
+ self._tags.change_tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ """Provide a timestamp to represent the current time.
+
+ This is useful when test activity is time delayed, or happening
+ concurrently and getting the system time between API calls will not
+ accurately represent the duration of tests (or the whole run).
+
+ Calling time() sets the datetime used by the TestResult object.
+ Time is permitted to go backwards when using this call.
+
+ :param a_datetime: A datetime.datetime object with TZ information or
+ None to reset the TestResult to gathering time from the system.
+ """
+ self.__now = a_datetime
+
+ def done(self):
+ """Called when the test runner is done.
+
+ Deprecated in favour of stopTestRun.
+ """
+
+
+class StreamResult(object):
+ """A test result for reporting the activity of a test run.
+
+ Typical use
+ -----------
+
+ >>> result = StreamResult()
+ >>> result.startTestRun()
+ >>> try:
+ ... case.run(result)
+ ... finally:
+ ... result.stopTestRun()
+
+ The case object will be either a TestCase or a TestSuite, and
+ will generally make a sequence of calls like::
+
+ >>> result.status(self.id(), 'inprogress')
+ >>> result.status(self.id(), 'success')
+
+ General concepts
+ ----------------
+
+ StreamResult is built to process events that are emitted by tests during a
+ test run or test enumeration. The test run may be running concurrently, and
+ even be spread out across multiple machines.
+
+ All events are timestamped to prevent network buffering or scheduling
+ latency causing false timing reports. Timestamps are datetime objects in
+ the UTC timezone.
+
+ A route_code is a unicode string that identifies where a particular test
+ ran. This is optional in the API but very useful when multiplexing multiple
+ streams together as it allows identification of interactions between tests
+ that were run on the same hardware or in the same test process. Generally
+ actual tests never need to bother with this - it is added and processed
+ by StreamResults that do multiplexing / run analysis. route_codes are
+ also used to route stdin back to pdb instances.
+
+ The StreamResult base class does no accounting or processing, rather it
+ just provides an empty implementation of every method, suitable for use
+ as a base class regardless of intent.
+ """
+
+ def startTestRun(self):
+ """Start a test run.
+
+ This will prepare the test result to process results (which might imply
+ connecting to a database or remote machine).
+ """
+
+ def stopTestRun(self):
+ """Stop a test run.
+
+ This informs the result that no more test updates will be received. At
+ this point any test ids that have started and not completed can be
+ considered failed-or-hung.
+ """
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ """Inform the result about a test status.
+
+ :param test_id: The test whose status is being reported. None to
+ report status about the test run as a whole.
+ :param test_status: The status for the test. There are two sorts of
+ status - interim and final status events. As many interim events
+ can be generated as desired, but only one final event. After a
+ final status event any further file or status events from the
+ same test_id+route_code may be discarded or associated with a new
+ test by the StreamResult. (But no exception will be thrown).
+
+ Interim states:
+ * None - no particular status is being reported, or status being
+ reported is not associated with a test (e.g. when reporting on
+ stdout / stderr chatter).
+ * inprogress - the test is currently running. Emitted by tests when
+ they start running and at any intermediary point they might
+ choose to indicate their continual operation.
+
+ Final states:
+ * exists - the test exists. This is used when a test is not being
+ executed. Typically this is when querying what tests could be run
+ in a test run (which is useful for selecting tests to run).
+ * xfail - the test failed but that was expected. This is purely
+ informative - the test is not considered to be a failure.
+ * uxsuccess - the test passed but was expected to fail. The test
+ will be considered a failure.
+ * success - the test has finished without error.
+ * fail - the test failed (or errored). The test will be considered
+ a failure.
+ * skip - the test was selected to run but chose to be skipped. E.g.
+ a test dependency was missing. This is purely informative - the
+ test is not considered to be a failure.
+
+ :param test_tags: Optional set of tags to apply to the test. Tags
+ have no intrinsic meaning - that is up to the test author.
+ :param runnable: Allows status reports to mark that they are for
+ tests which are not able to be explicitly run. For instance,
+ subtests will report themselves as non-runnable.
+ :param file_name: The name for the file_bytes. Any unicode string may
+ be used. While there is no semantic value attached to the name
+ of any attachment, the names 'stdout' and 'stderr' and 'traceback'
+ are recommended for use only for output sent to stdout, stderr and
+ tracebacks of exceptions. When file_name is supplied, file_bytes
+ must be a bytes instance.
+ :param file_bytes: A bytes object containing content for the named
+ file. This can just be a single chunk of the file - emitting
+ another file event with more later. Must be None unless a
+ file_name is supplied.
+ :param eof: True if this chunk is the last chunk of the file, any
+ additional chunks with the same name should be treated as an error
+ and discarded. Ignored unless file_name has been supplied.
+ :param mime_type: An optional MIME type for the file. stdout and
+ stderr will generally be "text/plain; charset=utf8". If None,
+ defaults to application/octet-stream. Ignored unless file_name
+ has been supplied.
+ """
+
+
+def domap(*args, **kwargs):
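+ # Force the map() to run eagerly; under Python 3 map() is lazy.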
+ return list(map(*args, **kwargs))
+
+
+class CopyStreamResult(StreamResult):
+ """Copies all event it receives to multiple results.
+
+ This provides an easy facility for combining multiple StreamResults.
+
+ For TestResult the equivalent class was ``MultiTestResult``.
+ """
+
+ def __init__(self, targets):
+ super(CopyStreamResult, self).__init__()
+ self.targets = targets
+
+ def startTestRun(self):
+ super(CopyStreamResult, self).startTestRun()
+ domap(methodcaller('startTestRun'), self.targets)
+
+ def stopTestRun(self):
+ super(CopyStreamResult, self).stopTestRun()
+ domap(methodcaller('stopTestRun'), self.targets)
+
+ def status(self, *args, **kwargs):
+ super(CopyStreamResult, self).status(*args, **kwargs)
+ domap(methodcaller('status', *args, **kwargs), self.targets)
+
+
+class StreamFailFast(StreamResult):
+ """Call the supplied callback if an error is seen in a stream.
+
+ An example callback::
+
+ def do_something():
+ pass
+ """
+
+ def __init__(self, on_error):
+ self.on_error = on_error
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ if test_status in ('uxsuccess', 'fail'):
+ self.on_error()
+
+
+class StreamResultRouter(StreamResult):
+ """A StreamResult that routes events.
+
+ StreamResultRouter forwards received events to another StreamResult object,
+ selected by a dynamic forwarding policy. Events where no destination is
+ found are forwarded to the fallback StreamResult, or an error is raised.
+
+ Typical use is to construct a router with a fallback and then either
+ create up front mapping rules, or create them as-needed from the fallback
+ handler::
+
+ >>> router = StreamResultRouter()
+ >>> sink = doubles.StreamResult()
+ >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+ ... consume_route=True)
+ >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+ StreamResultRouter has no buffering.
+
+ When adding routes (and for the fallback) whether to call startTestRun and
+ stopTestRun or to not call them is controllable by passing
+ 'do_start_stop_run'. The default is to call them for the fallback only.
+ If a route is added after startTestRun has been called, and
+ do_start_stop_run is True then startTestRun is called immediately on the
+ new route sink.
+
+ There is no a-priori defined lookup order for routes: if they are ambiguous
+ the behaviour is undefined. Only a single route is chosen for any event.
+ """
+
+ _policies = {}
+
+ def __init__(self, fallback=None, do_start_stop_run=True):
+ """Construct a StreamResultRouter with optional fallback.
+
+ :param fallback: A StreamResult to forward events to when no route
+ exists for them.
+ :param do_start_stop_run: If False do not pass startTestRun and
+ stopTestRun onto the fallback.
+ """
+ self.fallback = fallback
+ self._route_code_prefixes = {}
+ self._test_ids = {}
+ # Records sinks that should have do_start_stop_run called on them.
+ self._sinks = []
+ if do_start_stop_run and fallback:
+ self._sinks.append(fallback)
+ self._in_run = False
+
+ def startTestRun(self):
+ super(StreamResultRouter, self).startTestRun()
+ for sink in self._sinks:
+ sink.startTestRun()
+ self._in_run = True
+
+ def stopTestRun(self):
+ super(StreamResultRouter, self).stopTestRun()
+ for sink in self._sinks:
+ sink.stopTestRun()
+ self._in_run = False
+
+ def status(self, **kwargs):
+ route_code = kwargs.get('route_code', None)
+ test_id = kwargs.get('test_id', None)
+ if route_code is not None:
+ prefix = route_code.split('/')[0]
+ else:
+ prefix = route_code
+ if prefix in self._route_code_prefixes:
+ target, consume_route = self._route_code_prefixes[prefix]
+ if route_code is not None and consume_route:
+ route_code = route_code[len(prefix) + 1:]
+ if not route_code:
+ route_code = None
+ kwargs['route_code'] = route_code
+ elif test_id in self._test_ids:
+ target = self._test_ids[test_id]
+ else:
+ target = self.fallback
+ target.status(**kwargs)
+
+ def add_rule(self, sink, policy, do_start_stop_run=False, **policy_args):
+ """Add a rule to route events to sink when they match a given policy.
+
+ :param sink: A StreamResult to receive events.
+ :param policy: A routing policy. Valid policies are
+ 'route_code_prefix' and 'test_id'.
+ :param do_start_stop_run: If True then startTestRun and stopTestRun
+ events will be passed onto this sink.
+
+ :raises: ValueError if the policy is unknown
+ :raises: TypeError if the policy is given arguments it cannot handle.
+
+ ``route_code_prefix`` routes events based on a prefix of the route
+ code in the event. It takes a ``route_prefix`` argument to match on
+ (e.g. '0') and a ``consume_route`` argument, which, if True, removes
+ the prefix from the ``route_code`` when forwarding events.
+
+ ``test_id`` routes events based on the test id. It takes a single
+ argument, ``test_id``. Use ``None`` to select non-test events.
+ """
+ policy_method = StreamResultRouter._policies.get(policy, None)
+ if not policy_method:
+ raise ValueError("bad policy %r" % (policy,))
+ policy_method(self, sink, **policy_args)
+ if do_start_stop_run:
+ self._sinks.append(sink)
+ if self._in_run:
+ sink.startTestRun()
+
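+ # The assignments following each handler below run while the class body is
+ # executed, registering the policy name in _policies for add_rule().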
+ def _map_route_code_prefix(self, sink, route_prefix, consume_route=False):
+ if '/' in route_prefix:
+ raise TypeError(
+ "%r is more than one route step long" % (route_prefix,))
+ self._route_code_prefixes[route_prefix] = (sink, consume_route)
+ _policies['route_code_prefix'] = _map_route_code_prefix
+
+ def _map_test_id(self, sink, test_id):
+ self._test_ids[test_id] = sink
+ _policies['test_id'] = _map_test_id
+
+
+class StreamTagger(CopyStreamResult):
+ """Adds or discards tags from StreamResult events."""
+
+ def __init__(self, targets, add=None, discard=None):
+ """Create a StreamTagger.
+
+ :param targets: A list of targets to forward events onto.
+ :param add: Either None or an iterable of tags to add to each event.
+ :param discard: Either None or an iterable of tags to discard from each
+ event.
+ """
+ super(StreamTagger, self).__init__(targets)
+ self.add = frozenset(add or ())
+ self.discard = frozenset(discard or ())
+
+ def status(self, *args, **kwargs):
+ test_tags = kwargs.get('test_tags') or set()
+ test_tags.update(self.add)
+ test_tags.difference_update(self.discard)
+ kwargs['test_tags'] = test_tags or None
+ super(StreamTagger, self).status(*args, **kwargs)
+
+
+class StreamToDict(StreamResult):
+ """A specialised StreamResult that emits a callback as tests complete.
+
+ Top level file attachments are simply discarded. Hung tests are detected
+ by stopTestRun and notified there and then.
+
+ The callback is passed a dict with the following keys:
+
+ * id: the test id.
+ * tags: The tags for the test. A set of unicode strings.
+ * details: A dict of file attachments - ``testtools.content.Content``
+ objects.
+ * status: One of the StreamResult status codes (including inprogress) or
+ 'unknown' (used if only file events for a test were received...)
+ * timestamps: A pair of timestamps - the first one received with this
+ test id, and the one in the event that triggered the notification.
+ Hung tests have a None for the second end event. Timestamps are not
+ compared - their ordering is simply the order in which they were received in the stream.
+
+ Only the most recent tags observed in the stream are reported.
+ """
+
+ def __init__(self, on_test):
+ """Create a StreamToDict calling on_test on test completions.
+
+ :param on_test: A callback that accepts one parameter - a dict
+ describing a test.
+ """
+ super(StreamToDict, self).__init__()
+ self.on_test = on_test
+ if parse_mime_type is None:
+ raise ImportError("mimeparse module missing.")
+
+ def startTestRun(self):
+ super(StreamToDict, self).startTestRun()
+ self._inprogress = {}
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ super(StreamToDict, self).status(test_id, test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+ key = self._ensure_key(test_id, route_code, timestamp)
+ # update fields
+ if not key:
+ return
+ if test_status is not None:
+ self._inprogress[key]['status'] = test_status
+ self._inprogress[key]['timestamps'][1] = timestamp
+ case = self._inprogress[key]
+ if file_name is not None:
+ if file_name not in case['details']:
+ if mime_type is None:
+ mime_type = 'application/octet-stream'
+ primary, sub, parameters = parse_mime_type(mime_type)
+ if 'charset' in parameters:
+ if ',' in parameters['charset']:
+ # testtools was emitting a bad encoding, workaround it,
+ # Though this does lose data - probably want to drop
+ # this in a few releases.
+ parameters['charset'] = parameters['charset'][
+ :parameters['charset'].find(',')]
+ content_type = ContentType(primary, sub, parameters)
+ content_bytes = []
+ case['details'][file_name] = Content(
+ content_type, lambda:content_bytes)
+ case['details'][file_name].iter_bytes().append(file_bytes)
+ if test_tags is not None:
+ self._inprogress[key]['tags'] = test_tags
+ # notify completed tests.
+ if test_status not in (None, 'inprogress'):
+ self.on_test(self._inprogress.pop(key))
+
+ def stopTestRun(self):
+ super(StreamToDict, self).stopTestRun()
+ while self._inprogress:
+ case = self._inprogress.popitem()[1]
+ case['timestamps'][1] = None
+ self.on_test(case)
+
+ def _ensure_key(self, test_id, route_code, timestamp):
+ if test_id is None:
+ return
+ key = (test_id, route_code)
+ if key not in self._inprogress:
+ self._inprogress[key] = {
+ 'id': test_id,
+ 'tags': set(),
+ 'details': {},
+ 'status': 'unknown',
+ 'timestamps': [timestamp, None]}
+ return key
+
+
+_status_map = {
+ 'inprogress': 'addFailure',
+ 'unknown': 'addFailure',
+ 'success': 'addSuccess',
+ 'skip': 'addSkip',
+ 'fail': 'addFailure',
+ 'xfail': 'addExpectedFailure',
+ 'uxsuccess': 'addUnexpectedSuccess',
+ }
+
+
+def test_dict_to_case(test_dict):
+ """Convert a test dict into a TestCase object.
+
+ :param test_dict: A test dict as generated by StreamToDict.
+ :return: A PlaceHolder test object.
+ """
+ # Circular import.
+ global PlaceHolder
+ if PlaceHolder is None:
+ from testtools.testcase import PlaceHolder
+ outcome = _status_map[test_dict['status']]
+ return PlaceHolder(test_dict['id'], outcome=outcome,
+ details=test_dict['details'], tags=test_dict['tags'],
+ timestamps=test_dict['timestamps'])
+
+
+class StreamSummary(StreamToDict):
+ """A specialised StreamResult that summarises a stream.
+
+ The summary uses the same representation as the original
+ unittest.TestResult contract, allowing it to be consumed by any test
+ runner.
+ """
+
+ def __init__(self):
+ super(StreamSummary, self).__init__(self._gather_test)
+ self._handle_status = {
+ 'success': self._success,
+ 'skip': self._skip,
+ 'exists': self._exists,
+ 'fail': self._fail,
+ 'xfail': self._xfail,
+ 'uxsuccess': self._uxsuccess,
+ 'unknown': self._incomplete,
+ 'inprogress': self._incomplete,
+ }
+
+ def startTestRun(self):
+ super(StreamSummary, self).startTestRun()
+ self.failures = []
+ self.errors = []
+ self.testsRun = 0
+ self.skipped = []
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+
+ def wasSuccessful(self):
+ """Return False if any failure has occured.
+
+ Note that incomplete tests can only be detected when stopTestRun is
+ called, so that should be called before checking wasSuccessful.
+ """
+ return (not self.failures and not self.errors)
+
+ def _gather_test(self, test_dict):
+ if test_dict['status'] == 'exists':
+ return
+ self.testsRun += 1
+ case = test_dict_to_case(test_dict)
+ self._handle_status[test_dict['status']](case)
+
+ def _incomplete(self, case):
+ self.errors.append((case, "Test did not complete"))
+
+ def _success(self, case):
+ pass
+
+ def _skip(self, case):
+ if 'reason' not in case._details:
+ reason = "Unknown"
+ else:
+ reason = case._details['reason'].as_text()
+ self.skipped.append((case, reason))
+
+ def _exists(self, case):
+ pass
+
+ def _fail(self, case):
+ message = _details_to_str(case._details, special="traceback")
+ self.errors.append((case, message))
+
+ def _xfail(self, case):
+ message = _details_to_str(case._details, special="traceback")
+ self.expectedFailures.append((case, message))
+
+ def _uxsuccess(self, case):
+ case._outcome = 'addUnexpectedSuccess'
+ self.unexpectedSuccesses.append(case)
+
+
+class TestControl(object):
+ """Controls a running test run, allowing it to be interrupted.
+
+ :ivar shouldStop: If True, tests should not run and should instead
+ return immediately. Similarly a TestSuite should check this between
+ each test and if set stop dispatching any new tests and return.
+ """
+
+ def __init__(self):
+ super(TestControl, self).__init__()
+ self.shouldStop = False
+
+ def stop(self):
+ """Indicate that tests should stop running."""
+ self.shouldStop = True
+
+
+class MultiTestResult(TestResult):
+ """A test result that dispatches to many test results."""
+
+ def __init__(self, *results):
+ # Setup _results first, as the base class __init__ assigns to failfast.
+ self._results = list(map(ExtendedToOriginalDecorator, results))
+ super(MultiTestResult, self).__init__()
+
+ def __repr__(self):
+ return '<%s (%s)>' % (
+ self.__class__.__name__, ', '.join(map(repr, self._results)))
+
+ def _dispatch(self, message, *args, **kwargs):
+ return tuple(
+ getattr(result, message)(*args, **kwargs)
+ for result in self._results)
+
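+ # failfast is not stored locally: reads come from the first wrapped result
+ # and writes are dispatched to every wrapped result.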
+ def _get_failfast(self):
+ return getattr(self._results[0], 'failfast', False)
+ def _set_failfast(self, value):
+ self._dispatch('__setattr__', 'failfast', value)
+ failfast = property(_get_failfast, _set_failfast)
+
+ def _get_shouldStop(self):
+ return any(self._dispatch('__getattr__', 'shouldStop'))
+ def _set_shouldStop(self, value):
+ # Called because we subclass TestResult. Probably should not do that.
+ pass
+ shouldStop = property(_get_shouldStop, _set_shouldStop)
+
+ def startTest(self, test):
+ super(MultiTestResult, self).startTest(test)
+ return self._dispatch('startTest', test)
+
+ def stop(self):
+ return self._dispatch('stop')
+
+ def stopTest(self, test):
+ super(MultiTestResult, self).stopTest(test)
+ return self._dispatch('stopTest', test)
+
+ def addError(self, test, error=None, details=None):
+ return self._dispatch('addError', test, error, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self._dispatch(
+ 'addExpectedFailure', test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self._dispatch('addFailure', test, err, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self._dispatch('addSkip', test, reason, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self._dispatch('addSuccess', test, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self._dispatch('addUnexpectedSuccess', test, details=details)
+
+ def startTestRun(self):
+ super(MultiTestResult, self).startTestRun()
+ return self._dispatch('startTestRun')
+
+ def stopTestRun(self):
+ return self._dispatch('stopTestRun')
+
+ def tags(self, new_tags, gone_tags):
+ super(MultiTestResult, self).tags(new_tags, gone_tags)
+ return self._dispatch('tags', new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self._dispatch('time', a_datetime)
+
+ def done(self):
+ return self._dispatch('done')
+
+ def wasSuccessful(self):
+ """Was this result successful?
+
+ Only returns True if every constituent result was successful.
+ """
+ return all(self._dispatch('wasSuccessful'))
+
+
+class TextTestResult(TestResult):
+ """A TestResult which outputs activity to a text stream."""
+
+ def __init__(self, stream, failfast=False):
+ """Construct a TextTestResult writing to stream."""
+ super(TextTestResult, self).__init__(failfast=failfast)
+ self.stream = stream
+ self.sep1 = '=' * 70 + '\n'
+ self.sep2 = '-' * 70 + '\n'
+
+ def _delta_to_float(self, a_timedelta):
+ return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
+ a_timedelta.microseconds / 1000000.0)
+
+ def _show_list(self, label, error_list):
+ for test, output in error_list:
+ self.stream.write(self.sep1)
+ self.stream.write("%s: %s\n" % (label, test.id()))
+ self.stream.write(self.sep2)
+ self.stream.write(output)
+
+ def startTestRun(self):
+ super(TextTestResult, self).startTestRun()
+ self.__start = self._now()
+ self.stream.write("Tests running...\n")
+
+ def stopTestRun(self):
+ if self.testsRun != 1:
+ plural = 's'
+ else:
+ plural = ''
+ stop = self._now()
+ self._show_list('ERROR', self.errors)
+ self._show_list('FAIL', self.failures)
+ for test in self.unexpectedSuccesses:
+ self.stream.write(
+ "%sUNEXPECTED SUCCESS: %s\n%s" % (
+ self.sep1, test.id(), self.sep2))
+ self.stream.write("\nRan %d test%s in %.3fs\n" %
+ (self.testsRun, plural,
+ self._delta_to_float(stop - self.__start)))
+ if self.wasSuccessful():
+ self.stream.write("OK\n")
+ else:
+ self.stream.write("FAILED (")
+ details = []
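+ # Errors and unexpected successes are folded into the single
+ # "failures=" count reported here.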
+ details.append("failures=%d" % (
+ sum(map(len, (
+ self.failures, self.errors, self.unexpectedSuccesses)))))
+ self.stream.write(", ".join(details))
+ self.stream.write(")\n")
+ super(TextTestResult, self).stopTestRun()
+
+
+class ThreadsafeForwardingResult(TestResult):
+ """A TestResult which ensures the target does not receive mixed up calls.
+
+ Multiple ``ThreadsafeForwardingResults`` can forward to the same target
+ result, and that target result will only ever receive the complete set of
+ events for one test at a time.
+
+ This is enforced using a semaphore, which further guarantees that tests
+ will be sent atomically even if the ``ThreadsafeForwardingResults`` are in
+ different threads.
+
+ ``ThreadsafeForwardingResult`` is typically used by
+ ``ConcurrentTestSuite``, which creates one ``ThreadsafeForwardingResult``
+ per thread, each of which wraps the TestResult that
+ ``ConcurrentTestSuite.run()`` is called with.
+
+ target.startTestRun() and target.stopTestRun() are called once for each
+ ThreadsafeForwardingResult that forwards to the same target. If the target
+ takes special action on these events, it should take care to accommodate
+ this.
+
+ time() and tags() calls are batched to be adjacent to the test result and
+ in the case of tags() are coerced into test-local scope, avoiding the
+ opportunity for bugs around global state in the target.
+ """
+
+ def __init__(self, target, semaphore):
+ """Create a ThreadsafeForwardingResult forwarding to target.
+
+ :param target: A ``TestResult``.
+ :param semaphore: A ``threading.Semaphore`` with limit 1.
+ """
+ TestResult.__init__(self)
+ self.result = ExtendedToOriginalDecorator(target)
+ self.semaphore = semaphore
+ self._test_start = None
+ self._global_tags = set(), set()
+ self._test_tags = set(), set()
+
+ def __repr__(self):
+ return '<%s %r>' % (self.__class__.__name__, self.result)
+
+ def _any_tags(self, tags):
+ return bool(tags[0] or tags[1])
+
+ def _add_result_with_semaphore(self, method, test, *args, **kwargs):
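+ # Replay the buffered start time, then emit startTest, any tags, the
+ # outcome and stopTest as one block while holding the semaphore, so a
+ # test's events are never interleaved with another thread's.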
+ now = self._now()
+ self.semaphore.acquire()
+ try:
+ self.result.time(self._test_start)
+ self.result.startTest(test)
+ self.result.time(now)
+ if self._any_tags(self._global_tags):
+ self.result.tags(*self._global_tags)
+ if self._any_tags(self._test_tags):
+ self.result.tags(*self._test_tags)
+ self._test_tags = set(), set()
+ try:
+ method(test, *args, **kwargs)
+ finally:
+ self.result.stopTest(test)
+ finally:
+ self.semaphore.release()
+ self._test_start = None
+
+ def addError(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addError,
+ test, err, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addExpectedFailure,
+ test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addFailure,
+ test, err, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._add_result_with_semaphore(self.result.addSkip,
+ test, reason, details=details)
+
+ def addSuccess(self, test, details=None):
+ self._add_result_with_semaphore(self.result.addSuccess,
+ test, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
+ test, details=details)
+
+ def progress(self, offset, whence):
+ pass
+
+ def startTestRun(self):
+ super(ThreadsafeForwardingResult, self).startTestRun()
+ self.semaphore.acquire()
+ try:
+ self.result.startTestRun()
+ finally:
+ self.semaphore.release()
+
+ def _get_shouldStop(self):
+ self.semaphore.acquire()
+ try:
+ return self.result.shouldStop
+ finally:
+ self.semaphore.release()
+ def _set_shouldStop(self, value):
+ # Another case where we should not subclass TestResult
+ pass
+ shouldStop = property(_get_shouldStop, _set_shouldStop)
+
+ def stop(self):
+ self.semaphore.acquire()
+ try:
+ self.result.stop()
+ finally:
+ self.semaphore.release()
+
+ def stopTestRun(self):
+ self.semaphore.acquire()
+ try:
+ self.result.stopTestRun()
+ finally:
+ self.semaphore.release()
+
+ def done(self):
+ self.semaphore.acquire()
+ try:
+ self.result.done()
+ finally:
+ self.semaphore.release()
+
+ def startTest(self, test):
+ self._test_start = self._now()
+ super(ThreadsafeForwardingResult, self).startTest(test)
+
+ def wasSuccessful(self):
+ return self.result.wasSuccessful()
+
+ def tags(self, new_tags, gone_tags):
+ """See `TestResult`."""
+ super(ThreadsafeForwardingResult, self).tags(new_tags, gone_tags)
+ if self._test_start is not None:
+ self._test_tags = _merge_tags(
+ self._test_tags, (new_tags, gone_tags))
+ else:
+ self._global_tags = _merge_tags(
+ self._global_tags, (new_tags, gone_tags))
+
+
+def _merge_tags(existing, changed):
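+ # A newly added tag cancels any pending removal of that tag, and a newly
+ # removed tag cancels any pending addition, keeping the two sets disjoint.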
+ new_tags, gone_tags = changed
+ result_new = set(existing[0])
+ result_gone = set(existing[1])
+ result_new.update(new_tags)
+ result_new.difference_update(gone_tags)
+ result_gone.update(gone_tags)
+ result_gone.difference_update(new_tags)
+ return result_new, result_gone
+
+
+class ExtendedToOriginalDecorator(object):
+ """Permit new TestResult API code to degrade gracefully with old results.
+
+ This decorates an existing TestResult and converts missing outcomes
+ such as addSkip to older outcomes such as addSuccess. It also supports
+ the extended details protocol. In all cases the most recent protocol
+ is attempted first, and fallbacks only occur when the decorated result
+ does not support the newer style of calling.
+ """
+
+ def __init__(self, decorated):
+ self.decorated = decorated
+ self._tags = TagContext()
+ # Only used for old TestResults that do not have failfast.
+ self._failfast = False
+
+ def __repr__(self):
+ return '<%s %r>' % (self.__class__.__name__, self.decorated)
+
+ def __getattr__(self, name):
+ return getattr(self.decorated, name)
+
+ def addError(self, test, err=None, details=None):
+ try:
+ self._check_args(err, details)
+ if details is not None:
+ try:
+ return self.decorated.addError(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return self.decorated.addError(test, err)
+ finally:
+ if self.failfast:
+ self.stop()
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._check_args(err, details)
+ addExpectedFailure = getattr(
+ self.decorated, 'addExpectedFailure', None)
+ if addExpectedFailure is None:
+ return self.addSuccess(test)
+ if details is not None:
+ try:
+ return addExpectedFailure(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return addExpectedFailure(test, err)
+
+ def addFailure(self, test, err=None, details=None):
+ try:
+ self._check_args(err, details)
+ if details is not None:
+ try:
+ return self.decorated.addFailure(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return self.decorated.addFailure(test, err)
+ finally:
+ if self.failfast:
+ self.stop()
+
+ def addSkip(self, test, reason=None, details=None):
+ self._check_args(reason, details)
+ addSkip = getattr(self.decorated, 'addSkip', None)
+ if addSkip is None:
+ return self.decorated.addSuccess(test)
+ if details is not None:
+ try:
+ return addSkip(test, details=details)
+ except TypeError:
+ # extract the reason if it's available
+ try:
+ reason = details['reason'].as_text()
+ except KeyError:
+ reason = _details_to_str(details)
+ return addSkip(test, reason)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ try:
+ outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
+ if outcome is None:
+ try:
+ test.fail("")
+ except test.failureException:
+ return self.addFailure(test, sys.exc_info())
+ if details is not None:
+ try:
+ return outcome(test, details=details)
+ except TypeError:
+ pass
+ return outcome(test)
+ finally:
+ if self.failfast:
+ self.stop()
+
+ def addSuccess(self, test, details=None):
+ if details is not None:
+ try:
+ return self.decorated.addSuccess(test, details=details)
+ except TypeError:
+ pass
+ return self.decorated.addSuccess(test)
+
+ def _check_args(self, err, details):
+ param_count = 0
+ if err is not None:
+ param_count += 1
+ if details is not None:
+ param_count += 1
+ if param_count != 1:
+ raise ValueError("Must pass only one of err '%s' and details '%s"
+ % (err, details))
+
+ def _details_to_exc_info(self, details):
+ """Convert a details dict to an exc_info tuple."""
+ return (
+ _StringException,
+ _StringException(_details_to_str(details, special='traceback')),
+ None)
+
+ @property
+ def current_tags(self):
+ return getattr(
+ self.decorated, 'current_tags', self._tags.get_current_tags())
+
+ def done(self):
+ try:
+ return self.decorated.done()
+ except AttributeError:
+ return
+
+ def _get_failfast(self):
+ return getattr(self.decorated, 'failfast', self._failfast)
+ def _set_failfast(self, value):
+ if safe_hasattr(self.decorated, 'failfast'):
+ self.decorated.failfast = value
+ else:
+ self._failfast = value
+ failfast = property(_get_failfast, _set_failfast)
+
+ def progress(self, offset, whence):
+ method = getattr(self.decorated, 'progress', None)
+ if method is None:
+ return
+ return method(offset, whence)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def startTest(self, test):
+ self._tags = TagContext(self._tags)
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ self._tags = TagContext()
+ try:
+ return self.decorated.startTestRun()
+ except AttributeError:
+ return
+
+ def stop(self):
+ return self.decorated.stop()
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ try:
+ return self.decorated.stopTestRun()
+ except AttributeError:
+ return
+
+ def tags(self, new_tags, gone_tags):
+ method = getattr(self.decorated, 'tags', None)
+ if method is not None:
+ return method(new_tags, gone_tags)
+ else:
+ self._tags.change_tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ method = getattr(self.decorated, 'time', None)
+ if method is None:
+ return
+ return method(a_datetime)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
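For illustration only (not part of the testtools source in this patch), a minimal sketch of the fallback behaviour described in the class docstring, assuming testtools is installed and exposes ``ExtendedToOriginalDecorator`` and ``text_content`` at the paths shown:

    import unittest
    from testtools import ExtendedToOriginalDecorator
    from testtools.content import text_content

    old_result = unittest.TestResult()   # knows nothing about 'details'
    result = ExtendedToOriginalDecorator(old_result)

    class Example(unittest.TestCase):
        def runTest(self):
            pass

    test = Example()
    result.startTest(test)
    # The details-style call is tried first; unittest's addSkip() rejects the
    # keyword, so the decorator falls back to passing a plain reason string.
    result.addSkip(test, details={'reason': text_content('not supported here')})
    result.stopTest(test)
    print(old_result.skipped)   # [(<Example ...>, 'not supported here')]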
+
+class ExtendedToStreamDecorator(CopyStreamResult, StreamSummary, TestControl):
+ """Permit using old TestResult API code with new StreamResult objects.
+
+ This decorates a StreamResult and converts old (Python 2.6 / 2.7 /
+ Extended) TestResult API calls into StreamResult calls.
+
+ It also supports regular StreamResult calls, making it safe to wrap around
+ any StreamResult.
+ """
+
+ def __init__(self, decorated):
+ super(ExtendedToStreamDecorator, self).__init__([decorated])
+ # Deal with mismatched base class constructors.
+ TestControl.__init__(self)
+ self._started = False
+
+ def _get_failfast(self):
+ return len(self.targets) == 2
+ def _set_failfast(self, value):
+ if value:
+ if len(self.targets) == 2:
+ return
+ self.targets.append(StreamFailFast(self.stop))
+ else:
+ del self.targets[1:]
+ failfast = property(_get_failfast, _set_failfast)
+
+ def startTest(self, test):
+ if not self._started:
+ self.startTestRun()
+ self.status(test_id=test.id(), test_status='inprogress', timestamp=self._now())
+ self._tags = TagContext(self._tags)
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+
+ def addError(self, test, err=None, details=None):
+ self._check_args(err, details)
+ self._convert(test, err, details, 'fail')
+ addFailure = addError
+
+ def _convert(self, test, err, details, status, reason=None):
+ if not self._started:
+ self.startTestRun()
+ test_id = test.id()
+ now = self._now()
+ if err is not None:
+ if details is None:
+ details = {}
+ details['traceback'] = TracebackContent(err, test)
+ if details is not None:
+ for name, content in details.items():
+ mime_type = repr(content.content_type)
+ for file_bytes in content.iter_bytes():
+ self.status(file_name=name, file_bytes=file_bytes,
+ mime_type=mime_type, test_id=test_id, timestamp=now)
+ self.status(file_name=name, file_bytes=_b(""), eof=True,
+ mime_type=mime_type, test_id=test_id, timestamp=now)
+ if reason is not None:
+ self.status(file_name='reason', file_bytes=reason.encode('utf8'),
+ eof=True, mime_type="text/plain; charset=utf8",
+ test_id=test_id, timestamp=now)
+ self.status(test_id=test_id, test_status=status,
+ test_tags=self.current_tags, timestamp=now)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._check_args(err, details)
+ self._convert(test, err, details, 'xfail')
+
+ def addSkip(self, test, reason=None, details=None):
+ self._convert(test, None, details, 'skip', reason)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._convert(test, None, details, 'uxsuccess')
+
+ def addSuccess(self, test, details=None):
+ self._convert(test, None, details, 'success')
+
+ def _check_args(self, err, details):
+ param_count = 0
+ if err is not None:
+ param_count += 1
+ if details is not None:
+ param_count += 1
+ if param_count != 1:
+ raise ValueError("Must pass only one of err '%s' and details '%s"
+ % (err, details))
+
+ def startTestRun(self):
+ super(ExtendedToStreamDecorator, self).startTestRun()
+ self._tags = TagContext()
+ self.shouldStop = False
+ self.__now = None
+ self._started = True
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+
+ @property
+ def current_tags(self):
+ """The currently set tags."""
+ return self._tags.get_current_tags()
+
+ def tags(self, new_tags, gone_tags):
+ """Add and remove tags from the test.
+
+ :param new_tags: A set of tags to be added to the stream.
+ :param gone_tags: A set of tags to be removed from the stream.
+ """
+ self._tags.change_tags(new_tags, gone_tags)
+
+ def _now(self):
+ """Return the current 'test time'.
+
+ If the time() method has not been called, this is equivalent to
+ datetime.now(), otherwise it's the last datetime supplied to the
+ time() method.
+ """
+ if self.__now is None:
+ return datetime.datetime.now(utc)
+ else:
+ return self.__now
+
+ def time(self, a_datetime):
+ self.__now = a_datetime
+
+ def wasSuccessful(self):
+ if not self._started:
+ self.startTestRun()
+ return super(ExtendedToStreamDecorator, self).wasSuccessful()
+
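For illustration only (not part of the testtools source in this patch), a sketch of old-style TestResult calls driving a StreamResult; ``StreamSummary`` and ``ExtendedToStreamDecorator`` are assumed to be importable from the top-level testtools package in this release:

    import unittest
    from testtools import ExtendedToStreamDecorator, StreamSummary

    class Example(unittest.TestCase):
        def runTest(self):
            pass

    summary = StreamSummary()
    result = ExtendedToStreamDecorator(summary)
    result.startTestRun()
    test = Example()
    result.startTest(test)     # forwarded as an 'inprogress' status event
    result.addSuccess(test)    # forwarded as a 'success' status event
    result.stopTest(test)
    result.stopTestRun()
    print(summary.testsRun, result.wasSuccessful())   # expected: 1 True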
+
+class StreamToExtendedDecorator(StreamResult):
+ """Convert StreamResult API calls into ExtendedTestResult calls.
+
+ This will buffer all calls for all concurrently active tests, and
+ then flush each test as they complete.
+
+ Incomplete tests will be flushed as errors when the test run stops.
+
+ Non-test file attachments are accumulated into a test called
+ 'testtools.extradata' and flushed at the end of the run.
+ """
+
+ def __init__(self, decorated):
+ # ExtendedToOriginalDecorator takes care of thunking details back to
+ # exceptions/reasons etc.
+ self.decorated = ExtendedToOriginalDecorator(decorated)
+ # StreamToDict buffers and gives us individual tests.
+ self.hook = StreamToDict(self._handle_tests)
+
+ def status(self, test_id=None, test_status=None, *args, **kwargs):
+ if test_status == 'exists':
+ return
+ self.hook.status(
+ test_id=test_id, test_status=test_status, *args, **kwargs)
+
+ def startTestRun(self):
+ self.decorated.startTestRun()
+ self.hook.startTestRun()
+
+ def stopTestRun(self):
+ self.hook.stopTestRun()
+ self.decorated.stopTestRun()
+
+ def _handle_tests(self, test_dict):
+ case = test_dict_to_case(test_dict)
+ case.run(self.decorated)
+
+
+class StreamToQueue(StreamResult):
+ """A StreamResult which enqueues events as a dict to a queue.Queue.
+
+ Events have their route code updated to include the route code
+ StreamToQueue was constructed with before they are submitted. If the event
+ route code is None, it is replaced with the StreamToQueue route code,
+ otherwise it is prefixed with the supplied code and a '/' separator.
+
+ startTestRun and stopTestRun are forwarded to the queue. Implementors that
+ dequeue events back into StreamResult calls should take care not to call
+ startTestRun / stopTestRun on other StreamResult objects multiple times
+ (e.g. by filtering startTestRun and stopTestRun).
+
+ ``StreamToQueue`` is typically used by
+ ``ConcurrentStreamTestSuite``, which creates one ``StreamToQueue``
+ per thread, forwards status events to the StreamResult that
+ ``ConcurrentStreamTestSuite.run()`` was called with, and uses the
+ stopTestRun event to trigger calling join() on each thread.
+
+ Unlike ThreadsafeForwardingResult, which this supersedes, no buffering takes
+ place - any event supplied to a StreamToQueue will be inserted into the
+ queue immediately.
+
+ Events are forwarded as a dict with a key ``event`` which is one of
+ ``startTestRun``, ``stopTestRun`` or ``status``. When ``event`` is
+ ``status`` the dict also has keys matching the keyword arguments
+ of ``StreamResult.status``, otherwise it has one other key ``result`` which
+ is the result that invoked ``startTestRun``.
+ """
+
+ def __init__(self, queue, routing_code):
+ """Create a StreamToQueue forwarding to target.
+
+ :param queue: A ``queue.Queue`` to receive events.
+ :param routing_code: The routing code to apply to messages.
+ """
+ super(StreamToQueue, self).__init__()
+ self.queue = queue
+ self.routing_code = routing_code
+
+ def startTestRun(self):
+ self.queue.put(dict(event='startTestRun', result=self))
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ self.queue.put(dict(event='status', test_id=test_id,
+ test_status=test_status, test_tags=test_tags, runnable=runnable,
+ file_name=file_name, file_bytes=file_bytes, eof=eof,
+ mime_type=mime_type, route_code=self.route_code(route_code),
+ timestamp=timestamp))
+
+ def stopTestRun(self):
+ self.queue.put(dict(event='stopTestRun', result=self))
+
+ def route_code(self, route_code):
+ """Adjust route_code on the way through."""
+ if route_code is None:
+ return self.routing_code
+ return self.routing_code + _u("/") + route_code
+
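For illustration only (not part of the testtools source in this patch), a sketch of the event dicts described in the docstring above; the top-level import path is an assumption:

    try:
        import queue              # Python 3
    except ImportError:
        import Queue as queue     # Python 2
    from testtools import StreamToQueue

    q = queue.Queue()
    result = StreamToQueue(q, "worker-0")
    result.startTestRun()
    result.status(test_id='test_one', test_status='inprogress')
    result.stopTestRun()

    print(q.get()['event'])                        # 'startTestRun'
    event = q.get()
    print(event['test_id'], event['route_code'])   # test_one worker-0
    print(q.get()['event'])                        # 'stopTestRun'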
+
+class TestResultDecorator(object):
+ """General pass-through decorator.
+
+ This provides a base that other TestResults can inherit from to
+ gain basic forwarding functionality.
+ """
+
+ def __init__(self, decorated):
+ """Create a TestResultDecorator forwarding to decorated."""
+ self.decorated = decorated
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ return self.decorated.startTestRun()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ return self.decorated.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ return self.decorated.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self.decorated.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self.decorated.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self.decorated.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self.decorated.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self.decorated.addUnexpectedSuccess(test, details=details)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+ @property
+ def current_tags(self):
+ return self.decorated.current_tags
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def stop(self):
+ return self.decorated.stop()
+
+ @property
+ def testsRun(self):
+ return self.decorated.testsRun
+
+ def tags(self, new_tags, gone_tags):
+ return self.decorated.tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self.decorated.time(a_datetime)
+
+
+class Tagger(TestResultDecorator):
+ """Tag each test individually."""
+
+ def __init__(self, decorated, new_tags, gone_tags):
+ """Wrap 'decorated' such that each test is tagged.
+
+ :param new_tags: Tags to be added for each test.
+ :param gone_tags: Tags to be removed for each test.
+ """
+ super(Tagger, self).__init__(decorated)
+ self._new_tags = set(new_tags)
+ self._gone_tags = set(gone_tags)
+
+ def startTest(self, test):
+ super(Tagger, self).startTest(test)
+ self.tags(self._new_tags, self._gone_tags)
+
+
+class TestByTestResult(TestResult):
+ """Call something every time a test completes."""
+
+ def __init__(self, on_test):
+ """Construct a ``TestByTestResult``.
+
+ :param on_test: A callable that takes a test case, a status (one of
+ "success", "failure", "error", "skip", or "xfail"), a start time
+ (a ``datetime`` with timezone), a stop time, an iterable of tags,
+ and a details dict. Is called at the end of each test (i.e. on
+ ``stopTest``) with the accumulated values for that test.
+ """
+ super(TestByTestResult, self).__init__()
+ self._on_test = on_test
+
+ def startTest(self, test):
+ super(TestByTestResult, self).startTest(test)
+ self._start_time = self._now()
+ # There's no supported (i.e. tested) behaviour that relies on these
+ # being set, but it makes me more comfortable all the same. -- jml
+ self._status = None
+ self._details = None
+ self._stop_time = None
+
+ def stopTest(self, test):
+ self._stop_time = self._now()
+ tags = set(self.current_tags)
+ super(TestByTestResult, self).stopTest(test)
+ self._on_test(
+ test=test,
+ status=self._status,
+ start_time=self._start_time,
+ stop_time=self._stop_time,
+ tags=tags,
+ details=self._details)
+
+ def _err_to_details(self, test, err, details):
+ if details:
+ return details
+ return {'traceback': TracebackContent(err, test)}
+
+ def addSuccess(self, test, details=None):
+ super(TestByTestResult, self).addSuccess(test)
+ self._status = 'success'
+ self._details = details
+
+ def addFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addFailure(test, err, details)
+ self._status = 'failure'
+ self._details = self._err_to_details(test, err, details)
+
+ def addError(self, test, err=None, details=None):
+ super(TestByTestResult, self).addError(test, err, details)
+ self._status = 'error'
+ self._details = self._err_to_details(test, err, details)
+
+ def addSkip(self, test, reason=None, details=None):
+ super(TestByTestResult, self).addSkip(test, reason, details)
+ self._status = 'skip'
+ if details is None:
+ details = {'reason': text_content(reason)}
+ elif reason:
+ # XXX: What if details already has 'reason' key?
+ details['reason'] = text_content(reason)
+ self._details = details
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addExpectedFailure(test, err, details)
+ self._status = 'xfail'
+ self._details = self._err_to_details(test, err, details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ super(TestByTestResult, self).addUnexpectedSuccess(test, details)
+ self._status = 'success'
+ self._details = details
+
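For illustration only (not part of the testtools source in this patch), a small sketch of the ``on_test`` callback described in the docstring above; the top-level import path is an assumption:

    import unittest
    from testtools import TestByTestResult   # import path assumed

    def report(test, status, start_time, stop_time, tags, details):
        duration = (stop_time - start_time).total_seconds()
        print("%s: %s in %.3fs" % (test.id(), status, duration))

    class Example(unittest.TestCase):
        def test_ok(self):
            pass

    result = TestByTestResult(report)
    result.startTestRun()
    test = Example('test_ok')
    result.startTest(test)
    result.addSuccess(test)
    result.stopTest(test)    # report() fires here with the accumulated values
    result.stopTestRun()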
+
+class TimestampingStreamResult(CopyStreamResult):
+ """A StreamResult decorator that assigns a timestamp when none is present.
+
+ This is convenient for ensuring events are timestamped.
+ """
+
+ def __init__(self, target):
+ super(TimestampingStreamResult, self).__init__([target])
+
+ def status(self, *args, **kwargs):
+ timestamp = kwargs.pop('timestamp', None)
+ if timestamp is None:
+ timestamp = datetime.datetime.now(utc)
+ super(TimestampingStreamResult, self).status(
+ *args, timestamp=timestamp, **kwargs)
+
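For illustration only (not part of the testtools source in this patch), a sketch pairing the decorator with ``StreamToDict`` so the stamped times become visible; the import paths and the exact dict keys are assumptions:

    from testtools import StreamToDict, TimestampingStreamResult

    def show(test_dict):
        # 'timestamps' is expected to be a [start, stop] pair of datetimes.
        print(test_dict['id'], test_dict['status'], test_dict['timestamps'])

    result = TimestampingStreamResult(StreamToDict(show))
    result.startTestRun()
    # Neither status call passes a timestamp; one is stamped on the way through.
    result.status(test_id='test_one', test_status='inprogress')
    result.status(test_id='test_one', test_status='success')
    result.stopTestRun()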
+
+class _StringException(Exception):
+ """An exception made from an arbitrary string."""
+
+ if not str_is_unicode:
+ def __init__(self, string):
+ if type(string) is not unicode:
+ raise TypeError("_StringException expects unicode, got %r" %
+ (string,))
+ Exception.__init__(self, string)
+
+ def __str__(self):
+ return self.args[0].encode("utf-8")
+
+ def __unicode__(self):
+ return self.args[0]
+ # For 3.0 and above the default __str__ is fine, so we don't define one.
+
+ def __hash__(self):
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return self.args == other.args
+ except AttributeError:
+ return False
+
+
+def _format_text_attachment(name, text):
+ if '\n' in text:
+ return "%s: {{{\n%s\n}}}\n" % (name, text)
+ return "%s: {{{%s}}}" % (name, text)
+
+
+def _details_to_str(details, special=None):
+ """Convert a details dict to a string.
+
+ :param details: A dictionary mapping short names to ``Content`` objects.
+ :param special: If specified, an attachment that should have special
+ attention drawn to it. The primary attachment. Normally it's the
+ traceback that caused the test to fail.
+ :return: A formatted string that can be included in text test results.
+ """
+ empty_attachments = []
+ binary_attachments = []
+ text_attachments = []
+ special_content = None
+ # sorted is for testing, may want to remove that and use a dict
+ # subclass with defined order for items instead.
+ for key, content in sorted(details.items()):
+ if content.content_type.type != 'text':
+ binary_attachments.append((key, content.content_type))
+ continue
+ text = content.as_text().strip()
+ if not text:
+ empty_attachments.append(key)
+ continue
+ # We want the 'special' attachment to be at the bottom.
+ if key == special:
+ special_content = '%s\n' % (text,)
+ continue
+ text_attachments.append(_format_text_attachment(key, text))
+ if text_attachments and not text_attachments[-1].endswith('\n'):
+ text_attachments.append('')
+ if special_content:
+ text_attachments.append(special_content)
+ lines = []
+ if binary_attachments:
+ lines.append('Binary content:\n')
+ for name, content_type in binary_attachments:
+ lines.append(' %s (%s)\n' % (name, content_type))
+ if empty_attachments:
+ lines.append('Empty attachments:\n')
+ for name in empty_attachments:
+ lines.append(' %s\n' % (name,))
+ if (binary_attachments or empty_attachments) and text_attachments:
+ lines.append('\n')
+ lines.append('\n'.join(text_attachments))
+ return _u('').join(lines)
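For illustration only (not part of the testtools source in this patch), roughly how the formatting above behaves for a small details dict. ``_details_to_str`` is module-private and the module path used here is an assumption, so treat this purely as a sketch:

    from testtools.content import text_content
    # Module path assumed for this private helper; adjust if it differs.
    from testtools.testresult.real import _details_to_str

    details = {
        'log': text_content('line one\nline two'),
        'traceback': text_content('Traceback (most recent call last): ...'),
    }
    # Text attachments are rendered inline, with the 'special' one last.
    print(_details_to_str(details, special='traceback'))
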
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py
new file mode 100644
index 00000000000..db215ff12f8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
+"""Tests for testtools itself."""
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+ from testtools.tests import (
+ matchers,
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_distutilscmd,
+ test_fixturesupport,
+ test_helpers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_tags,
+ test_testcase,
+ test_testresult,
+ test_testsuite,
+ )
+ modules = [
+ matchers,
+ test_compat,
+ test_content,
+ test_content_type,
+ test_deferredruntest,
+ test_distutilscmd,
+ test_fixturesupport,
+ test_helpers,
+ test_monkey,
+ test_run,
+ test_runtest,
+ test_spinner,
+ test_tags,
+ test_testcase,
+ test_testresult,
+ test_testsuite,
+ ]
+ suites = map(lambda x: x.test_suite(), modules)
+ return TestSuite(suites)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py
new file mode 100644
index 00000000000..f766da33c9f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Helpers for tests."""
+
+__all__ = [
+ 'LoggingResult',
+ ]
+
+import sys
+
+from extras import safe_hasattr
+
+from testtools import TestResult
+from testtools.content import StackLinesContent
+from testtools import runtest
+
+
+# Importing to preserve compatibility.
+safe_hasattr
+
+# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
+try:
+ raise Exception
+except Exception:
+ an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard-to-predict tests (because Python upstream keeps changing things).
+class LoggingResult(TestResult):
+ """TestResult that logs its event to a list."""
+
+ def __init__(self, log):
+ self._events = log
+ super(LoggingResult, self).__init__()
+
+ def startTest(self, test):
+ self._events.append(('startTest', test))
+ super(LoggingResult, self).startTest(test)
+
+ def stop(self):
+ self._events.append('stop')
+ super(LoggingResult, self).stop()
+
+ def stopTest(self, test):
+ self._events.append(('stopTest', test))
+ super(LoggingResult, self).stopTest(test)
+
+ def addFailure(self, test, error):
+ self._events.append(('addFailure', test, error))
+ super(LoggingResult, self).addFailure(test, error)
+
+ def addError(self, test, error):
+ self._events.append(('addError', test, error))
+ super(LoggingResult, self).addError(test, error)
+
+ def addSkip(self, test, reason):
+ self._events.append(('addSkip', test, reason))
+ super(LoggingResult, self).addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self._events.append(('addSuccess', test))
+ super(LoggingResult, self).addSuccess(test)
+
+ def startTestRun(self):
+ self._events.append('startTestRun')
+ super(LoggingResult, self).startTestRun()
+
+ def stopTestRun(self):
+ self._events.append('stopTestRun')
+ super(LoggingResult, self).stopTestRun()
+
+ def done(self):
+ self._events.append('done')
+ super(LoggingResult, self).done()
+
+ def tags(self, new_tags, gone_tags):
+ self._events.append(('tags', new_tags, gone_tags))
+ super(LoggingResult, self).tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ self._events.append(('time', a_datetime))
+ super(LoggingResult, self).time(a_datetime)
+
+
+def is_stack_hidden():
+ return StackLinesContent.HIDE_INTERNAL_STACK
+
+
+def hide_testtools_stack(should_hide=True):
+ result = StackLinesContent.HIDE_INTERNAL_STACK
+ StackLinesContent.HIDE_INTERNAL_STACK = should_hide
+ return result
+
+
+def run_with_stack_hidden(should_hide, f, *args, **kwargs):
+ old_should_hide = hide_testtools_stack(should_hide)
+ try:
+ return f(*args, **kwargs)
+ finally:
+ hide_testtools_stack(old_should_hide)
+
+
+class FullStackRunTest(runtest.RunTest):
+
+ def _run_user(self, fn, *args, **kwargs):
+ return run_with_stack_hidden(
+ False,
+ super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py
new file mode 100644
index 00000000000..ebab308e77c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+ from testtools.tests.matchers import (
+ test_basic,
+ test_datastructures,
+ test_dict,
+ test_doctest,
+ test_exception,
+ test_filesystem,
+ test_higherorder,
+ test_impl,
+ )
+ modules = [
+ test_basic,
+ test_datastructures,
+ test_dict,
+ test_doctest,
+ test_exception,
+ test_filesystem,
+ test_higherorder,
+ test_impl,
+ ]
+ suites = map(lambda x: x.test_suite(), modules)
+ return TestSuite(suites)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py
new file mode 100644
index 00000000000..3ff87278dae
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestMatchersInterface(object):
+
+ run_tests_with = FullStackRunTest
+
+ def test_matches_match(self):
+ matcher = self.matches_matcher
+ matches = self.matches_matches
+ mismatches = self.matches_mismatches
+ for candidate in matches:
+ self.assertEqual(None, matcher.match(candidate))
+ for candidate in mismatches:
+ mismatch = matcher.match(candidate)
+ self.assertNotEqual(None, mismatch)
+ self.assertNotEqual(None, getattr(mismatch, 'describe', None))
+
+ def test__str__(self):
+ # [(expected, object to __str__)].
+ from testtools.matchers._doctest import DocTestMatches
+ examples = self.str_examples
+ for expected, matcher in examples:
+ self.assertThat(matcher, DocTestMatches(expected))
+
+ def test_describe_difference(self):
+ # [(expected, matchee, matcher), ...]
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ self.assertEqual(difference, mismatch.describe())
+
+ def test_mismatch_details(self):
+ # The mismatch object must provide get_details, which must return a
+ # dictionary mapping names to Content objects.
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ details = mismatch.get_details()
+ self.assertEqual(dict(details), details)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py
new file mode 100644
index 00000000000..c53bc9e9c42
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py
@@ -0,0 +1,396 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import re
+
+from testtools import TestCase
+from testtools.compat import (
+ text_repr,
+ _b,
+ _u,
+ )
+from testtools.matchers._basic import (
+ _BinaryMismatch,
+ Contains,
+ DoesNotEndWith,
+ DoesNotStartWith,
+ EndsWith,
+ Equals,
+ Is,
+ IsInstance,
+ LessThan,
+ GreaterThan,
+ HasLength,
+ MatchesRegex,
+ NotEquals,
+ SameMembers,
+ StartsWith,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class Test_BinaryMismatch(TestCase):
+ """Mismatches from binary comparisons need useful describe output"""
+
+ _long_string = "This is a longish multiline non-ascii string\n\xa7"
+ _long_b = _b(_long_string)
+ _long_u = _u(_long_string)
+
+ class CustomRepr(object):
+ def __init__(self, repr_string):
+ self._repr_string = repr_string
+ def __repr__(self):
+ return _u('<object ') + _u(self._repr_string) + _u('>')
+
+ def test_short_objects(self):
+ o1, o2 = self.CustomRepr('a'), self.CustomRepr('b')
+ mismatch = _BinaryMismatch(o1, "!~", o2)
+ self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
+
+ def test_short_mixed_strings(self):
+ b, u = _b("\xa7"), _u("\xa7")
+ mismatch = _BinaryMismatch(b, "!~", u)
+ self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
+
+ def test_long_bytes(self):
+ one_line_b = self._long_b.replace(_b("\n"), _b(" "))
+ mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(one_line_b),
+ text_repr(self._long_b, multiline=True)))
+
+ def test_long_unicode(self):
+ one_line_u = self._long_u.replace("\n", " ")
+ mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(one_line_u),
+ text_repr(self._long_u, multiline=True)))
+
+ def test_long_mixed_strings(self):
+ mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_b, multiline=True),
+ text_repr(self._long_u, multiline=True)))
+
+ def test_long_bytes_and_object(self):
+ obj = object()
+ mismatch = _BinaryMismatch(self._long_b, "!~", obj)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_b, multiline=True),
+ repr(obj)))
+
+ def test_long_unicode_and_object(self):
+ obj = object()
+ mismatch = _BinaryMismatch(self._long_u, "!~", obj)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_u, multiline=True),
+ repr(obj)))
+
+
+class TestEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Equals(1)
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
+
+ describe_examples = [("1 != 2", 2, Equals(1))]
+
+
+class TestNotEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = NotEquals(1)
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
+
+ describe_examples = [("1 == 1", 1, NotEquals(1))]
+
+
+class TestIsInterface(TestCase, TestMatchersInterface):
+
+ foo = object()
+ bar = object()
+
+ matches_matcher = Is(foo)
+ matches_matches = [foo]
+ matches_mismatches = [bar, 1]
+
+ str_examples = [("Is(2)", Is(2))]
+
+ describe_examples = [("1 is not 2", 2, Is(1))]
+
+
+class TestIsInstanceInterface(TestCase, TestMatchersInterface):
+
+ class Foo:pass
+
+ matches_matcher = IsInstance(Foo)
+ matches_matches = [Foo()]
+ matches_mismatches = [object(), 1, Foo]
+
+ str_examples = [
+ ("IsInstance(str)", IsInstance(str)),
+ ("IsInstance(str, int)", IsInstance(str, int)),
+ ]
+
+ describe_examples = [
+ ("'foo' is not an instance of int", 'foo', IsInstance(int)),
+ ("'foo' is not an instance of any of (int, type)", 'foo',
+ IsInstance(int, type)),
+ ]
+
+
+class TestLessThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = LessThan(4)
+ matches_matches = [-5, 3]
+ matches_mismatches = [4, 5, 5000]
+
+ str_examples = [
+ ("LessThan(12)", LessThan(12)),
+ ]
+
+ describe_examples = [
+ ('4 is not > 5', 5, LessThan(4)),
+ ('4 is not > 4', 4, LessThan(4)),
+ ]
+
+
+class TestGreaterThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = GreaterThan(4)
+ matches_matches = [5, 8]
+ matches_mismatches = [-2, 0, 4]
+
+ str_examples = [
+ ("GreaterThan(12)", GreaterThan(12)),
+ ]
+
+ describe_examples = [
+ ('5 is not < 4', 4, GreaterThan(5)),
+ ('4 is not < 4', 4, GreaterThan(4)),
+ ]
+
+
+class TestContainsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Contains('foo')
+ matches_matches = ['foo', 'afoo', 'fooa']
+ matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
+
+ str_examples = [
+ ("Contains(1)", Contains(1)),
+ ("Contains('foo')", Contains('foo')),
+ ]
+
+ describe_examples = [("1 not in 2", 2, Contains(1))]
+
+
+class DoesNotStartWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_describe(self):
+ mismatch = DoesNotStartWith("fo", "bo")
+ self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
+
+ def test_describe_non_ascii_unicode(self):
+ string = _u("A\xA7")
+ suffix = _u("B\xA7")
+ mismatch = DoesNotStartWith(string, suffix)
+ self.assertEqual("%s does not start with %s." % (
+ text_repr(string), text_repr(suffix)),
+ mismatch.describe())
+
+ def test_describe_non_ascii_bytes(self):
+ string = _b("A\xA7")
+ suffix = _b("B\xA7")
+ mismatch = DoesNotStartWith(string, suffix)
+ self.assertEqual("%r does not start with %r." % (string, suffix),
+ mismatch.describe())
+
+
+class StartsWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_str(self):
+ matcher = StartsWith("bar")
+ self.assertEqual("StartsWith('bar')", str(matcher))
+
+ def test_str_with_bytes(self):
+ b = _b("\xA7")
+ matcher = StartsWith(b)
+ self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
+
+ def test_str_with_unicode(self):
+ u = _u("\xA7")
+ matcher = StartsWith(u)
+ self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
+
+ def test_match(self):
+ matcher = StartsWith("bar")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_start_with(self):
+ matcher = StartsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class DoesNotEndWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_describe(self):
+ mismatch = DoesNotEndWith("fo", "bo")
+ self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
+
+ def test_describe_non_ascii_unicode(self):
+ string = _u("A\xA7")
+ suffix = _u("B\xA7")
+ mismatch = DoesNotEndWith(string, suffix)
+ self.assertEqual("%s does not end with %s." % (
+ text_repr(string), text_repr(suffix)),
+ mismatch.describe())
+
+ def test_describe_non_ascii_bytes(self):
+ string = _b("A\xA7")
+ suffix = _b("B\xA7")
+ mismatch = DoesNotEndWith(string, suffix)
+ self.assertEqual("%r does not end with %r." % (string, suffix),
+ mismatch.describe())
+
+
+class EndsWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_str(self):
+ matcher = EndsWith("bar")
+ self.assertEqual("EndsWith('bar')", str(matcher))
+
+ def test_str_with_bytes(self):
+ b = _b("\xA7")
+ matcher = EndsWith(b)
+ self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
+
+ def test_str_with_unicode(self):
+ u = _u("\xA7")
+ matcher = EndsWith(u)
+ self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
+
+ def test_match(self):
+ matcher = EndsWith("arf")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_end_with(self):
+ matcher = EndsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class TestSameMembers(TestCase, TestMatchersInterface):
+
+ matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}])
+ matches_matches = [
+ [1, 1, 2, 3, {'foo': 'bar'}],
+ [3, {'foo': 'bar'}, 1, 2, 1],
+ [3, 2, 1, {'foo': 'bar'}, 1],
+ (2, {'foo': 'bar'}, 3, 1, 1),
+ ]
+ matches_mismatches = [
+ set([1, 2, 3]),
+ [1, 1, 2, 3, 5],
+ [1, 2, 3, {'foo': 'bar'}],
+ 'foo',
+ ]
+
+ describe_examples = [
+ (("elements differ:\n"
+ "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n"
+ "actual = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n"
+ ": \n"
+ "missing: ['watermelon']\n"
+ "extra: ['sparrow']"
+ ),
+ ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',],
+ SameMembers(
+ ['apple', 'orange', 'canteloupe', 'watermelon',
+ 'lemon', 'banana',])),
+ ]
+
+ str_examples = [
+ ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])),
+ ]
+
+
+class TestMatchesRegex(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesRegex('a|b')
+ matches_matches = ['a', 'b']
+ matches_mismatches = ['c']
+
+ str_examples = [
+ ("MatchesRegex('a|b')", MatchesRegex('a|b')),
+ ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
+ ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
+ ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
+ ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
+ ]
+
+ describe_examples = [
+ ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
+ ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
+ ("%r does not match /\\s+\\xa7/" % (_b('c'),),
+ _b('c'), MatchesRegex(_b("\\s+\xA7"))),
+ ("%r does not match /\\s+\\xa7/" % (_u('c'),),
+ _u('c'), MatchesRegex(_u("\\s+\xA7"))),
+ ]
+
+
+class TestHasLength(TestCase, TestMatchersInterface):
+
+ matches_matcher = HasLength(2)
+ matches_matches = [[1, 2]]
+ matches_mismatches = [[], [1], [3, 2, 1]]
+
+ str_examples = [
+ ("HasLength(2)", HasLength(2)),
+ ]
+
+ describe_examples = [
+ ("len([]) != 1", [], HasLength(1)),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py
new file mode 100644
index 00000000000..f6d9d8658c8
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py
@@ -0,0 +1,209 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+import re
+import sys
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.matchers import (
+ Annotate,
+ Equals,
+ LessThan,
+ MatchesRegex,
+ NotEquals,
+ )
+from testtools.matchers._datastructures import (
+ ContainsAll,
+ MatchesListwise,
+ MatchesStructure,
+ MatchesSetwise,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def run_doctest(obj, name):
+ p = doctest.DocTestParser()
+ t = p.get_doctest(
+ obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
+ r = doctest.DocTestRunner()
+ output = StringIO()
+ r.run(t, out=output.write)
+ return r.failures, output.getvalue()
+
+
+class TestMatchesListwise(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_docstring(self):
+ failure_count, output = run_doctest(
+ MatchesListwise, "MatchesListwise")
+ if failure_count:
+ self.fail("Doctest failed with %s" % output)
+
+
+class TestMatchesStructure(TestCase, TestMatchersInterface):
+
+ class SimpleClass:
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+ matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
+ matches_matches = [SimpleClass(1, 2)]
+ matches_mismatches = [
+ SimpleClass(2, 2),
+ SimpleClass(1, 1),
+ SimpleClass(3, 3),
+ ]
+
+ str_examples = [
+ ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
+ ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
+ ("MatchesStructure(x=Equals(1), y=Equals(2))",
+ MatchesStructure(x=Equals(1), y=Equals(2))),
+ ]
+
+ describe_examples = [
+ ("""\
+Differences: [
+3 != 1: x
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
+ ("""\
+Differences: [
+3 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
+ ("""\
+Differences: [
+0 != 1: x
+0 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
+ ]
+
+ def test_fromExample(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
+
+ def test_byEquality(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.byEquality(x=1))
+
+ def test_withStructure(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.byMatcher(LessThan, x=2))
+
+ def test_update(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
+
+ def test_update_none(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
+ z=None))
+
+
+class TestMatchesSetwise(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def assertMismatchWithDescriptionMatching(self, value, matcher,
+ description_matcher):
+ mismatch = matcher.match(value)
+ if mismatch is None:
+ self.fail("%s matched %s" % (matcher, value))
+ actual_description = mismatch.describe()
+ self.assertThat(
+ actual_description,
+ Annotate(
+ "%s matching %s" % (matcher, value),
+ description_matcher))
+
+ def test_matches(self):
+ self.assertIs(
+ None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
+
+ def test_mismatches(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex('.*There was 1 mismatch$', re.S))
+
+ def test_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+ Equals('There was 1 matcher left over: Equals(1)'))
+
+ def test_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
+ Equals('There was 1 value left over: [3]'))
+
+ def test_two_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+ MatchesRegex(
+ 'There were 2 matchers left over: Equals\([12]\), '
+ 'Equals\([12]\)'))
+
+ def test_two_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ 'There were 2 values left over: \[[34], [34]\]'))
+
+ def test_mismatch_and_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
+ re.S))
+
+ def test_mismatch_and_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 1 extra value: \[[34]\]',
+ re.S))
+
+ def test_mismatch_and_two_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [3, 4], MatchesSetwise(
+ Equals(0), Equals(1), Equals(2), Equals(3)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 2 extra matchers: '
+ 'Equals\([012]\), Equals\([012]\)', re.S))
+
+ def test_mismatch_and_two_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
+ re.S))
+
+
+class TestContainsAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainsAll(['foo', 'bar'])
+ matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']]
+ matches_mismatches = [['f', 'g'], ['foo', 'baz'], []]
+
+ str_examples = [(
+ "MatchesAll(Contains('foo'), Contains('bar'))",
+ ContainsAll(['foo', 'bar'])),
+ ]
+
+ describe_examples = [("""Differences: [
+'baz' not in 'foo'
+]""",
+ 'foo', ContainsAll(['foo', 'baz']))]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py
new file mode 100644
index 00000000000..00368dd6ceb
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py
@@ -0,0 +1,227 @@
+from testtools import TestCase
+from testtools.matchers import (
+ Equals,
+ NotEquals,
+ Not,
+ )
+from testtools.matchers._dict import (
+ ContainedByDict,
+ ContainsDict,
+ KeysEqual,
+ MatchesAllDict,
+ MatchesDict,
+ _SubDictOf,
+ )
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})",
+ matches_matcher)]
+
+ describe_examples = [
+ ("""a: 1 == 1""", 1, matches_matcher),
+ ]
+
+
+class TestKeysEqualWithList(TestCase, TestMatchersInterface):
+
+ matches_matcher = KeysEqual('foo', 'bar')
+ matches_matches = [
+ {'foo': 0, 'bar': 1},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 0},
+ {'bar': 1},
+ {'foo': 0, 'bar': 1, 'baz': 2},
+ {'a': None, 'b': None, 'c': None},
+ ]
+
+ str_examples = [
+ ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
+ ]
+
+ describe_examples = []
+
+ def test_description(self):
+ matchee = {'foo': 0, 'bar': 1, 'baz': 2}
+ mismatch = KeysEqual('foo', 'bar').match(matchee)
+ description = mismatch.describe()
+ self.assertThat(
+ description, Equals(
+ "['bar', 'foo'] does not match %r: Keys not equal"
+ % (matchee,)))
+
+
+class TestKeysEqualWithDict(TestKeysEqualWithList):
+
+ matches_matcher = KeysEqual({'foo': 3, 'bar': 4})
+
+
+class TestSubDictOf(TestCase, TestMatchersInterface):
+
+ matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bar'},
+ ]
+
+ matches_mismatches = [
+ {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = []
+ describe_examples = []
+
+
+class TestMatchesDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': None},
+ {'foo': 'bar', 'baz': 'quux'},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = [
+ ("MatchesDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ " 'foo': Equals('bar'),\n"
+ "}",
+ {}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}",
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}\n"
+ "Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+class TestContainsDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainsDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': None},
+ {'foo': 'bar', 'baz': 'quux'},
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'cat': 'dog'},
+ {'foo': 'bar'},
+ ]
+
+ str_examples = [
+ ("ContainsDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ " 'foo': Equals('bar'),\n"
+ "}",
+ {}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+class TestContainedByDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainedByDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {},
+ {'foo': 'bar'},
+ {'foo': 'bar', 'baz': 'quux'},
+ {'baz': 'quux'},
+ ]
+ matches_mismatches = [
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = [
+ ("ContainedByDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py
new file mode 100644
index 00000000000..81b9579dbf0
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+
+from testtools import TestCase
+from testtools.compat import (
+ str_is_unicode,
+ _b,
+ _u,
+ )
+from testtools.matchers._doctest import DocTestMatches
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+
+class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
+ matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
+ matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
+
+ str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
+ DocTestMatches("Ran 1 test in ...s")),
+ ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
+ ]
+
+ describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
+ ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
+ DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
+ matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
+ matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
+
+ str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
+ DocTestMatches(_u("\xa7"))),
+ ]
+
+ describe_examples = [(
+ _u("Expected:\n \xa7\nGot:\n a\n"),
+ "a",
+ DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesSpecific(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test___init__simple(self):
+ matcher = DocTestMatches("foo")
+ self.assertEqual("foo\n", matcher.want)
+
+ def test___init__flags(self):
+ matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
+ self.assertEqual("bar\n", matcher.want)
+ self.assertEqual(doctest.ELLIPSIS, matcher.flags)
+
+ def test_describe_non_ascii_bytes(self):
+ """Even with bytestrings, the mismatch should be coercible to unicode
+
+ DocTestMatches is intended for text, but the Python 2 str type also
+ permits arbitrary binary inputs. This is a slightly bogus thing to do,
+ and under Python 3 using bytes objects will reasonably raise an error.
+ """
+ header = _b("\x89PNG\r\n\x1a\n...")
+ if str_is_unicode:
+ self.assertRaises(TypeError,
+ DocTestMatches, header, doctest.ELLIPSIS)
+ return
+ matcher = DocTestMatches(header, doctest.ELLIPSIS)
+ mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
+ # Must be treatable as unicode text, the exact output matters less
+ self.assertTrue(unicode(mismatch.describe()))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py
new file mode 100644
index 00000000000..ef7185f19a4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import sys
+
+from testtools import TestCase
+from testtools.matchers import (
+ AfterPreprocessing,
+ Equals,
+ )
+from testtools.matchers._exception import (
+ MatchesException,
+ Raises,
+ raises,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def make_error(type, *args, **kwargs):
+ try:
+ raise type(*args, **kwargs)
+ except type:
+ return sys.exc_info()
+
+
+class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError("foo"))
+ error_foo = make_error(ValueError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo]
+ matches_mismatches = [error_bar, error_base_foo]
+
+ str_examples = [
+ ("MatchesException(Exception('foo',))",
+ MatchesException(Exception('foo')))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError("foo"))),
+ ("ValueError('bar',) has different arguments to ValueError('foo',).",
+ error_bar,
+ MatchesException(ValueError("foo"))),
+ ]
+
+
+class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError)
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_base_foo]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError)),
+ ]
+
+
+class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError, 'fo.')
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_bar]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception, 'fo.'))
+ ]
+ describe_examples = [
+ ("'bar' does not match /fo./",
+ error_bar, MatchesException(ValueError, "fo.")),
+ ]
+
+
+class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(
+ ValueError, AfterPreprocessing(str, Equals('foo')))
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_bar]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception, Equals('foo')))
+ ]
+ describe_examples = [
+ ("5 != %r" % (error_bar[1],),
+ error_bar, MatchesException(ValueError, Equals(5))),
+ ]
+
+
+class TestRaisesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises()
+ def boom():
+ raise Exception('foo')
+ matches_matches = [boom]
+ matches_mismatches = [lambda:None]
+
+    # Tricky to get function objects to render consistently, and the interfaces
+    # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises(
+ exception_matcher=MatchesException(Exception('foo')))
+ def boom_bar():
+ raise Exception('bar')
+ def boom_foo():
+ raise Exception('foo')
+ matches_matches = [boom_foo]
+ matches_mismatches = [lambda:None, boom_bar]
+
+    # Tricky to get function objects to render consistently, and the interfaces
+    # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesBaseTypes(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def raiser(self):
+ raise KeyboardInterrupt('foo')
+
+ def test_KeyboardInterrupt_matched(self):
+ # When KeyboardInterrupt is matched, it is swallowed.
+ matcher = Raises(MatchesException(KeyboardInterrupt))
+ self.assertThat(self.raiser, matcher)
+
+ def test_KeyboardInterrupt_propogates(self):
+        # The default 'it raised' propagates KeyboardInterrupt.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ matcher = Raises()
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+ def test_KeyboardInterrupt_match_Exception_propogates(self):
+        # If the raised exception isn't matched, and it is not a subclass of
+        # Exception, it is propagated.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ if sys.version_info > (2, 5):
+ matcher = Raises(MatchesException(Exception))
+ else:
+ # On Python 2.4 KeyboardInterrupt is a StandardError subclass
+                # but should propagate from less generic exception matchers
+ matcher = Raises(MatchesException(EnvironmentError))
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+
+class TestRaisesConvenience(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_exc_type(self):
+ self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+ def test_exc_value(self):
+ e = RuntimeError("You lose!")
+ def raiser():
+ raise e
+ self.assertThat(raiser, raises(e))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
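
For orientation, here is a minimal sketch of how the exception matchers exercised by test_exception.py are typically used from a test case. The class and helper names below are illustrative only, and nothing beyond the public testtools.matchers API imported at the top of that file is assumed.

    from testtools import TestCase
    from testtools.matchers import MatchesException, Raises, raises


    class ExceptionMatcherUsage(TestCase):

        def test_shorthand(self):
            # raises() wraps Raises(MatchesException(...)) for the common case.
            self.assertThat(lambda: 1 / 0, raises(ZeroDivisionError))

        def test_type_and_message(self):
            # MatchesException also accepts a regex checked against str(error).
            def boom():
                raise ValueError('bad value: 42')
            self.assertThat(
                boom, Raises(MatchesException(ValueError, 'bad value')))
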
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py
new file mode 100644
index 00000000000..917ff2ed058
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import os
+import shutil
+import tarfile
+import tempfile
+
+from testtools import TestCase
+from testtools.matchers import (
+ Contains,
+ DocTestMatches,
+ Equals,
+ )
+from testtools.matchers._filesystem import (
+ DirContains,
+ DirExists,
+ FileContains,
+ FileExists,
+ HasPermissions,
+ PathExists,
+ SamePath,
+ TarballContains,
+ )
+
+
+class PathHelpers(object):
+
+ def mkdtemp(self):
+ directory = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, directory)
+ return directory
+
+ def create_file(self, filename, contents=''):
+ fp = open(filename, 'w')
+ try:
+ fp.write(contents)
+ finally:
+ fp.close()
+
+ def touch(self, filename):
+ return self.create_file(filename)
+
+
+class TestPathExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, PathExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = PathExists().match(doesntexist)
+ self.assertThat(
+ "%s does not exist." % doesntexist, Equals(mismatch.describe()))
+
+
+class TestDirExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, DirExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = DirExists().match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_not_a_directory(self):
+ filename = os.path.join(self.mkdtemp(), 'foo')
+ self.touch(filename)
+ mismatch = DirExists().match(filename)
+ self.assertThat(
+ "%s is not a directory." % filename, Equals(mismatch.describe()))
+
+
+class TestFileExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'filename')
+ self.touch(filename)
+ self.assertThat(filename, FileExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = FileExists().match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_not_a_file(self):
+ tempdir = self.mkdtemp()
+ mismatch = FileExists().match(tempdir)
+ self.assertThat(
+ "%s is not a file." % tempdir, Equals(mismatch.describe()))
+
+
+class TestDirContains(TestCase, PathHelpers):
+
+ def test_empty(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, DirContains([]))
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = DirContains([]).match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_contains_files(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ self.touch(os.path.join(tempdir, 'bar'))
+ self.assertThat(tempdir, DirContains(['bar', 'foo']))
+
+ def test_matcher(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ self.touch(os.path.join(tempdir, 'bar'))
+ self.assertThat(tempdir, DirContains(matcher=Contains('bar')))
+
+ def test_neither_specified(self):
+ self.assertRaises(AssertionError, DirContains)
+
+ def test_both_specified(self):
+ self.assertRaises(
+ AssertionError, DirContains, filenames=[], matcher=Contains('a'))
+
+ def test_does_not_contain_files(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ mismatch = DirContains(['bar', 'foo']).match(tempdir)
+ self.assertThat(
+ Equals(['bar', 'foo']).match(['foo']).describe(),
+ Equals(mismatch.describe()))
+
+
+class TestFileContains(TestCase, PathHelpers):
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = FileContains('').match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_contains(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Hello World!')
+ self.assertThat(filename, FileContains('Hello World!'))
+
+ def test_matcher(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Hello World!')
+ self.assertThat(
+ filename, FileContains(matcher=DocTestMatches('Hello World!')))
+
+ def test_neither_specified(self):
+ self.assertRaises(AssertionError, FileContains)
+
+ def test_both_specified(self):
+ self.assertRaises(
+ AssertionError, FileContains, contents=[], matcher=Contains('a'))
+
+ def test_does_not_contain(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Goodbye Cruel World!')
+ mismatch = FileContains('Hello World!').match(filename)
+ self.assertThat(
+ Equals('Hello World!').match('Goodbye Cruel World!').describe(),
+            Equals(mismatch.describe()))
+
+
+class TestTarballContains(TestCase, PathHelpers):
+
+ def test_match(self):
+ tempdir = self.mkdtemp()
+ in_temp_dir = lambda x: os.path.join(tempdir, x)
+ self.touch(in_temp_dir('a'))
+ self.touch(in_temp_dir('b'))
+ tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+ tarball.add(in_temp_dir('a'), 'a')
+ tarball.add(in_temp_dir('b'), 'b')
+ tarball.close()
+ self.assertThat(
+ in_temp_dir('foo.tar.gz'), TarballContains(['b', 'a']))
+
+ def test_mismatch(self):
+ tempdir = self.mkdtemp()
+ in_temp_dir = lambda x: os.path.join(tempdir, x)
+ self.touch(in_temp_dir('a'))
+ self.touch(in_temp_dir('b'))
+ tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+ tarball.add(in_temp_dir('a'), 'a')
+ tarball.add(in_temp_dir('b'), 'b')
+ tarball.close()
+ mismatch = TarballContains(['d', 'c']).match(in_temp_dir('foo.tar.gz'))
+ self.assertEqual(
+ mismatch.describe(),
+ Equals(['c', 'd']).match(['a', 'b']).describe())
+
+
+class TestSamePath(TestCase, PathHelpers):
+
+ def test_same_string(self):
+ self.assertThat('foo', SamePath('foo'))
+
+ def test_relative_and_absolute(self):
+ path = 'foo'
+ abspath = os.path.abspath(path)
+ self.assertThat(path, SamePath(abspath))
+ self.assertThat(abspath, SamePath(path))
+
+ def test_real_path(self):
+ tempdir = self.mkdtemp()
+ source = os.path.join(tempdir, 'source')
+ self.touch(source)
+ target = os.path.join(tempdir, 'target')
+ try:
+ os.symlink(source, target)
+ except (AttributeError, NotImplementedError):
+ self.skip("No symlink support")
+ self.assertThat(source, SamePath(target))
+ self.assertThat(target, SamePath(source))
+
+
+class TestHasPermissions(TestCase, PathHelpers):
+
+ def test_match(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'filename')
+ self.touch(filename)
+ permissions = oct(os.stat(filename).st_mode)[-4:]
+ self.assertThat(filename, HasPermissions(permissions))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
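
As a rough illustration of the filesystem matchers covered by test_filesystem.py, a hypothetical test might use them as in the sketch below; the class name, file name, and file contents are invented for the example.

    import os
    import shutil
    import tempfile

    from testtools import TestCase
    from testtools.matchers import DirContains, FileContains, FileExists


    class WorkspaceTests(TestCase):

        def test_creates_settings_file(self):
            # Build a scratch directory and assert on what ends up inside it.
            workdir = tempfile.mkdtemp()
            self.addCleanup(shutil.rmtree, workdir)
            path = os.path.join(workdir, 'settings.ini')
            with open(path, 'w') as f:
                f.write('[core]\nverbose = true\n')
            self.assertThat(path, FileExists())
            self.assertThat(path, FileContains('[core]\nverbose = true\n'))
            self.assertThat(workdir, DirContains(['settings.ini']))
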
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py
new file mode 100644
index 00000000000..fb86b7fe2f9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py
@@ -0,0 +1,254 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import (
+ DocTestMatches,
+ Equals,
+ LessThan,
+ MatchesStructure,
+ Mismatch,
+ NotEquals,
+ )
+from testtools.matchers._higherorder import (
+ AfterPreprocessing,
+ AllMatch,
+ Annotate,
+ AnnotatedMismatch,
+ AnyMatch,
+ MatchesAny,
+ MatchesAll,
+ MatchesPredicate,
+ MatchesPredicateWithParams,
+ Not,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestAllMatch(TestCase, TestMatchersInterface):
+
+ matches_matcher = AllMatch(LessThan(10))
+ matches_matches = [
+ [9, 9, 9],
+ (9, 9),
+ iter([9, 9, 9, 9, 9]),
+ ]
+ matches_mismatches = [
+ [11, 9, 9],
+ iter([9, 12, 9, 11]),
+ ]
+
+ str_examples = [
+ ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
+ ]
+
+ describe_examples = [
+ ('Differences: [\n'
+ '10 is not > 11\n'
+ '10 is not > 10\n'
+ ']',
+ [11, 9, 10],
+ AllMatch(LessThan(10))),
+ ]
+
+
+class TestAnyMatch(TestCase, TestMatchersInterface):
+
+ matches_matcher = AnyMatch(Equals('elephant'))
+ matches_matches = [
+ ['grass', 'cow', 'steak', 'milk', 'elephant'],
+ (13, 'elephant'),
+ ['elephant', 'elephant', 'elephant'],
+ set(['hippo', 'rhino', 'elephant']),
+ ]
+ matches_mismatches = [
+ [],
+ ['grass', 'cow', 'steak', 'milk'],
+ (13, 12, 10),
+ ['element', 'hephalump', 'pachyderm'],
+ set(['hippo', 'rhino', 'diplodocus']),
+ ]
+
+ str_examples = [
+ ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))),
+ ]
+
+ describe_examples = [
+ ('Differences: [\n'
+ '7 != 11\n'
+ '7 != 9\n'
+ '7 != 10\n'
+ ']',
+ [11, 9, 10],
+ AnyMatch(Equals(7))),
+ ]
+
+
+class TestAfterPreprocessing(TestCase, TestMatchersInterface):
+
+ def parity(x):
+ return x % 2
+
+ matches_matcher = AfterPreprocessing(parity, Equals(1))
+ matches_matches = [3, 5]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("AfterPreprocessing(<function parity>, Equals(1))",
+ AfterPreprocessing(parity, Equals(1))),
+ ]
+
+ describe_examples = [
+ ("1 != 0: after <function parity> on 2", 2,
+ AfterPreprocessing(parity, Equals(1))),
+ ("1 != 0", 2,
+ AfterPreprocessing(parity, Equals(1), annotate=False)),
+ ]
+
+
+class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
+ matches_matches = ["1", "2"]
+ matches_mismatches = ["3"]
+
+ str_examples = [(
+ "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
+ MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
+ ]
+
+ describe_examples = [("""Differences: [
+Expected:
+ 1
+Got:
+ 3
+
+Expected:
+ 2
+Got:
+ 3
+
+]""",
+ "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
+
+
+class TestMatchesAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAll(NotEquals(1), NotEquals(2))",
+ MatchesAll(NotEquals(1), NotEquals(2)))]
+
+ describe_examples = [
+ ("""Differences: [
+1 == 1
+]""",
+ 1, MatchesAll(NotEquals(1), NotEquals(2))),
+ ("1 == 1", 1,
+ MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)),
+ ]
+
+
+class TestAnnotate(TestCase, TestMatchersInterface):
+
+ matches_matcher = Annotate("foo", Equals(1))
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
+
+ describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
+
+ def test_if_message_no_message(self):
+ # Annotate.if_message returns the given matcher if there is no
+ # message.
+ matcher = Equals(1)
+ not_annotated = Annotate.if_message('', matcher)
+ self.assertIs(matcher, not_annotated)
+
+ def test_if_message_given_message(self):
+ # Annotate.if_message returns an annotated version of the matcher if a
+ # message is provided.
+ matcher = Equals(1)
+ expected = Annotate('foo', matcher)
+ annotated = Annotate.if_message('foo', matcher)
+ self.assertThat(
+ annotated,
+ MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
+
+
+class TestAnnotatedMismatch(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_forwards_details(self):
+ x = Mismatch('description', {'foo': 'bar'})
+ annotated = AnnotatedMismatch("annotation", x)
+ self.assertEqual(x.get_details(), annotated.get_details())
+
+
+class TestNotInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Not(Equals(1))
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("Not(Equals(1))", Not(Equals(1))),
+ ("Not(Equals('1'))", Not(Equals('1')))]
+
+ describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
+
+
+def is_even(x):
+ return x % 2 == 0
+
+
+class TestMatchesPredicate(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesPredicate(is_even, "%s is not even")
+ matches_matches = [2, 4, 6, 8]
+ matches_mismatches = [3, 5, 7, 9]
+
+ str_examples = [
+ ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"),
+ MatchesPredicate(is_even, "%s is not even")),
+ ]
+
+ describe_examples = [
+ ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")),
+ ]
+
+
+def between(x, low, high):
+ return low < x < high
+
+
+class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(1, 9)
+ matches_matches = [2, 4, 6, 8]
+ matches_mismatches = [0, 1, 9, 10]
+
+ str_examples = [
+ ("MatchesPredicateWithParams(%r, %r)(%s)" % (
+ between, "{0} is not between {1} and {2}", "1, 2"),
+ MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(1, 2)),
+ ("Between(1, 2)", MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}", "Between")(1, 2)),
+ ]
+
+ describe_examples = [
+ ('1 is not between 2 and 3', 1, MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(2, 3)),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
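
A short sketch of how the combinator matchers exercised in test_higherorder.py compose in practice; the values and annotation text are invented, and only the public testtools.matchers API is assumed.

    from testtools import TestCase
    from testtools.matchers import (
        AfterPreprocessing,
        Annotate,
        Equals,
        MatchesAll,
        MatchesAny,
        Not,
        )


    class CombinatorUsage(TestCase):

        def test_composed_matchers(self):
            # Accept 4 or 5, reject 6, and annotate any mismatch description.
            matcher = Annotate(
                'unexpected retry count',
                MatchesAll(MatchesAny(Equals(4), Equals(5)), Not(Equals(6))))
            self.assertThat(4, matcher)

        def test_after_preprocessing(self):
            # Normalise the matchee before handing it to the wrapped matcher.
            self.assertThat(
                '  spam  ', AfterPreprocessing(str.strip, Equals('spam')))
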
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py
new file mode 100644
index 00000000000..10967ead25b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for matchers."""
+
+from testtools import (
+ Matcher, # check that Matcher is exposed at the top level for docs.
+ TestCase,
+ )
+from testtools.compat import (
+ str_is_unicode,
+ text_repr,
+ _u,
+ )
+from testtools.matchers import (
+ Equals,
+ MatchesException,
+ Raises,
+ )
+from testtools.matchers._impl import (
+ Mismatch,
+ MismatchDecorator,
+ MismatchError,
+ )
+from testtools.tests.helpers import FullStackRunTest
+
+# Silence pyflakes.
+Matcher
+
+
+class TestMismatch(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_constructor_arguments(self):
+ mismatch = Mismatch("some description", {'detail': "things"})
+ self.assertEqual("some description", mismatch.describe())
+ self.assertEqual({'detail': "things"}, mismatch.get_details())
+
+ def test_constructor_no_arguments(self):
+ mismatch = Mismatch()
+ self.assertThat(mismatch.describe,
+ Raises(MatchesException(NotImplementedError)))
+ self.assertEqual({}, mismatch.get_details())
+
+
+class TestMismatchError(TestCase):
+
+ def test_is_assertion_error(self):
+ # MismatchError is an AssertionError, so that most of the time, it
+ # looks like a test failure, rather than an error.
+ def raise_mismatch_error():
+ raise MismatchError(2, Equals(3), Equals(3).match(2))
+ self.assertRaises(AssertionError, raise_mismatch_error)
+
+ def test_default_description_is_mismatch(self):
+ mismatch = Equals(3).match(2)
+ e = MismatchError(2, Equals(3), mismatch)
+ self.assertEqual(mismatch.describe(), str(e))
+
+ def test_default_description_unicode(self):
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ mismatch = matcher.match(matchee)
+ e = MismatchError(matchee, matcher, mismatch)
+ self.assertEqual(mismatch.describe(), str(e))
+
+ def test_verbose_description(self):
+ matchee = 2
+ matcher = Equals(3)
+ mismatch = matcher.match(2)
+ e = MismatchError(matchee, matcher, mismatch, True)
+ expected = (
+ 'Match failed. Matchee: %r\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ matchee,
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ self.assertEqual(expected, str(e))
+
+ def test_verbose_unicode(self):
+ # When assertThat is given matchees or matchers that contain non-ASCII
+ # unicode strings, we can still provide a meaningful error.
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ mismatch = matcher.match(matchee)
+ expected = (
+ 'Match failed. Matchee: %s\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ text_repr(matchee),
+ matcher,
+ mismatch.describe(),
+ ))
+ e = MismatchError(matchee, matcher, mismatch, True)
+ if str_is_unicode:
+ actual = str(e)
+ else:
+ actual = unicode(e)
+ # Using str() should still work, and return ascii only
+ self.assertEqual(
+ expected.replace(matchee, matchee.encode("unicode-escape")),
+ str(e).decode("ascii"))
+ self.assertEqual(expected, actual)
+
+
+class TestMismatchDecorator(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_forwards_description(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(x.describe(), decorated.describe())
+
+ def test_forwards_details(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(x.get_details(), decorated.get_details())
+
+ def test_repr(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(
+ '<testtools.matchers.MismatchDecorator(%r)>' % (x,),
+ repr(decorated))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
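
test_impl.py exercises the Matcher/Mismatch protocol itself; a minimal custom matcher built on that protocol could look like the following sketch. IsEven is a made-up example, not part of testtools.

    from testtools import Matcher, TestCase
    from testtools.matchers import Mismatch


    class IsEven(Matcher):
        """Match any even integer."""

        def __str__(self):
            return 'IsEven()'

        def match(self, actual):
            # Return None on success, or a Mismatch describing the failure.
            if actual % 2 == 0:
                return None
            return Mismatch('%r is not even' % (actual,))


    class IsEvenTests(TestCase):

        def test_even_number_matches(self):
            self.assertThat(4, IsEven())

        def test_mismatch_description(self):
            self.assertEqual('3 is not even', IsEven().match(3).describe())
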
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py
new file mode 100644
index 00000000000..84e57be472c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py
@@ -0,0 +1,603 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for miscellaneous compatibility functions"""
+
+import io
+import linecache
+import os
+import sys
+import tempfile
+import traceback
+
+import testtools
+
+from testtools.compat import (
+ _b,
+ _detect_encoding,
+ _format_exc_info,
+ _format_exception_only,
+ _format_stack_list,
+ _get_source_encoding,
+ _u,
+ reraise,
+ str_is_unicode,
+ text_repr,
+ unicode_output_stream,
+ )
+from testtools.matchers import (
+ Equals,
+ Is,
+ IsInstance,
+ MatchesException,
+ Not,
+ Raises,
+ )
+
+
+class TestDetectEncoding(testtools.TestCase):
+ """Test detection of Python source encodings"""
+
+ def _check_encoding(self, expected, lines, possibly_invalid=False):
+ """Check lines are valid Python and encoding is as expected"""
+ if not possibly_invalid:
+ compile(_b("".join(lines)), "<str>", "exec")
+ encoding = _detect_encoding(lines)
+ self.assertEqual(expected, encoding,
+ "Encoding %r expected but got %r from lines %r" %
+ (expected, encoding, lines))
+
+ def test_examples_from_pep(self):
+ """Check the examples given in PEP 263 all work as specified
+
+ See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
+ """
+ # With interpreter binary and using Emacs style file encoding comment:
+ self._check_encoding("latin-1", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: latin-1 -*-\n",
+ "import os, sys\n"))
+ self._check_encoding("iso-8859-15", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: iso-8859-15 -*-\n",
+ "import os, sys\n"))
+ self._check_encoding("ascii", (
+ "#!/usr/bin/python\n",
+ "# -*- coding: ascii -*-\n",
+ "import os, sys\n"))
+ # Without interpreter line, using plain text:
+ self._check_encoding("utf-8", (
+ "# This Python file uses the following encoding: utf-8\n",
+ "import os, sys\n"))
+ # Text editors might have different ways of defining the file's
+ # encoding, e.g.
+ self._check_encoding("latin-1", (
+ "#!/usr/local/bin/python\n",
+ "# coding: latin-1\n",
+ "import os, sys\n"))
+ # Without encoding comment, Python's parser will assume ASCII text:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "import os, sys\n"))
+ # Encoding comments which don't work:
+ # Missing "coding:" prefix:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "# latin-1\n",
+ "import os, sys\n"))
+ # Encoding comment not on line 1 or 2:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "#\n",
+ "# -*- coding: latin-1 -*-\n",
+ "import os, sys\n"))
+ # Unsupported encoding:
+ self._check_encoding("ascii", (
+ "#!/usr/local/bin/python\n",
+ "# -*- coding: utf-42 -*-\n",
+ "import os, sys\n"),
+ possibly_invalid=True)
+
+ def test_bom(self):
+ """Test the UTF-8 BOM counts as an encoding declaration"""
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbfimport sys\n",
+ ))
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbf# File encoding: utf-8\n",
+ ))
+ self._check_encoding("utf-8", (
+ '\xef\xbb\xbf"""Module docstring\n',
+ '\xef\xbb\xbfThat should just be a ZWNB"""\n'))
+ self._check_encoding("latin-1", (
+ '"""Is this coding: latin-1 or coding: utf-8 instead?\n',
+ '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
+ self._check_encoding("utf-8", (
+ "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
+ '"""Module docstring say \xe2\x98\x86"""\n'),
+ possibly_invalid=True)
+
+ def test_multiple_coding_comments(self):
+ """Test only the first of multiple coding declarations counts"""
+ self._check_encoding("iso-8859-1", (
+ "# Is the coding: iso-8859-1\n",
+ "# Or is it coding: iso-8859-2\n"),
+ possibly_invalid=True)
+ self._check_encoding("iso-8859-1", (
+ "#!/usr/bin/python\n",
+ "# Is the coding: iso-8859-1\n",
+ "# Or is it coding: iso-8859-2\n"))
+ self._check_encoding("iso-8859-1", (
+ "# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
+ "# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
+ possibly_invalid=True)
+ self._check_encoding("iso-8859-2", (
+ "# Is the coding iso-8859-1 or coding: iso-8859-2\n",
+ "# Spot the missing colon above\n"))
+
+
+class TestGetSourceEncoding(testtools.TestCase):
+ """Test reading and caching the encodings of source files"""
+
+ def setUp(self):
+ testtools.TestCase.setUp(self)
+ dir = tempfile.mkdtemp()
+ self.addCleanup(os.rmdir, dir)
+ self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
+ self._written = False
+
+ def put_source(self, text):
+ f = open(self.filename, "w")
+ try:
+ f.write(text)
+ finally:
+ f.close()
+ if not self._written:
+ self._written = True
+ self.addCleanup(os.remove, self.filename)
+ self.addCleanup(linecache.cache.pop, self.filename, None)
+
+ def test_nonexistant_file_as_ascii(self):
+ """When file can't be found, the encoding should default to ascii"""
+ self.assertEquals("ascii", _get_source_encoding(self.filename))
+
+ def test_encoding_is_cached(self):
+ """The encoding should stay the same if the cache isn't invalidated"""
+ self.put_source(
+ "# coding: iso-8859-13\n"
+ "import os\n")
+ self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+ self.put_source(
+ "# coding: rot-13\n"
+ "vzcbeg bf\n")
+ self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+
+ def test_traceback_rechecks_encoding(self):
+ """A traceback function checks the cache and resets the encoding"""
+ self.put_source(
+ "# coding: iso-8859-8\n"
+ "import os\n")
+ self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
+ self.put_source(
+ "# coding: utf-8\n"
+ "import os\n")
+ try:
+ exec (compile("raise RuntimeError\n", self.filename, "exec"))
+ except RuntimeError:
+ traceback.extract_tb(sys.exc_info()[2])
+ else:
+ self.fail("RuntimeError not raised")
+ self.assertEquals("utf-8", _get_source_encoding(self.filename))
+
+
+class _FakeOutputStream(object):
+ """A simple file-like object for testing"""
+
+ def __init__(self):
+ self.writelog = []
+
+ def write(self, obj):
+ self.writelog.append(obj)
+
+
+class TestUnicodeOutputStream(testtools.TestCase):
+ """Test wrapping output streams so they work with arbitrary unicode"""
+
+ uni = _u("pa\u026a\u03b8\u0259n")
+
+ def setUp(self):
+ super(TestUnicodeOutputStream, self).setUp()
+ if sys.platform == "cli":
+ self.skip("IronPython shouldn't wrap streams to do encoding")
+
+ def test_no_encoding_becomes_ascii(self):
+ """A stream with no encoding attribute gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_encoding_as_none_becomes_ascii(self):
+ """A stream with encoding value of None gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ sout.encoding = None
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_bogus_encoding_becomes_ascii(self):
+ """A stream with a bogus encoding gets ascii/replace strings"""
+ sout = _FakeOutputStream()
+ sout.encoding = "bogus"
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa???n")], sout.writelog)
+
+ def test_partial_encoding_replace(self):
+ """A string which can be partly encoded correctly should be"""
+ sout = _FakeOutputStream()
+ sout.encoding = "iso-8859-7"
+ unicode_output_stream(sout).write(self.uni)
+ self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
+
+ @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode")
+ def test_unicode_encodings_wrapped_when_str_is_not_unicode(self):
+ """A unicode encoding is wrapped but needs no error handler"""
+ sout = _FakeOutputStream()
+ sout.encoding = "utf-8"
+ uout = unicode_output_stream(sout)
+ self.assertEqual(uout.errors, "strict")
+ uout.write(self.uni)
+ self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog)
+
+ @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode")
+ def test_unicode_encodings_not_wrapped_when_str_is_unicode(self):
+ # No wrapping needed if native str type is unicode
+ sout = _FakeOutputStream()
+ sout.encoding = "utf-8"
+ uout = unicode_output_stream(sout)
+ self.assertIs(uout, sout)
+
+ def test_stringio(self):
+ """A StringIO object should maybe get an ascii native str type"""
+ try:
+ from cStringIO import StringIO
+ newio = False
+ except ImportError:
+ from io import StringIO
+ newio = True
+ sout = StringIO()
+ soutwrapper = unicode_output_stream(sout)
+ soutwrapper.write(self.uni)
+ if newio:
+ self.assertEqual(self.uni, sout.getvalue())
+ else:
+ self.assertEqual("pa???n", sout.getvalue())
+
+ def test_io_stringio(self):
+ # io.StringIO only accepts unicode so should be returned as itself.
+ s = io.StringIO()
+ self.assertEqual(s, unicode_output_stream(s))
+
+ def test_io_bytesio(self):
+ # io.BytesIO only accepts bytes so should be wrapped.
+ bytes_io = io.BytesIO()
+ self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io))))
+ # Will error if s was not wrapped properly.
+ unicode_output_stream(bytes_io).write(_u('foo'))
+
+ def test_io_textwrapper(self):
+        # io.TextIOWrapper accepts unicode, so it should be returned as itself.
+ text_io = io.TextIOWrapper(io.BytesIO())
+ self.assertThat(unicode_output_stream(text_io), Is(text_io))
+ # To be sure...
+ unicode_output_stream(text_io).write(_u('foo'))
+
+
+class TestTextRepr(testtools.TestCase):
+ """Ensure in extending repr, basic behaviours are not being broken"""
+
+ ascii_examples = (
+ # Single character examples
+ # C0 control codes should be escaped except multiline \n
+ ("\x00", "'\\x00'", "'''\\\n\\x00'''"),
+ ("\b", "'\\x08'", "'''\\\n\\x08'''"),
+ ("\t", "'\\t'", "'''\\\n\\t'''"),
+ ("\n", "'\\n'", "'''\\\n\n'''"),
+ ("\r", "'\\r'", "'''\\\n\\r'''"),
+ # Quotes and backslash should match normal repr behaviour
+ ('"', "'\"'", "'''\\\n\"'''"),
+ ("'", "\"'\"", "'''\\\n\\''''"),
+ ("\\", "'\\\\'", "'''\\\n\\\\'''"),
+ # DEL is also unprintable and should be escaped
+ ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"),
+
+ # Character combinations that need double checking
+ ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"),
+ ("\"'", "'\"\\''", "'''\\\n\"\\''''"),
+ ("'\"", "'\\'\"'", "'''\\\n'\"'''"),
+ ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"),
+ ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"),
+ ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"),
+ ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"),
+ ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"),
+ ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"),
+ )
+
+ # Bytes with the high bit set should always be escaped
+ bytes_examples = (
+ (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
+ (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
+ (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"),
+ (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"),
+ (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"),
+ )
+
+ # Unicode doesn't escape printable characters as per the Python 3 model
+ unicode_examples = (
+ # C1 codes are unprintable
+ (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
+ (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"),
+ # No-break space is unprintable
+ (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
+        # Letters in Latin alphabets are printable
+ (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")),
+ (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")),
+ (_u("\u0100"), _u("'\u0100'"), _u("'''\\\n\u0100'''")),
+        # Line and paragraph separators are unprintable
+ (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"),
+ (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"),
+ # Unpaired surrogates are unprintable
+ (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"),
+ (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"),
+ # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs
+ )
+
+ b_prefix = repr(_b(""))[:-2]
+ u_prefix = repr(_u(""))[:-2]
+
+ def test_ascii_examples_oneline_bytes(self):
+ for s, expected, _ in self.ascii_examples:
+ b = _b(s)
+ actual = text_repr(b, multiline=False)
+ # Add self.assertIsInstance check?
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_ascii_examples_oneline_unicode(self):
+ for s, expected, _ in self.ascii_examples:
+ u = _u(s)
+ actual = text_repr(u, multiline=False)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+ def test_ascii_examples_multiline_bytes(self):
+ for s, _, expected in self.ascii_examples:
+ b = _b(s)
+ actual = text_repr(b, multiline=True)
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_ascii_examples_multiline_unicode(self):
+ for s, _, expected in self.ascii_examples:
+ u = _u(s)
+ actual = text_repr(u, multiline=True)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+ def test_ascii_examples_defaultline_bytes(self):
+ for s, one, multi in self.ascii_examples:
+ expected = "\n" in s and multi or one
+ self.assertEqual(text_repr(_b(s)), self.b_prefix + expected)
+
+ def test_ascii_examples_defaultline_unicode(self):
+ for s, one, multi in self.ascii_examples:
+ expected = "\n" in s and multi or one
+ self.assertEqual(text_repr(_u(s)), self.u_prefix + expected)
+
+ def test_bytes_examples_oneline(self):
+ for b, expected, _ in self.bytes_examples:
+ actual = text_repr(b, multiline=False)
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_bytes_examples_multiline(self):
+ for b, _, expected in self.bytes_examples:
+ actual = text_repr(b, multiline=True)
+ self.assertEqual(actual, self.b_prefix + expected)
+ self.assertEqual(eval(actual), b)
+
+ def test_unicode_examples_oneline(self):
+ for u, expected, _ in self.unicode_examples:
+ actual = text_repr(u, multiline=False)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+ def test_unicode_examples_multiline(self):
+ for u, _, expected in self.unicode_examples:
+ actual = text_repr(u, multiline=True)
+ self.assertEqual(actual, self.u_prefix + expected)
+ self.assertEqual(eval(actual), u)
+
+
+class TestReraise(testtools.TestCase):
+ """Tests for trivial reraise wrapper needed for Python 2/3 changes"""
+
+ def test_exc_info(self):
+ """After reraise exc_info matches plus some extra traceback"""
+ try:
+ raise ValueError("Bad value")
+ except ValueError:
+ _exc_info = sys.exc_info()
+ try:
+ reraise(*_exc_info)
+ except ValueError:
+ _new_exc_info = sys.exc_info()
+ self.assertIs(_exc_info[0], _new_exc_info[0])
+ self.assertIs(_exc_info[1], _new_exc_info[1])
+ expected_tb = traceback.extract_tb(_exc_info[2])
+ self.assertEqual(expected_tb,
+ traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):])
+
+ def test_custom_exception_no_args(self):
+ """Reraising does not require args attribute to contain params"""
+
+ class CustomException(Exception):
+ """Exception that expects and sets attrs but not args"""
+
+ def __init__(self, value):
+ Exception.__init__(self)
+ self.value = value
+
+ try:
+ raise CustomException("Some value")
+ except CustomException:
+ _exc_info = sys.exc_info()
+ self.assertRaises(CustomException, reraise, *_exc_info)
+
+
+class Python2CompatibilityTests(testtools.TestCase):
+
+ def setUp(self):
+ super(Python2CompatibilityTests, self).setUp()
+ if sys.version[0] >= '3':
+ self.skip("These tests are only applicable to python 2.")
+
+
+class TestExceptionFormatting(Python2CompatibilityTests):
+ """Test the _format_exception_only function."""
+
+ def _assert_exception_format(self, eclass, evalue, expected):
+ actual = _format_exception_only(eclass, evalue)
+ self.assertThat(actual, Equals(expected))
+ self.assertThat(''.join(actual), IsInstance(unicode))
+
+ def test_supports_string_exception(self):
+ self._assert_exception_format(
+ "String_Exception",
+ None,
+ [_u("String_Exception\n")]
+ )
+
+ def test_supports_regular_exception(self):
+ self._assert_exception_format(
+ RuntimeError,
+ RuntimeError("Something went wrong"),
+ [_u("RuntimeError: Something went wrong\n")]
+ )
+
+ def test_supports_unprintable_exceptions(self):
+ """Verify support for exception classes that raise an exception when
+ __unicode__ or __str__ is called.
+ """
+ class UnprintableException(Exception):
+
+ def __str__(self):
+ raise Exception()
+
+ def __unicode__(self):
+ raise Exception()
+
+ self._assert_exception_format(
+ UnprintableException,
+ UnprintableException("Foo"),
+ [_u("UnprintableException: <unprintable UnprintableException object>\n")]
+ )
+
+ def test_supports_exceptions_with_no_string_value(self):
+ class NoStringException(Exception):
+
+ def __str__(self):
+ return ""
+
+ def __unicode__(self):
+ return _u("")
+
+ self._assert_exception_format(
+ NoStringException,
+ NoStringException("Foo"),
+ [_u("NoStringException\n")]
+ )
+
+ def test_supports_strange_syntax_error(self):
+ """Test support for syntax errors with unusual number of arguments"""
+ self._assert_exception_format(
+ SyntaxError,
+ SyntaxError("Message"),
+ [_u("SyntaxError: Message\n")]
+ )
+
+ def test_supports_syntax_error(self):
+ self._assert_exception_format(
+ SyntaxError,
+ SyntaxError(
+ "Some Syntax Message",
+ (
+ "/path/to/file",
+ 12,
+ 2,
+ "This is the line of code",
+ )
+ ),
+ [
+ _u(' File "/path/to/file", line 12\n'),
+ _u(' This is the line of code\n'),
+ _u(' ^\n'),
+ _u('SyntaxError: Some Syntax Message\n'),
+ ]
+ )
+
+
+class StackListFormattingTests(Python2CompatibilityTests):
+ """Test the _format_stack_list function."""
+
+ def _assert_stack_format(self, stack_lines, expected_output):
+ actual = _format_stack_list(stack_lines)
+ self.assertThat(actual, Equals([expected_output]))
+
+ def test_single_complete_stack_line(self):
+ stack_lines = [(
+ '/path/to/filename',
+ 12,
+ 'func_name',
+ 'some_code()',
+ )]
+ expected = \
+ _u(' File "/path/to/filename", line 12, in func_name\n' \
+ ' some_code()\n')
+
+ self._assert_stack_format(stack_lines, expected)
+
+ def test_single_stack_line_no_code(self):
+ stack_lines = [(
+ '/path/to/filename',
+ 12,
+ 'func_name',
+ None
+ )]
+ expected = _u(' File "/path/to/filename", line 12, in func_name\n')
+ self._assert_stack_format(stack_lines, expected)
+
+
+class FormatExceptionInfoTests(Python2CompatibilityTests):
+
+ def test_individual_functions_called(self):
+ self.patch(
+ testtools.compat,
+ '_format_stack_list',
+ lambda stack_list: [_u("format stack list called\n")]
+ )
+ self.patch(
+ testtools.compat,
+ '_format_exception_only',
+ lambda etype, evalue: [_u("format exception only called\n")]
+ )
+ result = _format_exc_info(None, None, None)
+ expected = [
+ _u("Traceback (most recent call last):\n"),
+ _u("format stack list called\n"),
+ _u("format exception only called\n"),
+ ]
+ self.assertThat(expected, Equals(result))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
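
The compat helpers tested above are internal to testtools, but the text_repr behaviour they pin down is easy to see on its own. A small sketch, assuming only that text_repr round-trips through eval() as the tests assert:

    from testtools.compat import text_repr

    # The one-line form escapes the newline like ordinary repr(); the multiline
    # form switches to a triple-quoted literal so embedded newlines stay
    # readable in failure output. Both round-trip through eval().
    single = text_repr('first\nsecond', multiline=False)
    multi = text_repr('first\nsecond', multiline=True)
    assert eval(single) == 'first\nsecond'
    assert eval(multi) == 'first\nsecond'
    print(single)
    print(multi)
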
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py
new file mode 100644
index 00000000000..9ed1b2ffba5
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py
@@ -0,0 +1,349 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import json
+import os
+import tempfile
+import unittest
+
+from testtools import TestCase
+from testtools.compat import (
+ _b,
+ _u,
+ BytesIO,
+ StringIO,
+ )
+from testtools.content import (
+ attach_file,
+ Content,
+ content_from_file,
+ content_from_stream,
+ JSON,
+ json_content,
+ StackLinesContent,
+ StacktraceContent,
+ TracebackContent,
+ text_content,
+ )
+from testtools.content_type import (
+ ContentType,
+ UTF8_TEXT,
+ )
+from testtools.matchers import (
+ Equals,
+ MatchesException,
+ Raises,
+ raises,
+ )
+from testtools.tests.helpers import an_exc_info
+
+
+raises_value_error = Raises(MatchesException(ValueError))
+
+
+class TestContent(TestCase):
+
+ def test___init___None_errors(self):
+ self.assertThat(lambda: Content(None, None), raises_value_error)
+ self.assertThat(
+ lambda: Content(None, lambda: ["traceback"]), raises_value_error)
+ self.assertThat(
+ lambda: Content(ContentType("text", "traceback"), None),
+ raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertEqual(content_type, content.content_type)
+ self.assertEqual(["bytes"], list(content.iter_bytes()))
+
+ def test___eq__(self):
+ content_type = ContentType("foo", "bar")
+ one_chunk = lambda: [_b("bytes")]
+ two_chunk = lambda: [_b("by"), _b("tes")]
+ content1 = Content(content_type, one_chunk)
+ content2 = Content(content_type, one_chunk)
+ content3 = Content(content_type, two_chunk)
+ content4 = Content(content_type, lambda: [_b("by"), _b("te")])
+ content5 = Content(ContentType("f", "b"), two_chunk)
+ self.assertEqual(content1, content2)
+ self.assertEqual(content1, content3)
+ self.assertNotEqual(content1, content4)
+ self.assertNotEqual(content1, content5)
+
+ def test___repr__(self):
+ content = Content(ContentType("application", "octet-stream"),
+ lambda: [_b("\x00bin"), _b("ary\xff")])
+ self.assertIn("\\x00binary\\xff", repr(content))
+
+ def test_iter_text_not_text_errors(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertThat(content.iter_text, raises_value_error)
+
+ def test_iter_text_decodes(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
+
+ def test_iter_text_default_charset_iso_8859_1(self):
+ content_type = ContentType("text", "strange")
+ text = _u("bytes\xea")
+ iso_version = text.encode("ISO-8859-1")
+ content = Content(content_type, lambda: [iso_version])
+ self.assertEqual([text], list(content.iter_text()))
+
+ def test_as_text(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual(_u("bytes\xea"), content.as_text())
+
+ def test_from_file(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ os.write(fd, _b('some data'))
+ os.close(fd)
+ content = content_from_file(path, UTF8_TEXT, chunk_size=2)
+ self.assertThat(
+ list(content.iter_bytes()),
+ Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')]))
+
+ def test_from_nonexistent_file(self):
+ directory = tempfile.mkdtemp()
+ nonexistent = os.path.join(directory, 'nonexistent-file')
+ content = content_from_file(nonexistent)
+ self.assertThat(content.iter_bytes, raises(IOError))
+
+ def test_from_file_default_type(self):
+ content = content_from_file('/nonexistent/path')
+ self.assertThat(content.content_type, Equals(UTF8_TEXT))
+
+ def test_from_file_eager_loading(self):
+ fd, path = tempfile.mkstemp()
+ os.write(fd, _b('some data'))
+ os.close(fd)
+ content = content_from_file(path, UTF8_TEXT, buffer_now=True)
+ os.remove(path)
+ self.assertThat(
+ ''.join(content.iter_text()), Equals('some data'))
+
+ def test_from_file_with_simple_seek(self):
+ f = tempfile.NamedTemporaryFile()
+ f.write(_b('some data'))
+ f.flush()
+ self.addCleanup(f.close)
+ content = content_from_file(
+ f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_file_with_whence_seek(self):
+ f = tempfile.NamedTemporaryFile()
+ f.write(_b('some data'))
+ f.flush()
+ self.addCleanup(f.close)
+ content = content_from_file(
+ f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_stream(self):
+ data = StringIO('some data')
+ content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a']))
+
+ def test_from_stream_default_type(self):
+ data = StringIO('some data')
+ content = content_from_stream(data)
+ self.assertThat(content.content_type, Equals(UTF8_TEXT))
+
+ def test_from_stream_eager_loading(self):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ self.addCleanup(os.close, fd)
+ os.write(fd, _b('some data'))
+ stream = open(path, 'rb')
+ self.addCleanup(stream.close)
+ content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
+ os.write(fd, _b('more data'))
+ self.assertThat(
+ ''.join(content.iter_text()), Equals('some data'))
+
+ def test_from_stream_with_simple_seek(self):
+ data = BytesIO(_b('some data'))
+ content = content_from_stream(
+ data, UTF8_TEXT, chunk_size=50, seek_offset=5)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_stream_with_whence_seek(self):
+ data = BytesIO(_b('some data'))
+ content = content_from_stream(
+ data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_text(self):
+ data = _u("some data")
+ expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
+ self.assertEqual(expected, text_content(data))
+
+ def test_json_content(self):
+ data = {'foo': 'bar'}
+ expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
+ self.assertEqual(expected, json_content(data))
+
+
+class TestStackLinesContent(TestCase):
+
+ def _get_stack_line_and_expected_output(self):
+ stack_lines = [
+ ('/path/to/file', 42, 'some_function', 'print("Hello World")'),
+ ]
+ expected = ' File "/path/to/file", line 42, in some_function\n' \
+ ' print("Hello World")\n'
+ return stack_lines, expected
+
+ def test_single_stack_line(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ actual = StackLinesContent(stack_lines).as_text()
+
+ self.assertEqual(expected, actual)
+
+ def test_prefix_content(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ prefix = self.getUniqueString() + '\n'
+ content = StackLinesContent(stack_lines, prefix_content=prefix)
+ actual = content.as_text()
+ expected = prefix + expected
+
+ self.assertEqual(expected, actual)
+
+ def test_postfix_content(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ postfix = '\n' + self.getUniqueString()
+ content = StackLinesContent(stack_lines, postfix_content=postfix)
+ actual = content.as_text()
+ expected = expected + postfix
+
+ self.assertEqual(expected, actual)
+
+ def test___init___sets_content_type(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ content = StackLinesContent(stack_lines)
+ expected_content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+
+ self.assertEqual(expected_content_type, content.content_type)
+
+
+class TestTracebackContent(TestCase):
+
+ def test___init___None_errors(self):
+ self.assertThat(
+ lambda: TracebackContent(None, None), raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content = TracebackContent(an_exc_info, self)
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+ self.assertEqual(content_type, content.content_type)
+ result = unittest.TestResult()
+ expected = result._exc_info_to_string(an_exc_info, self)
+ self.assertEqual(expected, ''.join(list(content.iter_text())))
+
+
+class TestStacktraceContent(TestCase):
+
+ def test___init___sets_ivars(self):
+ content = StacktraceContent()
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+
+ self.assertEqual(content_type, content.content_type)
+
+ def test_prefix_is_used(self):
+ prefix = self.getUniqueString()
+ actual = StacktraceContent(prefix_content=prefix).as_text()
+
+ self.assertTrue(actual.startswith(prefix))
+
+ def test_postfix_is_used(self):
+ postfix = self.getUniqueString()
+ actual = StacktraceContent(postfix_content=postfix).as_text()
+
+ self.assertTrue(actual.endswith(postfix))
+
+ def test_top_frame_is_skipped_when_no_stack_is_specified(self):
+ actual = StacktraceContent().as_text()
+
+ self.assertTrue('testtools/content.py' not in actual)
+
+
+class TestAttachFile(TestCase):
+
+ def make_file(self, data):
+ # GZ 2011-04-21: This helper could be useful for methods above trying
+ # to use mkstemp, but should handle write failures and
+ # always close the fd. There must be a better way.
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ os.write(fd, _b(data))
+ os.close(fd)
+ return path
+
+ def test_simple(self):
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ data = 'some data'
+ path = self.make_file(data)
+ my_content = text_content(data)
+ attach_file(test, path, name='foo')
+ self.assertEqual({'foo': my_content}, test.getDetails())
+
+ def test_optional_name(self):
+ # If no name is provided, attach_file just uses the base name of the
+ # file.
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ path = self.make_file('some data')
+ base_path = os.path.basename(path)
+ attach_file(test, path)
+ self.assertEqual([base_path], list(test.getDetails()))
+
+ def test_lazy_read(self):
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ path = self.make_file('some data')
+ attach_file(test, path, name='foo', buffer_now=False)
+ content = test.getDetails()['foo']
+ content_file = open(path, 'w')
+ content_file.write('new data')
+ content_file.close()
+ self.assertEqual(''.join(content.iter_text()), 'new data')
+
+ def test_eager_read_by_default(self):
+ class SomeTest(TestCase):
+ def test_foo(self):
+ pass
+ test = SomeTest('test_foo')
+ path = self.make_file('some data')
+ attach_file(test, path, name='foo')
+ content = test.getDetails()['foo']
+ content_file = open(path, 'w')
+ content_file.write('new data')
+ content_file.close()
+ self.assertEqual(''.join(content.iter_text()), 'some data')
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
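
To ground the content tests above: the usual way a test attaches extra data is addDetail() plus one of the content helpers. A minimal sketch follows; the detail name and payload are illustrative.

    from testtools import TestCase
    from testtools.content import text_content


    class DetailAttachingTests(TestCase):

        def test_records_worker_log(self):
            # Details travel with the test result and are shown when the test
            # fails, so logs can be captured without printing to stdout.
            self.addDetail('worker-log', text_content('started\nfinished\n'))
            self.assertEqual(4, 2 + 2)
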
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py
new file mode 100644
index 00000000000..2d34f95e479
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import Equals, MatchesException, Raises
+from testtools.content_type import (
+ ContentType,
+ JSON,
+ UTF8_TEXT,
+ )
+
+
+class TestContentType(TestCase):
+
+ def test___init___None_errors(self):
+ raises_value_error = Raises(MatchesException(ValueError))
+ self.assertThat(lambda:ContentType(None, None), raises_value_error)
+ self.assertThat(lambda:ContentType(None, "traceback"),
+ raises_value_error)
+ self.assertThat(lambda:ContentType("text", None), raises_value_error)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ self.assertEqual("foo", content_type.type)
+ self.assertEqual("bar", content_type.subtype)
+ self.assertEqual({}, content_type.parameters)
+
+ def test___init___with_parameters(self):
+ content_type = ContentType("foo", "bar", {"quux": "thing"})
+ self.assertEqual({"quux": "thing"}, content_type.parameters)
+
+ def test___eq__(self):
+ content_type1 = ContentType("foo", "bar", {"quux": "thing"})
+ content_type2 = ContentType("foo", "bar", {"quux": "thing"})
+ content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
+ self.assertTrue(content_type1.__eq__(content_type2))
+ self.assertFalse(content_type1.__eq__(content_type3))
+
+ def test_basic_repr(self):
+ content_type = ContentType('text', 'plain')
+ self.assertThat(repr(content_type), Equals('text/plain'))
+
+ def test_extended_repr(self):
+ content_type = ContentType(
+ 'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
+ self.assertThat(
+ repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
+
+
+class TestBuiltinContentTypes(TestCase):
+
+ def test_plain_text(self):
+ # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
+ self.assertThat(UTF8_TEXT.type, Equals('text'))
+ self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
+ self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
+
+ def test_json_content(self):
+        # The JSON content type represents implicitly UTF-8 application/json.
+ self.assertThat(JSON.type, Equals('application'))
+ self.assertThat(JSON.subtype, Equals('json'))
+ self.assertThat(JSON.parameters, Equals({}))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
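
A short sketch of building a ContentType/Content pair by hand, mirroring what the tests above assert; the CSV payload is invented for illustration.

    from testtools.content import Content
    from testtools.content_type import ContentType

    # Content streams bytes lazily from the supplied callable; iter_text()
    # decodes them using the charset parameter of the content type.
    csv_type = ContentType('text', 'csv', {'charset': 'utf8'})
    report = Content(csv_type, lambda: [b'name,count\n', b'spam,3\n'])
    assert ''.join(report.iter_text()) == 'name,count\nspam,3\n'
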
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py
new file mode 100644
index 00000000000..f0510dc9a9f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py
@@ -0,0 +1,767 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+"""Tests for the DeferredRunTest single test execution logic."""
+
+import os
+import signal
+
+from extras import try_import
+
+from testtools import (
+ skipIf,
+ TestCase,
+ TestResult,
+ )
+from testtools.content import (
+ text_content,
+ )
+from testtools.matchers import (
+ Equals,
+ KeysEqual,
+ MatchesException,
+ Raises,
+ )
+from testtools.runtest import RunTest
+from testtools.testresult.doubles import ExtendedTestResult
+from testtools.tests.test_spinner import NeedsTwistedTestCase
+
+assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
+AsynchronousDeferredRunTest = try_import(
+ 'testtools.deferredruntest.AsynchronousDeferredRunTest')
+flush_logged_errors = try_import(
+ 'testtools.deferredruntest.flush_logged_errors')
+SynchronousDeferredRunTest = try_import(
+ 'testtools.deferredruntest.SynchronousDeferredRunTest')
+
+defer = try_import('twisted.internet.defer')
+failure = try_import('twisted.python.failure')
+log = try_import('twisted.python.log')
+DelayedCall = try_import('twisted.internet.base.DelayedCall')
+
+
+class X(object):
+ """Tests that we run as part of our tests, nested to avoid discovery."""
+
+ class Base(TestCase):
+ def setUp(self):
+ super(X.Base, self).setUp()
+ self.calls = ['setUp']
+ self.addCleanup(self.calls.append, 'clean-up')
+ def test_something(self):
+ self.calls.append('test')
+ def tearDown(self):
+ self.calls.append('tearDown')
+ super(X.Base, self).tearDown()
+
+ class ErrorInSetup(Base):
+ expected_calls = ['setUp', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def setUp(self):
+ super(X.ErrorInSetup, self).setUp()
+ raise RuntimeError("Error in setUp")
+
+ class ErrorInTest(Base):
+ expected_calls = ['setUp', 'tearDown', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def test_something(self):
+ raise RuntimeError("Error in test")
+
+ class FailureInTest(Base):
+ expected_calls = ['setUp', 'tearDown', 'clean-up']
+ expected_results = [('addFailure', AssertionError)]
+ def test_something(self):
+ self.fail("test failed")
+
+ class ErrorInTearDown(Base):
+ expected_calls = ['setUp', 'test', 'clean-up']
+ expected_results = [('addError', RuntimeError)]
+ def tearDown(self):
+ raise RuntimeError("Error in tearDown")
+
+ class ErrorInCleanup(Base):
+ expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
+ expected_results = [('addError', ZeroDivisionError)]
+ def test_something(self):
+ self.calls.append('test')
+ self.addCleanup(lambda: 1/0)
+
+ class TestIntegration(NeedsTwistedTestCase):
+
+ def assertResultsMatch(self, test, result):
+ events = list(result._events)
+ self.assertEqual(('startTest', test), events.pop(0))
+ for expected_result in test.expected_results:
+ result = events.pop(0)
+ if len(expected_result) == 1:
+ self.assertEqual((expected_result[0], test), result)
+ else:
+ self.assertEqual((expected_result[0], test), result[:2])
+ error_type = expected_result[1]
+ self.assertIn(error_type.__name__, str(result[2]))
+ self.assertEqual([('stopTest', test)], events)
+
+ def test_runner(self):
+ result = ExtendedTestResult()
+ test = self.test_factory('test_something', runTest=self.runner)
+ test.run(result)
+ self.assertEqual(test.calls, self.test_factory.expected_calls)
+ self.assertResultsMatch(test, result)
+
+
+def make_integration_tests():
+ from unittest import TestSuite
+ from testtools import clone_test_with_new_id
+ runners = [
+ ('RunTest', RunTest),
+ ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
+ ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
+ ]
+
+ tests = [
+ X.ErrorInSetup,
+ X.ErrorInTest,
+ X.ErrorInTearDown,
+ X.FailureInTest,
+ X.ErrorInCleanup,
+ ]
+ base_test = X.TestIntegration('test_runner')
+ integration_tests = []
+ for runner_name, runner in runners:
+ for test in tests:
+ new_test = clone_test_with_new_id(
+ base_test, '%s(%s, %s)' % (
+ base_test.id(),
+ runner_name,
+ test.__name__))
+ new_test.test_factory = test
+ new_test.runner = runner
+ integration_tests.append(new_test)
+ return TestSuite(integration_tests)
+
+
+class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+ def make_result(self):
+ return ExtendedTestResult()
+
+ def make_runner(self, test):
+ return SynchronousDeferredRunTest(test, test.exception_handlers)
+
+ def test_success(self):
+ class SomeCase(TestCase):
+ def test_success(self):
+ return defer.succeed(None)
+ test = SomeCase('test_success')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ result._events, Equals([
+ ('startTest', test),
+ ('addSuccess', test),
+ ('stopTest', test)]))
+
+ def test_failure(self):
+ class SomeCase(TestCase):
+ def test_failure(self):
+ return defer.maybeDeferred(self.fail, "Egads!")
+ test = SomeCase('test_failure')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events], Equals([
+ ('startTest', test),
+ ('addFailure', test),
+ ('stopTest', test)]))
+
+ def test_setUp_followed_by_test(self):
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ return defer.succeed(None)
+ def test_failure(self):
+ return defer.maybeDeferred(self.fail, "Egads!")
+ test = SomeCase('test_failure')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events], Equals([
+ ('startTest', test),
+ ('addFailure', test),
+ ('stopTest', test)]))
+
+
+class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+ def make_reactor(self):
+ from twisted.internet import reactor
+ return reactor
+
+ def make_result(self):
+ return ExtendedTestResult()
+
+ def make_runner(self, test, timeout=None):
+ if timeout is None:
+ timeout = self.make_timeout()
+ return AsynchronousDeferredRunTest(
+ test, test.exception_handlers, timeout=timeout)
+
+ def make_timeout(self):
+ return 0.005
+
+ def test_setUp_returns_deferred_that_fires_later(self):
+ # setUp can return a Deferred that might fire at any time.
+ # AsynchronousDeferredRunTest will not go on to running the test until
+ # the Deferred returned by setUp actually fires.
+ call_log = []
+ marker = object()
+ d = defer.Deferred().addCallback(call_log.append)
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ call_log.append('setUp')
+ return d
+ def test_something(self):
+ call_log.append('test')
+ def fire_deferred():
+ self.assertThat(call_log, Equals(['setUp']))
+ d.callback(marker)
+ test = SomeCase('test_something')
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout=timeout)
+ result = self.make_result()
+ reactor = self.make_reactor()
+ reactor.callLater(timeout, fire_deferred)
+ runner.run(result)
+ self.assertThat(call_log, Equals(['setUp', marker, 'test']))
+
+ def test_calls_setUp_test_tearDown_in_sequence(self):
+ # setUp, the test method and tearDown can all return
+ # Deferreds. AsynchronousDeferredRunTest will make sure that each of
+ # these is run in turn, only going on to the next stage once the
+ # Deferred from the previous stage has fired.
+ call_log = []
+ a = defer.Deferred()
+ a.addCallback(lambda x: call_log.append('a'))
+ b = defer.Deferred()
+ b.addCallback(lambda x: call_log.append('b'))
+ c = defer.Deferred()
+ c.addCallback(lambda x: call_log.append('c'))
+ class SomeCase(TestCase):
+ def setUp(self):
+ super(SomeCase, self).setUp()
+ call_log.append('setUp')
+ return a
+ def test_success(self):
+ call_log.append('test')
+ return b
+ def tearDown(self):
+ super(SomeCase, self).tearDown()
+ call_log.append('tearDown')
+ return c
+ test = SomeCase('test_success')
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ reactor = self.make_reactor()
+ def fire_a():
+ self.assertThat(call_log, Equals(['setUp']))
+ a.callback(None)
+ def fire_b():
+ self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
+ b.callback(None)
+ def fire_c():
+ self.assertThat(
+ call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
+ c.callback(None)
+ reactor.callLater(timeout * 0.25, fire_a)
+ reactor.callLater(timeout * 0.5, fire_b)
+ reactor.callLater(timeout * 0.75, fire_c)
+ runner.run(result)
+ self.assertThat(
+ call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))
+
+ def test_async_cleanups(self):
+ # Cleanups added with addCleanup can return
+ # Deferreds. AsynchronousDeferredRunTest will run each of them in
+ # turn.
+ class SomeCase(TestCase):
+ def test_whatever(self):
+ pass
+ test = SomeCase('test_whatever')
+ call_log = []
+ a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
+ b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
+ c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
+ test.addCleanup(lambda: a)
+ test.addCleanup(lambda: b)
+ test.addCleanup(lambda: c)
+ def fire_a():
+ self.assertThat(call_log, Equals([]))
+ a.callback(None)
+ def fire_b():
+ self.assertThat(call_log, Equals(['a']))
+ b.callback(None)
+ def fire_c():
+ self.assertThat(call_log, Equals(['a', 'b']))
+ c.callback(None)
+ timeout = self.make_timeout()
+ reactor = self.make_reactor()
+ reactor.callLater(timeout * 0.25, fire_a)
+ reactor.callLater(timeout * 0.5, fire_b)
+ reactor.callLater(timeout * 0.75, fire_c)
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(call_log, Equals(['a', 'b', 'c']))
+
+ def test_clean_reactor(self):
+ # If there's cruft left over in the reactor, the test fails.
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ reactor.callLater(timeout * 10.0, lambda: None)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test, timeout)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals(
+ [('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_exports_reactor(self):
+ # The reactor is set as an attribute on the test case.
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ self.assertIs(reactor, self.reactor)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test, timeout)
+ result = TestResult()
+ runner.run(result)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.failures)
+
+ def test_unhandled_error_from_deferred(self):
+ # If there's a Deferred with an unhandled error, the test fails. Each
+ # unhandled error is reported with a separate traceback.
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ # Note we aren't returning the Deferred so that the error will
+ # be unhandled.
+ defer.maybeDeferred(lambda: 1/0)
+ defer.maybeDeferred(lambda: 2/0)
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ result._events[1] = ('addError', test, None)
+ self.assertThat(result._events, Equals(
+ [('startTest', test),
+ ('addError', test, None),
+ ('stopTest', test)]))
+ self.assertThat(
+ error, KeysEqual(
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ 'unhandled-error-in-deferred-1',
+ ))
+
+ def test_unhandled_error_from_deferred_combined_with_error(self):
+ # If there's a Deferred with an unhandled error, the test fails. Each
+ # unhandled error is reported with a separate traceback, and the error
+ # is still reported.
+ class SomeCase(TestCase):
+ def test_cruft(self):
+ # Note we aren't returning the Deferred so that the error will
+ # be unhandled.
+ defer.maybeDeferred(lambda: 1/0)
+ 2 / 0
+ test = SomeCase('test_cruft')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ result._events[1] = ('addError', test, None)
+ self.assertThat(result._events, Equals(
+ [('startTest', test),
+ ('addError', test, None),
+ ('stopTest', test)]))
+ self.assertThat(
+ error, KeysEqual(
+ 'traceback',
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ ))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_keyboard_interrupt_stops_test_run(self):
+ # If we get a SIGINT during a test run, the test stops and no more
+ # tests run.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ raise self.skipTest("SIGINT unavailable")
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout * 5)
+ result = self.make_result()
+ reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:runner.run(result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_keyboard_interrupt_stops_test_run(self):
+ # If we get a SIGINT during a test run, the test stops and no more
+ # tests run.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ raise self.skipTest("SIGINT unavailable")
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ runner = self.make_runner(test, timeout * 5)
+ result = self.make_result()
+ reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:runner.run(result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ def test_timeout_causes_test_error(self):
+ # If a test times out, it reports itself as having failed with a
+ # TimeoutError.
+ class SomeCase(TestCase):
+ def test_pause(self):
+ return defer.Deferred()
+ test = SomeCase('test_pause')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ error = result._events[1][2]
+ self.assertThat(
+ [event[:2] for event in result._events], Equals(
+ [('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ self.assertIn('TimeoutError', str(error['traceback']))
+
+ def test_convenient_construction(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ reactor = object()
+ timeout = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
+ runner = factory(self, [handler])
+ self.assertIs(reactor, runner._reactor)
+ self.assertIs(timeout, runner._timeout)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_use_convenient_factory(self):
+ # Make sure that the factory can actually be used.
+ factory = AsynchronousDeferredRunTest.make_factory()
+ class SomeCase(TestCase):
+ run_tests_with = factory
+ def test_something(self):
+ pass
+ case = SomeCase('test_something')
+ case.run()
+
+ def test_convenient_construction_default_reactor(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ reactor = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
+ runner = factory(self, [handler])
+ self.assertIs(reactor, runner._reactor)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_convenient_construction_default_timeout(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ timeout = object()
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
+ runner = factory(self, [handler])
+ self.assertIs(timeout, runner._timeout)
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+
+ def test_convenient_construction_default_debugging(self):
+ # As a convenience method, AsynchronousDeferredRunTest has a
+ # classmethod that returns an AsynchronousDeferredRunTest
+ # factory. This factory has the same API as the RunTest constructor.
+ handler = object()
+ factory = AsynchronousDeferredRunTest.make_factory(debug=True)
+ runner = factory(self, [handler])
+ self.assertIs(self, runner.case)
+ self.assertEqual([handler], runner.handlers)
+ self.assertEqual(True, runner._debug)
+
+ def test_deferred_error(self):
+ class SomeTest(TestCase):
+ def test_something(self):
+ return defer.maybeDeferred(lambda: 1/0)
+ test = SomeTest('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_only_addError_once(self):
+ # Even if the reactor is unclean and the test raises an error and the
+ # cleanups raise errors, we only call addError once per test.
+ reactor = self.make_reactor()
+ class WhenItRains(TestCase):
+ def it_pours(self):
+ # Add a dirty cleanup.
+ self.addCleanup(lambda: 3 / 0)
+ # Dirty the reactor.
+ from twisted.internet.protocol import ServerFactory
+ reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
+ # Unhandled error.
+ defer.maybeDeferred(lambda: 2 / 0)
+ # Actual error.
+ raise RuntimeError("Excess precipitation")
+ test = WhenItRains('it_pours')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(
+ error, KeysEqual(
+ 'traceback',
+ 'traceback-1',
+ 'traceback-2',
+ 'twisted-log',
+ 'unhandled-error-in-deferred',
+ ))
+
+ def test_log_err_is_error(self):
+ # An error logged during the test run is recorded as an error in the
+ # tests.
+ class LogAnError(TestCase):
+ def test_something(self):
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ log.err(f)
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))
+
+ def test_log_err_flushed_is_success(self):
+ # An error logged during the test run but then flushed with
+ # flush_logged_errors does not fail the test.
+ class LogAnError(TestCase):
+ def test_something(self):
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ log.err(f)
+ flush_logged_errors(ZeroDivisionError)
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ result._events,
+ Equals([
+ ('startTest', test),
+ ('addSuccess', test, {'twisted-log': text_content('')}),
+ ('stopTest', test)]))
+
+ def test_log_in_details(self):
+ class LogAnError(TestCase):
+ def test_something(self):
+ log.msg("foo")
+ 1/0
+ test = LogAnError('test_something')
+ runner = self.make_runner(test)
+ result = self.make_result()
+ runner.run(result)
+ self.assertThat(
+ [event[:2] for event in result._events],
+ Equals([
+ ('startTest', test),
+ ('addError', test),
+ ('stopTest', test)]))
+ error = result._events[1][2]
+ self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+ def test_debugging_unchanged_during_test_by_default(self):
+ debugging = [(defer.Deferred.debug, DelayedCall.debug)]
+ class SomeCase(TestCase):
+ def test_debugging_enabled(self):
+ debugging.append((defer.Deferred.debug, DelayedCall.debug))
+ test = SomeCase('test_debugging_enabled')
+ runner = AsynchronousDeferredRunTest(
+ test, handlers=test.exception_handlers,
+ reactor=self.make_reactor(), timeout=self.make_timeout())
+ runner.run(self.make_result())
+ self.assertEqual(debugging[0], debugging[1])
+
+ def test_debugging_enabled_during_test_with_debug_flag(self):
+ self.patch(defer.Deferred, 'debug', False)
+ self.patch(DelayedCall, 'debug', False)
+ debugging = []
+ class SomeCase(TestCase):
+ def test_debugging_enabled(self):
+ debugging.append((defer.Deferred.debug, DelayedCall.debug))
+ test = SomeCase('test_debugging_enabled')
+ runner = AsynchronousDeferredRunTest(
+ test, handlers=test.exception_handlers,
+ reactor=self.make_reactor(), timeout=self.make_timeout(),
+ debug=True)
+ runner.run(self.make_result())
+ self.assertEqual([(True, True)], debugging)
+ self.assertEqual(False, defer.Deferred.debug)
+ self.assertEqual(False, DelayedCall.debug)
+
+
+class TestAssertFailsWith(NeedsTwistedTestCase):
+ """Tests for `assert_fails_with`."""
+
+ if SynchronousDeferredRunTest is not None:
+ run_tests_with = SynchronousDeferredRunTest
+
+ def test_assert_fails_with_success(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # succeeds.
+ marker = object()
+ d = assert_fails_with(defer.succeed(marker), RuntimeError)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError not raised (%r returned)" % (marker,)))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_success_multiple_types(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # succeeds.
+ marker = object()
+ d = assert_fails_with(
+ defer.succeed(marker), RuntimeError, ZeroDivisionError)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError, ZeroDivisionError not raised "
+ "(%r returned)" % (marker,)))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_wrong_exception(self):
+ # assert_fails_with fails the test if it's given a Deferred that
+ # fails with an unexpected exception.
+ d = assert_fails_with(
+ defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)
+ def check_result(failure):
+ failure.trap(self.failureException)
+ lines = str(failure.value).splitlines()
+ self.assertThat(
+ lines[:2],
+ Equals([
+ ("ZeroDivisionError raised instead of RuntimeError, "
+ "KeyboardInterrupt:"),
+ " Traceback (most recent call last):",
+ ]))
+ d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+ return d
+
+ def test_assert_fails_with_expected_exception(self):
+ # assert_fails_with calls back with the value of the failure if it's
+ # one of the expected types of failures.
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = failure.Failure()
+ d = assert_fails_with(defer.fail(f), ZeroDivisionError)
+ return d.addCallback(self.assertThat, Equals(f.value))
+
+ def test_custom_failure_exception(self):
+ # If assert_fails_with is passed a 'failureException' keyword
+ # argument, then it will raise that instead of `AssertionError`.
+ class CustomException(Exception):
+ pass
+ marker = object()
+ d = assert_fails_with(
+ defer.succeed(marker), RuntimeError,
+ failureException=CustomException)
+ def check_result(failure):
+ failure.trap(CustomException)
+ self.assertThat(
+ str(failure.value),
+ Equals("RuntimeError not raised (%r returned)" % (marker,)))
+ return d.addCallbacks(
+ lambda x: self.fail("Should not have succeeded"), check_result)
+
+
+class TestRunWithLogObservers(NeedsTwistedTestCase):
+
+ def test_restores_observers(self):
+ from testtools.deferredruntest import run_with_log_observers
+ from twisted.python import log
+ # Make sure there's at least one observer. This reproduces bug
+ # #926189.
+ log.addObserver(lambda *args: None)
+ observers = list(log.theLogPublisher.observers)
+ run_with_log_observers([], lambda: None)
+ self.assertEqual(observers, log.theLogPublisher.observers)
+
+
+def test_suite():
+ from unittest import TestLoader, TestSuite
+ return TestSuite(
+ [TestLoader().loadTestsFromName(__name__),
+ make_integration_tests()])
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py
new file mode 100644
index 00000000000..7bfc1fa267b
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details.
+
+"""Tests for the distutils test command logic."""
+
+from distutils.dist import Distribution
+
+from extras import try_import
+
+from testtools.compat import (
+ _b,
+ _u,
+ BytesIO,
+ )
+fixtures = try_import('fixtures')
+
+import testtools
+from testtools import TestCase
+from testtools.distutilscmd import TestCommand
+from testtools.matchers import MatchesRegex
+
+
+if fixtures:
+ class SampleTestFixture(fixtures.Fixture):
+ """Creates testtools.runexample temporarily."""
+
+ def __init__(self):
+ self.package = fixtures.PythonPackage(
+ 'runexample', [('__init__.py', _b("""
+from testtools import TestCase
+
+class TestFoo(TestCase):
+ def test_bar(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+"""))])
+
+ def setUp(self):
+ super(SampleTestFixture, self).setUp()
+ self.useFixture(self.package)
+ testtools.__path__.append(self.package.base)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+
+
+class TestCommandTest(TestCase):
+
+ def setUp(self):
+ super(TestCommandTest, self).setUp()
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+
+ def test_test_module(self):
+ self.useFixture(SampleTestFixture())
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ dist = Distribution()
+ dist.script_name = 'setup.py'
+ dist.script_args = ['test']
+ dist.cmdclass = {'test': TestCommand}
+ dist.command_options = {
+ 'test': {'test_module': ('command line', 'testtools.runexample')}}
+ cmd = dist.reinitialize_command('test')
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ dist.run_command('test')
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(),
+ MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+ def test_test_suite(self):
+ self.useFixture(SampleTestFixture())
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ dist = Distribution()
+ dist.script_name = 'setup.py'
+ dist.script_args = ['test']
+ dist.cmdclass = {'test': TestCommand}
+ dist.command_options = {
+ 'test': {
+ 'test_suite': (
+ 'command line', 'testtools.runexample.test_suite')}}
+ cmd = dist.reinitialize_command('test')
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ dist.run_command('test')
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(),
+ MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py
new file mode 100644
index 00000000000..2ccd1e853a0
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+import unittest
+
+from extras import try_import
+
+from testtools import (
+ TestCase,
+ content,
+ content_type,
+ )
+from testtools.compat import _b, _u
+from testtools.testresult.doubles import (
+ ExtendedTestResult,
+ )
+
+fixtures = try_import('fixtures')
+LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
+
+
+class TestFixtureSupport(TestCase):
+
+ def setUp(self):
+ super(TestFixtureSupport, self).setUp()
+ if fixtures is None or LoggingFixture is None:
+ self.skipTest("Need fixtures")
+
+ def test_useFixture(self):
+ fixture = LoggingFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = unittest.TestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertTrue(result.wasSuccessful())
+ self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
+
+ def test_useFixture_cleanups_raise_caught(self):
+ calls = []
+ def raiser(ignored):
+ calls.append('called')
+ raise Exception('foo')
+ fixture = fixtures.FunctionFixture(lambda:None, raiser)
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = unittest.TestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertFalse(result.wasSuccessful())
+ self.assertEqual(['called'], calls)
+
+ def test_useFixture_details_captured(self):
+ class DetailsFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addCleanup(delattr, self, 'content')
+ self.content = [_b('content available until cleanUp')]
+ self.addDetail('content',
+ content.Content(content_type.UTF8_TEXT, self.get_content))
+ def get_content(self):
+ return self.content
+ fixture = DetailsFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ # Add a colliding detail (both should show up)
+ self.addDetail('content',
+ content.Content(content_type.UTF8_TEXT, lambda:[_b('foo')]))
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addSuccess', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['content', 'content-1'], sorted(details.keys()))
+ self.assertEqual('foo', details['content'].as_text())
+ self.assertEqual('content available until cleanUp',
+ details['content-1'].as_text())
+
+ def test_useFixture_multiple_details_captured(self):
+ class DetailsFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addDetail('aaa', content.text_content("foo"))
+ self.addDetail('bbb', content.text_content("bar"))
+ fixture = DetailsFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addSuccess', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['aaa', 'bbb'], sorted(details))
+ self.assertEqual(_u('foo'), details['aaa'].as_text())
+ self.assertEqual(_u('bar'), details['bbb'].as_text())
+
+ def test_useFixture_details_captured_from_setUp(self):
+ # Details added during fixture set-up are gathered even if setUp()
+ # fails with an exception.
+ class BrokenFixture(fixtures.Fixture):
+ def setUp(self):
+ fixtures.Fixture.setUp(self)
+ self.addDetail('content', content.text_content("foobar"))
+ raise Exception()
+ fixture = BrokenFixture()
+ class SimpleTest(TestCase):
+ def test_foo(self):
+ self.useFixture(fixture)
+ result = ExtendedTestResult()
+ SimpleTest('test_foo').run(result)
+ self.assertEqual('addError', result._events[-2][0])
+ details = result._events[-2][2]
+ self.assertEqual(['content', 'traceback'], sorted(details))
+ self.assertEqual('foobar', ''.join(details['content'].iter_text()))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py
new file mode 100644
index 00000000000..848c2f0b489
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.tests.helpers import (
+ FullStackRunTest,
+ hide_testtools_stack,
+ is_stack_hidden,
+ )
+
+
+class TestStackHiding(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def setUp(self):
+ super(TestStackHiding, self).setUp()
+ self.addCleanup(hide_testtools_stack, is_stack_hidden())
+
+ def test_is_stack_hidden_consistent_true(self):
+ hide_testtools_stack(True)
+ self.assertEqual(True, is_stack_hidden())
+
+ def test_is_stack_hidden_consistent_false(self):
+ hide_testtools_stack(False)
+ self.assertEqual(False, is_stack_hidden())
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
new file mode 100644
index 00000000000..540a2ee909f
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2010 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""Tests for testtools.monkey."""
+
+from testtools import TestCase
+from testtools.matchers import MatchesException, Raises
+from testtools.monkey import MonkeyPatcher, patch
+
+
+class TestObj:
+
+ def __init__(self):
+ self.foo = 'foo value'
+ self.bar = 'bar value'
+ self.baz = 'baz value'
+
+
+class MonkeyPatcherTest(TestCase):
+ """
+ Tests for 'MonkeyPatcher' monkey-patching class.
+ """
+
+ def setUp(self):
+ super(MonkeyPatcherTest, self).setUp()
+ self.test_object = TestObj()
+ self.original_object = TestObj()
+ self.monkey_patcher = MonkeyPatcher()
+
+ def test_empty(self):
+ # A monkey patcher without patches doesn't change a thing.
+ self.monkey_patcher.patch()
+
+ # We can't assert that all state is unchanged, but at least we can
+ # check our test object.
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+ self.assertEquals(self.original_object.bar, self.test_object.bar)
+ self.assertEquals(self.original_object.baz, self.test_object.baz)
+
+ def test_construct_with_patches(self):
+ # Constructing a 'MonkeyPatcher' with patches adds all of the given
+ # patches to the patch list.
+ patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'),
+ (self.test_object, 'bar', 'hehe'))
+ patcher.patch()
+ self.assertEquals('haha', self.test_object.foo)
+ self.assertEquals('hehe', self.test_object.bar)
+ self.assertEquals(self.original_object.baz, self.test_object.baz)
+
+ def test_patch_existing(self):
+ # Patching an attribute that exists sets it to the value defined in the
+ # patch.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.foo, 'haha')
+
+ def test_patch_non_existing(self):
+ # Patching a non-existing attribute sets it to the value defined in
+ # the patch.
+ self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.doesntexist, 'value')
+
+ def test_restore_non_existing(self):
+ # Restoring a value that didn't exist before the patch deletes the
+ # value.
+ self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+ self.monkey_patcher.patch()
+ self.monkey_patcher.restore()
+ marker = object()
+ self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker))
+
+ def test_patch_already_patched(self):
+ # Adding a patch for an object and attribute that already have a patch
+ # overrides the existing patch.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH')
+ self.monkey_patcher.patch()
+ self.assertEquals(self.test_object.foo, 'BLAH')
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+
+ def test_restore_twice_is_a_no_op(self):
+ # Restoring an already-restored monkey patch is a no-op.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+ self.monkey_patcher.patch()
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+ self.monkey_patcher.restore()
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+
+ def test_run_with_patches_decoration(self):
+ # run_with_patches runs the given callable, passing in all arguments
+ # and keyword arguments, and returns the return value of the callable.
+ log = []
+
+ def f(a, b, c=None):
+ log.append((a, b, c))
+ return 'foo'
+
+ result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10)
+ self.assertEquals('foo', result)
+ self.assertEquals([(1, 2, 10)], log)
+
+ def test_repeated_run_with_patches(self):
+ # We can call the same function with run_with_patches more than
+ # once. All patches apply for each call.
+ def f():
+ return (self.test_object.foo, self.test_object.bar,
+ self.test_object.baz)
+
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ result = self.monkey_patcher.run_with_patches(f)
+ self.assertEquals(
+ ('haha', self.original_object.bar, self.original_object.baz),
+ result)
+ result = self.monkey_patcher.run_with_patches(f)
+ self.assertEquals(
+ ('haha', self.original_object.bar, self.original_object.baz),
+ result)
+
+ def test_run_with_patches_restores(self):
+ # run_with_patches restores the original values after the function has
+ # executed.
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+ self.monkey_patcher.run_with_patches(lambda: None)
+ self.assertEquals(self.original_object.foo, self.test_object.foo)
+
+ def test_run_with_patches_restores_on_exception(self):
+ # run_with_patches restores the original values even when the function
+ # raises an exception.
+ def _():
+ self.assertEquals(self.test_object.foo, 'haha')
+ self.assertEquals(self.test_object.bar, 'blahblah')
+ raise RuntimeError("Something went wrong!")
+
+ self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+ self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah')
+
+ self.assertThat(lambda:self.monkey_patcher.run_with_patches(_),
+ Raises(MatchesException(RuntimeError("Something went wrong!"))))
+ self.assertEquals(self.test_object.foo, self.original_object.foo)
+ self.assertEquals(self.test_object.bar, self.original_object.bar)
+
+
+class TestPatchHelper(TestCase):
+
+ def test_patch_patches(self):
+ # patch(obj, name, value) sets obj.name to value.
+ test_object = TestObj()
+ patch(test_object, 'foo', 42)
+ self.assertEqual(42, test_object.foo)
+
+ def test_patch_returns_cleanup(self):
+ # patch(obj, name, value) returns a nullary callable that restores obj
+ # to its original state when run.
+ test_object = TestObj()
+ original = test_object.foo
+ cleanup = patch(test_object, 'foo', 42)
+ cleanup()
+ self.assertEqual(original, test_object.foo)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py
new file mode 100644
index 00000000000..e89ecdc26a4
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the test runner logic."""
+
+from unittest import TestSuite
+import sys
+
+from extras import try_import
+fixtures = try_import('fixtures')
+testresources = try_import('testresources')
+
+import testtools
+from testtools import TestCase, run
+from testtools.compat import (
+ _b,
+ StringIO,
+ )
+from testtools.matchers import Contains
+
+
+if fixtures:
+ class SampleTestFixture(fixtures.Fixture):
+ """Creates testtools.runexample temporarily."""
+
+ def __init__(self, broken=False):
+ """Create a SampleTestFixture.
+
+ :param broken: If True, the sample file will not be importable.
+ """
+ if not broken:
+ init_contents = _b("""\
+from testtools import TestCase
+
+class TestFoo(TestCase):
+ def test_bar(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+""")
+ else:
+ init_contents = b"class not in\n"
+ self.package = fixtures.PythonPackage(
+ 'runexample', [('__init__.py', init_contents)])
+
+ def setUp(self):
+ super(SampleTestFixture, self).setUp()
+ self.useFixture(self.package)
+ testtools.__path__.append(self.package.base)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+ self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
+
+
+if fixtures and testresources:
+ class SampleResourcedFixture(fixtures.Fixture):
+ """Creates a test suite that uses testresources."""
+
+ def __init__(self):
+ super(SampleResourcedFixture, self).__init__()
+ self.package = fixtures.PythonPackage(
+ 'resourceexample', [('__init__.py', _b("""
+from fixtures import Fixture
+from testresources import (
+ FixtureResource,
+ OptimisingTestSuite,
+ ResourcedTestCase,
+ )
+from testtools import TestCase
+
+class Printer(Fixture):
+
+ def setUp(self):
+ super(Printer, self).setUp()
+ print('Setting up Printer')
+
+ def reset(self):
+ pass
+
+class TestFoo(TestCase, ResourcedTestCase):
+ # When run, this will print 'Setting up Printer' just once, unless the
+ # OptimisingTestSuite is not honoured, in which case it prints once per test case.
+ resources=[('res', FixtureResource(Printer()))]
+ def test_bar(self):
+ pass
+ def test_foo(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
+"""))])
+
+ def setUp(self):
+ super(SampleResourcedFixture, self).setUp()
+ self.useFixture(self.package)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+ testtools.__path__.append(self.package.base)
+
+
+class TestRun(TestCase):
+
+ def setUp(self):
+ super(TestRun, self).setUp()
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+
+ def test_run_custom_list(self):
+ self.useFixture(SampleTestFixture())
+ tests = []
+ class CaptureList(run.TestToolsTestRunner):
+ def list(self, test):
+ tests.append(set([case.id() for case
+ in testtools.testsuite.iterate_tests(test)]))
+ out = StringIO()
+ try:
+ program = run.TestProgram(
+ argv=['prog', '-l', 'testtools.runexample.test_suite'],
+ stdout=out, testRunner=CaptureList)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual([set(['testtools.runexample.TestFoo.test_bar',
+ 'testtools.runexample.TestFoo.test_quux'])], tests)
+
+ def test_run_list(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ try:
+ run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+testtools.runexample.TestFoo.test_quux
+""", out.getvalue())
+
+ def test_run_list_failed_import(self):
+ if not run.have_discover:
+ self.skipTest("Need discover")
+ broken = self.useFixture(SampleTestFixture(broken=True))
+ out = StringIO()
+ exc = self.assertRaises(
+ SystemExit,
+ run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
+ self.assertEqual(2, exc.args[0])
+ self.assertEqual("""Failed to import
+runexample.__init__
+""", out.getvalue())
+
+ def test_run_orders_tests(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # We load two tests - one that exists and one that doesn't, and we
+ # should get the one that exists and neither the one that doesn't nor
+ # the unmentioned one that does.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+ finally:
+ f.close()
+ try:
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+ def test_run_load_list(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # We load two tests - one that exists and one that doesn't, and we
+ # should get the one that exists and neither the one that doesn't nor
+ # the unmentioned one that does.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+ finally:
+ f.close()
+ try:
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+ def test_load_list_preserves_custom_suites(self):
+ if testresources is None:
+ self.skipTest("Need testresources")
+ self.useFixture(SampleResourcedFixture())
+ # We load two of the three tests, leaving one out. Both share a resource, so we
+ # should see just one resource setup occur.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.resourceexample.TestFoo.test_bar
+testtools.resourceexample.TestFoo.test_foo
+"""))
+ finally:
+ f.close()
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ try:
+ run.main(['prog', '--load-list', tempname,
+ 'testtools.resourceexample.test_suite'], stdout.stream)
+ except SystemExit:
+ # Evil resides in TestProgram.
+ pass
+ out = stdout.getDetails()['stdout'].as_text()
+ self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
+
+ def test_run_failfast(self):
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+
+ class Failing(TestCase):
+ def test_a(self):
+ self.fail('a')
+ def test_b(self):
+ self.fail('b')
+ runner = run.TestToolsTestRunner(failfast=True)
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
+
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
new file mode 100644
index 00000000000..afbb8baf395
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
@@ -0,0 +1,303 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Tests for the RunTest single test execution logic."""
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ run_test_with,
+ RunTest,
+ TestCase,
+ TestResult,
+ )
+from testtools.matchers import MatchesException, Is, Raises
+from testtools.testresult.doubles import ExtendedTestResult
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestRunTest(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def make_case(self):
+ class Case(TestCase):
+ def test(self):
+ pass
+ return Case('test')
+
+ def test___init___short(self):
+ run = RunTest("bar")
+ self.assertEqual("bar", run.case)
+ self.assertEqual([], run.handlers)
+
+ def test__init____handlers(self):
+ handlers = [("quux", "baz")]
+ run = RunTest("bar", handlers)
+ self.assertEqual(handlers, run.handlers)
+
+ def test_run_with_result(self):
+ # test.run passes result down to _run_test_method.
+ log = []
+ class Case(TestCase):
+ def _run_test_method(self, result):
+ log.append(result)
+ case = Case('_run_test_method')
+ run = RunTest(case, lambda x: log.append(x))
+ result = TestResult()
+ run.run(result)
+ self.assertEqual(1, len(log))
+ self.assertEqual(result, log[0].decorated)
+
+ def test_run_no_result_manages_new_result(self):
+ log = []
+ run = RunTest(self.make_case(), lambda x: log.append(x) or x)
+ result = run.run()
+ self.assertIsInstance(result.decorated, TestResult)
+
+ def test__run_core_called(self):
+ case = self.make_case()
+ log = []
+ run = RunTest(case, lambda x: x)
+ run._run_core = lambda: log.append('foo')
+ run.run()
+ self.assertEqual(['foo'], log)
+
+ def test__run_user_does_not_catch_keyboard(self):
+ case = self.make_case()
+ def raises():
+ raise KeyboardInterrupt("yo")
+ run = RunTest(case, None)
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(KeyboardInterrupt)))
+ self.assertEqual([], run.result._events)
+
+ def test__run_user_calls_onException(self):
+ case = self.make_case()
+ log = []
+ def handler(exc_info):
+ log.append("got it")
+ self.assertEqual(3, len(exc_info))
+ self.assertIsInstance(exc_info[1], KeyError)
+ self.assertIs(KeyError, exc_info[0])
+ case.addOnException(handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ run = RunTest(case, [(KeyError, None)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual(["got it"], log)
+
+ def test__run_user_can_catch_Exception(self):
+ case = self.make_case()
+ e = Exception('Yo')
+ def raises():
+ raise e
+ log = []
+ run = RunTest(case, [(Exception, None)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_raised(self):
+ case = self.make_case()
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(KeyError)))
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
+ case = self.make_case()
+ def broken_handler(exc_info):
+ # ValueError because that's what we know how to catch - and must
+ # not.
+ raise ValueError('boo')
+ case.addOnException(broken_handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertThat(lambda: run._run_user(raises),
+ Raises(MatchesException(ValueError)))
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_returns_result(self):
+ case = self.make_case()
+ def returns():
+ return 1
+ run = RunTest(case)
+ run.result = ExtendedTestResult()
+ self.assertEqual(1, run._run_user(returns))
+ self.assertEqual([], run.result._events)
+
+ def test__run_one_decorates_result(self):
+ log = []
+ class Run(RunTest):
+ def _run_prepared_result(self, result):
+ log.append(result)
+ return result
+ run = Run(self.make_case(), lambda x: x)
+ result = run._run_one('foo')
+ self.assertEqual([result], log)
+ self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
+ self.assertEqual('foo', result.decorated)
+
+ def test__run_prepared_result_calls_start_and_stop_test(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ run = RunTest(case, lambda x: x)
+ run.run(result)
+ self.assertEqual([
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test__run_prepared_result_calls_stop_test_always(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ def inner():
+ raise Exception("foo")
+ run = RunTest(case, lambda x: x)
+ run._run_core = inner
+ self.assertThat(lambda: run.run(result),
+ Raises(MatchesException(Exception("foo"))))
+ self.assertEqual([
+ ('startTest', case),
+ ('stopTest', case),
+ ], result._events)
+
+
+class CustomRunTest(RunTest):
+
+ marker = object()
+
+ def run(self, result=None):
+ return self.marker
+
+
+class TestTestCaseSupportForRunTest(TestCase):
+
+ def test_pass_custom_run_test(self):
+ class SomeCase(TestCase):
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=CustomRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_default_is_runTest_class_variable(self):
+ class SomeCase(TestCase):
+ run_tests_with = CustomRunTest
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_constructor_argument_overrides_class_variable(self):
+ # If a 'runTest' argument is passed to the test's constructor, that
+ # overrides the class variable.
+ marker = object()
+ class DifferentRunTest(RunTest):
+ def run(self, result=None):
+ return marker
+ class SomeCase(TestCase):
+ run_tests_with = CustomRunTest
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=DifferentRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+ def test_decorator_for_run_test(self):
+ # Individual test methods can be marked as needing a special runner.
+ class SomeCase(TestCase):
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_extended_decorator_for_run_test(self):
+ # Individual test methods can be marked as needing a special runner.
+ # Extra arguments can be passed to the decorator which will then be
+ # passed on to the RunTest object.
+ marker = object()
+ class FooRunTest(RunTest):
+ def __init__(self, case, handlers=None, bar=None):
+ super(FooRunTest, self).__init__(case, handlers)
+ self.bar = bar
+ def run(self, result=None):
+ return self.bar
+ class SomeCase(TestCase):
+ @run_test_with(FooRunTest, bar=marker)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+ def test_works_as_inner_decorator(self):
+ # Even if run_test_with is the innermost decorator, it will be
+ # respected.
+ def wrapped(function):
+ """Silly, trivial decorator."""
+ def decorated(*args, **kwargs):
+ return function(*args, **kwargs)
+ decorated.__name__ = function.__name__
+ decorated.__dict__.update(function.__dict__)
+ return decorated
+ class SomeCase(TestCase):
+ @wrapped
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo')
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+ def test_constructor_overrides_decorator(self):
+ # If a 'runTest' argument is passed to the test's constructor, that
+ # overrides the decorator.
+ marker = object()
+ class DifferentRunTest(RunTest):
+ def run(self, result=None):
+ return marker
+ class SomeCase(TestCase):
+ @run_test_with(CustomRunTest)
+ def test_foo(self):
+ pass
+ result = TestResult()
+ case = SomeCase('test_foo', runTest=DifferentRunTest)
+ from_run_test = case.run(result)
+ self.assertThat(from_run_test, Is(marker))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py
new file mode 100644
index 00000000000..6112252acd9
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py
@@ -0,0 +1,333 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the evil Twisted reactor-spinning we do."""
+
+import os
+import signal
+
+from extras import try_import
+
+from testtools import (
+ skipIf,
+ TestCase,
+ )
+from testtools.matchers import (
+ Equals,
+ Is,
+ MatchesException,
+ Raises,
+ )
+
+_spinner = try_import('testtools._spinner')
+
+defer = try_import('twisted.internet.defer')
+Failure = try_import('twisted.python.failure.Failure')
+
+
+class NeedsTwistedTestCase(TestCase):
+
+ def setUp(self):
+ super(NeedsTwistedTestCase, self).setUp()
+ if defer is None or Failure is None:
+ self.skipTest("Need Twisted to run")
+
+
+class TestNotReentrant(NeedsTwistedTestCase):
+
+ def test_not_reentrant(self):
+ # A function decorated as not being re-entrant will raise a
+ # _spinner.ReentryError if it is called while it is running.
+ calls = []
+ @_spinner.not_reentrant
+ def log_something():
+ calls.append(None)
+ if len(calls) < 5:
+ log_something()
+ self.assertThat(
+ log_something, Raises(MatchesException(_spinner.ReentryError)))
+ self.assertEqual(1, len(calls))
+
+ def test_deeper_stack(self):
+ calls = []
+ @_spinner.not_reentrant
+ def g():
+ calls.append(None)
+ if len(calls) < 5:
+ f()
+ @_spinner.not_reentrant
+ def f():
+ calls.append(None)
+ if len(calls) < 5:
+ g()
+ self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
+ self.assertEqual(2, len(calls))
+
+
+class TestExtractResult(NeedsTwistedTestCase):
+
+ def test_not_fired(self):
+ # _spinner.extract_result raises _spinner.DeferredNotFired if it's
+ # given a Deferred that has not fired.
+ self.assertThat(lambda:_spinner.extract_result(defer.Deferred()),
+ Raises(MatchesException(_spinner.DeferredNotFired)))
+
+ def test_success(self):
+ # _spinner.extract_result returns the value of the Deferred if it has
+ # fired successfully.
+ marker = object()
+ d = defer.succeed(marker)
+ self.assertThat(_spinner.extract_result(d), Equals(marker))
+
+ def test_failure(self):
+ # _spinner.extract_result raises the failure's exception if it's given
+ # a Deferred that is failing.
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = Failure()
+ d = defer.fail(f)
+ self.assertThat(lambda:_spinner.extract_result(d),
+ Raises(MatchesException(ZeroDivisionError)))
+
+
+class TestTrapUnhandledErrors(NeedsTwistedTestCase):
+
+ def test_no_deferreds(self):
+ marker = object()
+ result, errors = _spinner.trap_unhandled_errors(lambda: marker)
+ self.assertEqual([], errors)
+ self.assertIs(marker, result)
+
+ def test_unhandled_error(self):
+ failures = []
+ def make_deferred_but_dont_handle():
+ try:
+ 1/0
+ except ZeroDivisionError:
+ f = Failure()
+ failures.append(f)
+ defer.fail(f)
+ result, errors = _spinner.trap_unhandled_errors(
+ make_deferred_but_dont_handle)
+ self.assertIs(None, result)
+ self.assertEqual(failures, [error.failResult for error in errors])
+
+
+class TestRunInReactor(NeedsTwistedTestCase):
+
+ def make_reactor(self):
+ from twisted.internet import reactor
+ return reactor
+
+ def make_spinner(self, reactor=None):
+ if reactor is None:
+ reactor = self.make_reactor()
+ return _spinner.Spinner(reactor)
+
+ def make_timeout(self):
+ return 0.01
+
+ def test_function_called(self):
+ # run_in_reactor actually calls the function given to it.
+ calls = []
+ marker = object()
+ self.make_spinner().run(self.make_timeout(), calls.append, marker)
+ self.assertThat(calls, Equals([marker]))
+
+ def test_return_value_returned(self):
+ # run_in_reactor returns the value returned by the function given to
+ # it.
+ marker = object()
+ result = self.make_spinner().run(self.make_timeout(), lambda: marker)
+ self.assertThat(result, Is(marker))
+
+ def test_exception_reraised(self):
+ # If the given function raises an error, run_in_reactor re-raises that
+ # error.
+ self.assertThat(
+ lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0),
+ Raises(MatchesException(ZeroDivisionError)))
+
+ def test_keyword_arguments(self):
+ # run_in_reactor passes keyword arguments on.
+ calls = []
+ function = lambda *a, **kw: calls.extend([a, kw])
+ self.make_spinner().run(self.make_timeout(), function, foo=42)
+ self.assertThat(calls, Equals([(), {'foo': 42}]))
+
+ def test_not_reentrant(self):
+ # run_in_reactor raises an error if it is called inside another call
+ # to run_in_reactor.
+ spinner = self.make_spinner()
+ self.assertThat(lambda: spinner.run(
+ self.make_timeout(), spinner.run, self.make_timeout(),
+ lambda: None), Raises(MatchesException(_spinner.ReentryError)))
+
+ def test_deferred_value_returned(self):
+ # If the given function returns a Deferred, run_in_reactor returns the
+ # value in the Deferred at the end of the callback chain.
+ marker = object()
+ result = self.make_spinner().run(
+ self.make_timeout(), lambda: defer.succeed(marker))
+ self.assertThat(result, Is(marker))
+
+ def test_preserve_signal_handler(self):
+ signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
+ signals = filter(
+ None, (getattr(signal, name, None) for name in signals))
+ for sig in signals:
+ self.addCleanup(signal.signal, sig, signal.getsignal(sig))
+ new_hdlrs = list(lambda *a: None for _ in signals)
+ for sig, hdlr in zip(signals, new_hdlrs):
+ signal.signal(sig, hdlr)
+ spinner = self.make_spinner()
+ spinner.run(self.make_timeout(), lambda: None)
+ self.assertEqual(new_hdlrs, map(signal.getsignal, signals))
+
+ def test_timeout(self):
+ # If the function takes too long to run, we raise a
+ # _spinner.TimeoutError.
+ timeout = self.make_timeout()
+ self.assertThat(
+ lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()),
+ Raises(MatchesException(_spinner.TimeoutError)))
+
+ def test_no_junk_by_default(self):
+ # If the reactor hasn't spun yet, then there cannot be any junk.
+ spinner = self.make_spinner()
+ self.assertThat(spinner.get_junk(), Equals([]))
+
+ def test_clean_do_nothing(self):
+ # If there's nothing going on in the reactor, then clean does nothing
+ # and returns an empty list.
+ spinner = self.make_spinner()
+ result = spinner._clean()
+ self.assertThat(result, Equals([]))
+
+ def test_clean_delayed_call(self):
+        # If there's a delayed call in the reactor, then clean cancels it and
+        # returns it in the list of junk that was cleaned up.
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ call = reactor.callLater(10, lambda: None)
+ results = spinner._clean()
+ self.assertThat(results, Equals([call]))
+ self.assertThat(call.active(), Equals(False))
+
+ def test_clean_delayed_call_cancelled(self):
+ # If there's a delayed call that's just been cancelled, then it's no
+ # longer there.
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ call = reactor.callLater(10, lambda: None)
+ call.cancel()
+ results = spinner._clean()
+ self.assertThat(results, Equals([]))
+
+ def test_clean_selectables(self):
+ # If there's still a selectable (e.g. a listening socket), then
+ # clean() removes it from the reactor's registry.
+ #
+ # Note that the socket is left open. This emulates a bug in trial.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
+ spinner.run(self.make_timeout(), lambda: None)
+ results = spinner.get_junk()
+ self.assertThat(results, Equals([port]))
+
+ def test_clean_running_threads(self):
+ import threading
+ import time
+ current_threads = list(threading.enumerate())
+ reactor = self.make_reactor()
+ timeout = self.make_timeout()
+ spinner = self.make_spinner(reactor)
+ spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
+ # Python before 2.5 has a race condition with thread handling where
+ # join() does not remove threads from enumerate before returning - the
+ # thread being joined does the removal. This was fixed in Python 2.5
+        # but we still support 2.4, so we have to work around the issue.
+ # http://bugs.python.org/issue1703448.
+ self.assertThat(
+ [thread for thread in threading.enumerate() if thread.isAlive()],
+ Equals(current_threads))
+
+ def test_leftover_junk_available(self):
+ # If 'run' is given a function that leaves the reactor dirty in some
+ # way, 'run' will clean up the reactor and then store information
+        # about the junk. This information can be retrieved using get_junk.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ port = spinner.run(
+ self.make_timeout(), reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
+ self.assertThat(spinner.get_junk(), Equals([port]))
+
+ def test_will_not_run_with_previous_junk(self):
+ # If 'run' is called and there's still junk in the spinner's junk
+ # list, then the spinner will refuse to run.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
+ self.assertThat(lambda: spinner.run(timeout, lambda: None),
+ Raises(MatchesException(_spinner.StaleJunkError)))
+
+ def test_clear_junk_clears_previous_junk(self):
+        # clear_junk removes any junk left over from a previous 'run' from
+        # the spinner's junk list and returns it, leaving the list empty.
+ from twisted.internet.protocol import ServerFactory
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
+ junk = spinner.clear_junk()
+ self.assertThat(junk, Equals([port]))
+ self.assertThat(spinner.get_junk(), Equals([]))
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_sigint_raises_no_result_error(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ self.skipTest("SIGINT not available")
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
+ Raises(MatchesException(_spinner.NoResultError)))
+ self.assertEqual([], spinner._clean())
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_sigint_raises_no_result_error_second_time(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ # This test is exactly the same as test_sigint_raises_no_result_error,
+ # and exists to make sure we haven't futzed with state.
+ self.test_sigint_raises_no_result_error()
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_sigint_raises_no_result_error(self):
+ # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+ SIGINT = getattr(signal, 'SIGINT', None)
+ if not SIGINT:
+ self.skipTest("SIGINT not available")
+ reactor = self.make_reactor()
+ spinner = self.make_spinner(reactor)
+ timeout = self.make_timeout()
+ reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+ self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
+ Raises(MatchesException(_spinner.NoResultError)))
+ self.assertEqual([], spinner._clean())
+
+ @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+ def test_fast_sigint_raises_no_result_error_second_time(self):
+ self.test_fast_sigint_raises_no_result_error()
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py
new file mode 100644
index 00000000000..5010f9ac12c
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Test tag support."""
+
+
+from testtools import TestCase
+from testtools.tags import TagContext
+
+
+class TestTags(TestCase):
+
+ def test_no_tags(self):
+ # A tag context has no tags initially.
+ tag_context = TagContext()
+ self.assertEqual(set(), tag_context.get_current_tags())
+
+ def test_add_tag(self):
+ # A tag added with change_tags appears in get_current_tags.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), tag_context.get_current_tags())
+
+ def test_add_tag_twice(self):
+ # Calling change_tags twice to add tags adds both tags to the current
+ # tags.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ tag_context.change_tags(set(['bar']), set())
+ self.assertEqual(
+ set(['foo', 'bar']), tag_context.get_current_tags())
+
+ def test_change_tags_returns_tags(self):
+ # change_tags returns the current tags. This is a convenience.
+ tag_context = TagContext()
+ tags = tag_context.change_tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), tags)
+
+ def test_remove_tag(self):
+ # change_tags can remove tags from the context.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ tag_context.change_tags(set(), set(['foo']))
+ self.assertEqual(set(), tag_context.get_current_tags())
+
+ def test_child_context(self):
+ # A TagContext can have a parent. If so, its tags are the tags of the
+ # parent at the moment of construction.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ self.assertEqual(
+ parent.get_current_tags(), child.get_current_tags())
+
+ def test_add_to_child(self):
+ # Adding a tag to the child context doesn't affect the parent.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(['bar']), set())
+ self.assertEqual(set(['foo', 'bar']), child.get_current_tags())
+ self.assertEqual(set(['foo']), parent.get_current_tags())
+
+ def test_remove_in_child(self):
+ # A tag that was in the parent context can be removed from the child
+        # context without affecting the parent.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(), set(['foo']))
+ self.assertEqual(set(), child.get_current_tags())
+ self.assertEqual(set(['foo']), parent.get_current_tags())
+
+ def test_parent(self):
+ # The parent can be retrieved from a child context.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(), set(['foo']))
+ self.assertEqual(parent, child.parent)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py
new file mode 100644
index 00000000000..680368db4a1
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py
@@ -0,0 +1,1550 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for extensions to the base test library."""
+
+from doctest import ELLIPSIS
+from pprint import pformat
+import sys
+import unittest
+
+from testtools import (
+ DecorateTestCaseResult,
+ ErrorHolder,
+ MultipleExceptions,
+ PlaceHolder,
+ TestCase,
+ clone_test_with_new_id,
+ content,
+ skip,
+ skipIf,
+ skipUnless,
+ testcase,
+ )
+from testtools.compat import (
+ _b,
+ _u,
+ )
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
+from testtools.matchers import (
+ Annotate,
+ DocTestMatches,
+ Equals,
+ HasLength,
+ MatchesException,
+ Raises,
+ )
+from testtools.testcase import (
+ attr,
+ Nullary,
+ WithAttributes,
+ )
+from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ FullStackRunTest,
+ LoggingResult,
+ )
+try:
+ exec('from __future__ import with_statement')
+except SyntaxError:
+ pass
+else:
+ from testtools.tests.test_with_with import *
+
+
+class TestPlaceHolder(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def makePlaceHolder(self, test_id="foo", short_description=None):
+ return PlaceHolder(test_id, short_description)
+
+ def test_id_comes_from_constructor(self):
+ # The id() of a PlaceHolder is whatever you pass into the constructor.
+ test = PlaceHolder("test id")
+ self.assertEqual("test id", test.id())
+
+ def test_shortDescription_is_id(self):
+ # The shortDescription() of a PlaceHolder is the id, by default.
+ test = PlaceHolder("test id")
+ self.assertEqual(test.id(), test.shortDescription())
+
+ def test_shortDescription_specified(self):
+ # If a shortDescription is provided to the constructor, then
+ # shortDescription() returns that instead.
+ test = PlaceHolder("test id", "description")
+ self.assertEqual("description", test.shortDescription())
+
+ def test_repr_just_id(self):
+ # repr(placeholder) shows you how the object was constructed.
+ test = PlaceHolder("test id")
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSuccess', %s, {})>" % repr(
+ test.id()), repr(test))
+
+ def test_repr_with_description(self):
+ # repr(placeholder) shows you how the object was constructed.
+ test = PlaceHolder("test id", "description")
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSuccess', %r, {}, %r)>" % (
+ test.id(), test.shortDescription()), repr(test))
+
+ def test_repr_custom_outcome(self):
+ test = PlaceHolder("test id", outcome='addSkip')
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSkip', %r, {})>" % (
+ test.id()), repr(test))
+
+ def test_counts_as_one_test(self):
+ # A placeholder test counts as one test.
+ test = self.makePlaceHolder()
+ self.assertEqual(1, test.countTestCases())
+
+ def test_str_is_id(self):
+ # str(placeholder) is always the id(). We are not barbarians.
+ test = self.makePlaceHolder()
+ self.assertEqual(test.id(), str(test))
+
+ def test_runs_as_success(self):
+ # When run, a PlaceHolder test records a success.
+ test = self.makePlaceHolder()
+ log = []
+ test.run(LoggingResult(log))
+ self.assertEqual(
+ [('tags', set(), set()), ('startTest', test), ('addSuccess', test),
+ ('stopTest', test), ('tags', set(), set()),],
+ log)
+
+ def test_supplies_details(self):
+ details = {'quux':None}
+ test = PlaceHolder('foo', details=details)
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(
+ [('tags', set(), set()),
+ ('startTest', test),
+ ('addSuccess', test, details),
+ ('stopTest', test),
+ ('tags', set(), set()),
+ ],
+ result._events)
+
+ def test_supplies_timestamps(self):
+ test = PlaceHolder('foo', details={}, timestamps=["A", "B"])
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(
+ [('time', "A"),
+ ('tags', set(), set()),
+ ('startTest', test),
+ ('time', "B"),
+ ('addSuccess', test),
+ ('stopTest', test),
+ ('tags', set(), set()),
+ ],
+ result._events)
+
+ def test_call_is_run(self):
+ # A PlaceHolder can be called, in which case it behaves like run.
+ test = self.makePlaceHolder()
+ run_log = []
+ test.run(LoggingResult(run_log))
+ call_log = []
+ test(LoggingResult(call_log))
+ self.assertEqual(run_log, call_log)
+
+ def test_runs_without_result(self):
+ # A PlaceHolder can be run without a result, in which case there's no
+ # way to actually get at the result.
+ self.makePlaceHolder().run()
+
+ def test_debug(self):
+ # A PlaceHolder can be debugged.
+ self.makePlaceHolder().debug()
+
+ def test_supports_tags(self):
+ result = ExtendedTestResult()
+ tags = set(['foo', 'bar'])
+ case = PlaceHolder("foo", tags=tags)
+ case.run(result)
+ self.assertEqual([
+ ('tags', tags, set()),
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ('tags', set(), tags),
+ ], result._events)
+
+
+class TestErrorHolder(TestCase):
+ # Note that these tests exist because ErrorHolder exists - it could be
+ # deprecated and dropped at this point.
+
+ run_test_with = FullStackRunTest
+
+ def makeException(self):
+ try:
+ raise RuntimeError("danger danger")
+ except:
+ return sys.exc_info()
+
+ def makePlaceHolder(self, test_id="foo", error=None,
+ short_description=None):
+ if error is None:
+ error = self.makeException()
+ return ErrorHolder(test_id, error, short_description)
+
+ def test_id_comes_from_constructor(self):
+        # The id() of an ErrorHolder is whatever you pass into the constructor.
+ test = ErrorHolder("test id", self.makeException())
+ self.assertEqual("test id", test.id())
+
+ def test_shortDescription_is_id(self):
+        # The shortDescription() of an ErrorHolder is the id, by default.
+ test = ErrorHolder("test id", self.makeException())
+ self.assertEqual(test.id(), test.shortDescription())
+
+ def test_shortDescription_specified(self):
+ # If a shortDescription is provided to the constructor, then
+ # shortDescription() returns that instead.
+ test = ErrorHolder("test id", self.makeException(), "description")
+ self.assertEqual("description", test.shortDescription())
+
+ def test_counts_as_one_test(self):
+ # A placeholder test counts as one test.
+ test = self.makePlaceHolder()
+ self.assertEqual(1, test.countTestCases())
+
+ def test_str_is_id(self):
+ # str(placeholder) is always the id(). We are not barbarians.
+ test = self.makePlaceHolder()
+ self.assertEqual(test.id(), str(test))
+
+ def test_runs_as_error(self):
+ # When run, an ErrorHolder test records an error.
+ error = self.makeException()
+ test = self.makePlaceHolder(error=error)
+ result = ExtendedTestResult()
+ log = result._events
+ test.run(result)
+ self.assertEqual(
+ [('tags', set(), set()),
+ ('startTest', test),
+ ('addError', test, test._details),
+ ('stopTest', test),
+ ('tags', set(), set())], log)
+
+ def test_call_is_run(self):
+        # An ErrorHolder can be called, in which case it behaves like run.
+ test = self.makePlaceHolder()
+ run_log = []
+ test.run(LoggingResult(run_log))
+ call_log = []
+ test(LoggingResult(call_log))
+ self.assertEqual(run_log, call_log)
+
+ def test_runs_without_result(self):
+        # An ErrorHolder can be run without a result, in which case there's no
+ # way to actually get at the result.
+ self.makePlaceHolder().run()
+
+ def test_debug(self):
+        # An ErrorHolder can be debugged.
+ self.makePlaceHolder().debug()
+
+
+class TestEquality(TestCase):
+ """Test ``TestCase``'s equality implementation."""
+
+ run_test_with = FullStackRunTest
+
+ def test_identicalIsEqual(self):
+        # TestCases are equal if they are identical.
+ self.assertEqual(self, self)
+
+ def test_nonIdenticalInUnequal(self):
+        # TestCases are not equal if they are not identical.
+ self.assertNotEqual(TestCase(methodName='run'),
+ TestCase(methodName='skip'))
+
+
+class TestAssertions(TestCase):
+ """Test assertions in TestCase."""
+
+ run_test_with = FullStackRunTest
+
+ def raiseError(self, exceptionFactory, *args, **kwargs):
+ raise exceptionFactory(*args, **kwargs)
+
+ def test_formatTypes_single(self):
+ # Given a single class, _formatTypes returns the name.
+ class Foo(object):
+ pass
+ self.assertEqual('Foo', self._formatTypes(Foo))
+
+ def test_formatTypes_multiple(self):
+ # Given multiple types, _formatTypes returns the names joined by
+ # commas.
+ class Foo(object):
+ pass
+ class Bar(object):
+ pass
+ self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
+
+ def test_assertRaises(self):
+ # assertRaises asserts that a callable raises a particular exception.
+ self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
+
+ def test_assertRaises_exception_w_metaclass(self):
+ # assertRaises works when called for exceptions with custom metaclasses
+ class MyExMeta(type):
+ def __init__(cls, name, bases, dct):
+ """ Do some dummy metaclass stuff """
+ dct.update({'answer': 42})
+ type.__init__(cls, name, bases, dct)
+
+ class MyEx(Exception):
+ __metaclass__ = MyExMeta
+
+ self.assertRaises(MyEx, self.raiseError, MyEx)
+
+ def test_assertRaises_fails_when_no_error_raised(self):
+ # assertRaises raises self.failureException when it's passed a
+ # callable that raises no error.
+ ret = ('orange', 42)
+ self.assertFails(
+ "<function ...<lambda> at ...> returned ('orange', 42)",
+ self.assertRaises, RuntimeError, lambda: ret)
+
+ def test_assertRaises_fails_when_different_error_raised(self):
+ # assertRaises re-raises an exception that it didn't expect.
+ self.assertThat(lambda: self.assertRaises(RuntimeError,
+ self.raiseError, ZeroDivisionError),
+ Raises(MatchesException(ZeroDivisionError)))
+
+ def test_assertRaises_returns_the_raised_exception(self):
+ # assertRaises returns the exception object that was raised. This is
+ # useful for testing that exceptions have the right message.
+
+ # This contraption stores the raised exception, so we can compare it
+ # to the return value of assertRaises.
+ raisedExceptions = []
+ def raiseError():
+ try:
+ raise RuntimeError('Deliberate error')
+ except RuntimeError:
+ raisedExceptions.append(sys.exc_info()[1])
+ raise
+
+ exception = self.assertRaises(RuntimeError, raiseError)
+ self.assertEqual(1, len(raisedExceptions))
+ self.assertTrue(
+ exception is raisedExceptions[0],
+ "%r is not %r" % (exception, raisedExceptions[0]))
+
+ def test_assertRaises_with_multiple_exceptions(self):
+ # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
+ # function raises one of ExceptionTwo or ExceptionOne.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[0])
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[1])
+
+ def test_assertRaises_with_multiple_exceptions_failure_mode(self):
+ # If assertRaises is called expecting one of a group of exceptions and
+ # a callable that doesn't raise an exception, then fail with an
+ # appropriate error message.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ self.assertRaises(
+ self.failureException,
+ self.assertRaises, expectedExceptions, lambda: None)
+ self.assertFails('<function ...<lambda> at ...> returned None',
+ self.assertRaises, expectedExceptions, lambda: None)
+
+ def test_assertRaises_function_repr_in_exception(self):
+ # When assertRaises fails, it includes the repr of the invoked
+ # function in the error message, so it's easy to locate the problem.
+ def foo():
+ """An arbitrary function."""
+ pass
+ self.assertThat(
+ lambda: self.assertRaises(Exception, foo),
+ Raises(
+ MatchesException(self.failureException, '.*%r.*' % (foo,))))
+
+ def assertFails(self, message, function, *args, **kwargs):
+ """Assert that function raises a failure with the given message."""
+ failure = self.assertRaises(
+ self.failureException, function, *args, **kwargs)
+ self.assertThat(failure, DocTestMatches(message, ELLIPSIS))
+
+ def test_assertIn_success(self):
+ # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
+ self.assertIn(3, range(10))
+ self.assertIn('foo', 'foo bar baz')
+ self.assertIn('foo', 'foo bar baz'.split())
+
+ def test_assertIn_failure(self):
+ # assertIn(needle, haystack) fails the test when 'needle' is not in
+ # 'haystack'.
+ self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
+ self.assertFails(
+ '%r not in %r' % ('qux', 'foo bar baz'),
+ self.assertIn, 'qux', 'foo bar baz')
+
+ def test_assertNotIn_success(self):
+ # assertNotIn(needle, haystack) asserts that 'needle' is not in
+ # 'haystack'.
+ self.assertNotIn(3, [0, 1, 2])
+ self.assertNotIn('qux', 'foo bar baz')
+
+ def test_assertNotIn_failure(self):
+ # assertNotIn(needle, haystack) fails the test when 'needle' is in
+ # 'haystack'.
+ self.assertFails('[1, 2, 3] matches Contains(3)', self.assertNotIn,
+ 3, [1, 2, 3])
+ self.assertFails(
+ "'foo bar baz' matches Contains('foo')",
+ self.assertNotIn, 'foo', 'foo bar baz')
+
+ def test_assertIsInstance(self):
+ # assertIsInstance asserts that an object is an instance of a class.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, Foo)
+
+ def test_assertIsInstance_multiple_classes(self):
+ # assertIsInstance asserts that an object is an instance of one of a
+ # group of classes.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ class Bar(object):
+ """Another simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, (Foo, Bar))
+ self.assertIsInstance(Bar(), (Foo, Bar))
+
+ def test_assertIsInstance_failure(self):
+ # assertIsInstance(obj, klass) fails the test when obj is not an
+ # instance of klass.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ "'42' is not an instance of %s" % self._formatTypes(Foo),
+ self.assertIsInstance, 42, Foo)
+
+ def test_assertIsInstance_failure_multiple_classes(self):
+ # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
+ # not an instance of klass1 or klass2.
+
+ class Foo(object):
+ """Simple class for testing assertIsInstance."""
+
+ class Bar(object):
+ """Another simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ "'42' is not an instance of any of (%s)" % self._formatTypes([Foo, Bar]),
+ self.assertIsInstance, 42, (Foo, Bar))
+
+ def test_assertIsInstance_overridden_message(self):
+ # assertIsInstance(obj, klass, msg) permits a custom message.
+ self.assertFails("'42' is not an instance of str: foo",
+ self.assertIsInstance, 42, str, "foo")
+
+ def test_assertIs(self):
+ # assertIs asserts that an object is identical to another object.
+ self.assertIs(None, None)
+ some_list = [42]
+ self.assertIs(some_list, some_list)
+ some_object = object()
+ self.assertIs(some_object, some_object)
+
+ def test_assertIs_fails(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another.
+ self.assertFails('None is not 42', self.assertIs, None, 42)
+ self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
+
+ def test_assertIs_fails_with_message(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another, and includes a user-supplied message, if it's provided.
+ self.assertFails(
+ 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
+
+ def test_assertIsNot(self):
+ # assertIsNot asserts that an object is not identical to another
+ # object.
+ self.assertIsNot(None, 42)
+ self.assertIsNot([42], [42])
+ self.assertIsNot(object(), object())
+
+ def test_assertIsNot_fails(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another.
+ self.assertFails('None matches Is(None)', self.assertIsNot, None, None)
+ some_list = [42]
+ self.assertFails(
+ '[42] matches Is([42])', self.assertIsNot, some_list, some_list)
+
+ def test_assertIsNot_fails_with_message(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another, and includes a user-supplied message if it's provided.
+ self.assertFails(
+ 'None matches Is(None): foo bar', self.assertIsNot, None, None,
+ "foo bar")
+
+ def test_assertThat_matches_clean(self):
+ class Matcher(object):
+ def match(self, foo):
+ return None
+ self.assertThat("foo", Matcher())
+
+ def test_assertThat_mismatch_raises_description(self):
+ calls = []
+ class Mismatch(object):
+ def __init__(self, thing):
+ self.thing = thing
+ def describe(self):
+ calls.append(('describe_diff', self.thing))
+ return "object is not a thing"
+ def get_details(self):
+ return {}
+ class Matcher(object):
+ def match(self, thing):
+ calls.append(('match', thing))
+ return Mismatch(thing)
+ def __str__(self):
+ calls.append(('__str__',))
+ return "a description"
+ class Test(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ result = Test("test").run()
+ self.assertEqual([
+ ('match', "foo"),
+ ('describe_diff', "foo"),
+ ], calls)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_assertThat_output(self):
+ matchee = 'foo'
+ matcher = Equals('bar')
+ expected = matcher.match(matchee).describe()
+ self.assertFails(expected, self.assertThat, matchee, matcher)
+
+ def test_assertThat_message_is_annotated(self):
+ matchee = 'foo'
+ matcher = Equals('bar')
+ expected = Annotate('woo', matcher).match(matchee).describe()
+ self.assertFails(expected, self.assertThat, matchee, matcher, 'woo')
+
+ def test_assertThat_verbose_output(self):
+ matchee = 'foo'
+ matcher = Equals('bar')
+ expected = (
+ 'Match failed. Matchee: %r\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ matchee,
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ self.assertFails(
+ expected, self.assertThat, matchee, matcher, verbose=True)
+
+ def test__force_failure_fails_test(self):
+ class Test(TestCase):
+ def test_foo(self):
+ self.force_failure = True
+ self.remaining_code_run = True
+ test = Test('test_foo')
+ result = test.run()
+ self.assertFalse(result.wasSuccessful())
+ self.assertTrue(test.remaining_code_run)
+
+ def get_error_string(self, e):
+ """Get the string showing how 'e' would be formatted in test output.
+
+ This is a little bit hacky, since it's designed to give consistent
+ output regardless of Python version.
+
+ In testtools, TestResult._exc_info_to_unicode is the point of dispatch
+ between various different implementations of methods that format
+ exceptions, so that's what we have to call. However, that method cares
+ about stack traces and formats the exception class. We don't care
+ about either of these, so we take its output and parse it a little.
+ """
+ error = TracebackContent((e.__class__, e, None), self).as_text()
+ # We aren't at all interested in the traceback.
+ if error.startswith('Traceback (most recent call last):\n'):
+ lines = error.splitlines(True)[1:]
+ for i, line in enumerate(lines):
+ if not line.startswith(' '):
+ break
+ error = ''.join(lines[i:])
+ # We aren't interested in how the exception type is formatted.
+ exc_class, error = error.split(': ', 1)
+ return error
+
+ def test_assertThat_verbose_unicode(self):
+ # When assertThat is given matchees or matchers that contain non-ASCII
+ # unicode strings, we can still provide a meaningful error.
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ expected = (
+ 'Match failed. Matchee: %s\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n\n' % (
+ repr(matchee).replace("\\xa7", matchee),
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ e = self.assertRaises(
+ self.failureException, self.assertThat, matchee, matcher,
+ verbose=True)
+ self.assertEqual(expected, self.get_error_string(e))
+
+ def test_assertEqual_nice_formatting(self):
+ message = "These things ought not be equal."
+ a = ['apple', 'banana', 'cherry']
+ b = {'Thatcher': 'One who mends roofs of straw',
+ 'Major': 'A military officer, ranked below colonel',
+ 'Blair': 'To shout loudly',
+ 'Brown': 'The colour of healthy human faeces'}
+ expected_error = '\n'.join([
+ '!=:',
+ 'reference = %s' % pformat(a),
+ 'actual = %s' % pformat(b),
+ ': ' + message,
+ ])
+ self.assertFails(expected_error, self.assertEqual, a, b, message)
+ self.assertFails(expected_error, self.assertEquals, a, b, message)
+ self.assertFails(expected_error, self.failUnlessEqual, a, b, message)
+
+ def test_assertEqual_formatting_no_message(self):
+ a = "cat"
+ b = "dog"
+ expected_error = "'cat' != 'dog'"
+ self.assertFails(expected_error, self.assertEqual, a, b)
+ self.assertFails(expected_error, self.assertEquals, a, b)
+ self.assertFails(expected_error, self.failUnlessEqual, a, b)
+
+ def test_assertEqual_non_ascii_str_with_newlines(self):
+ message = _u("Be careful mixing unicode and bytes")
+ a = "a\n\xa7\n"
+ b = "Just a longish string so the more verbose output form is used."
+ expected_error = '\n'.join([
+ '!=:',
+ "reference = '''\\",
+ 'a',
+ repr('\xa7')[1:-1],
+ "'''",
+ 'actual = %r' % (b,),
+ ': ' + message,
+ ])
+ self.assertFails(expected_error, self.assertEqual, a, b, message)
+
+ def test_assertIsNone(self):
+ self.assertIsNone(None)
+
+ expected_error = 'None is not 0'
+ self.assertFails(expected_error, self.assertIsNone, 0)
+
+ def test_assertIsNotNone(self):
+ self.assertIsNotNone(0)
+ self.assertIsNotNone("0")
+
+ expected_error = 'None matches Is(None)'
+ self.assertFails(expected_error, self.assertIsNotNone, None)
+
+
+ def test_fail_preserves_traceback_detail(self):
+ class Test(TestCase):
+ def test(self):
+ self.addDetail('traceback', text_content('foo'))
+ self.fail('bar')
+ test = Test('test')
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(result._events[1][2].keys()))
+
+
+class TestAddCleanup(TestCase):
+ """Tests for TestCase.addCleanup."""
+
+ run_test_with = FullStackRunTest
+
+ class LoggingTest(TestCase):
+ """A test that logs calls to setUp, runTest and tearDown."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._calls = ['setUp']
+
+ def brokenSetUp(self):
+            # A setUp that deliberately fails.
+ self._calls = ['brokenSetUp']
+ raise RuntimeError('Deliberate Failure')
+
+ def runTest(self):
+ self._calls.append('runTest')
+
+ def brokenTest(self):
+ raise RuntimeError('Deliberate broken test')
+
+ def tearDown(self):
+ self._calls.append('tearDown')
+ TestCase.tearDown(self)
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._result_calls = []
+ self.test = TestAddCleanup.LoggingTest('runTest')
+ self.logging_result = LoggingResult(self._result_calls)
+
+ def assertErrorLogEqual(self, messages):
+ self.assertEqual(messages, [call[0] for call in self._result_calls])
+
+ def assertTestLogEqual(self, messages):
+ """Assert that the call log equals 'messages'."""
+ case = self._result_calls[0][1]
+ self.assertEqual(messages, case._calls)
+
+ def logAppender(self, message):
+ """A cleanup that appends 'message' to the tests log.
+
+ Cleanups are callables that are added to a test by addCleanup. To
+ verify that our cleanups run in the right order, we add strings to a
+ list that acts as a log. This method returns a cleanup that will add
+ the given message to that log when run.
+ """
+ self.test._calls.append(message)
+
+ def test_fixture(self):
+ # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
+        # This test doesn't test addCleanup itself; it just sanity-checks the
+ # fixture.
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanup_run_before_tearDown(self):
+ # Cleanup functions added with 'addCleanup' are called before tearDown
+ # runs.
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
+
+ def test_add_cleanup_called_if_setUp_fails(self):
+ # Cleanup functions added with 'addCleanup' are called even if setUp
+ # fails. Note that tearDown has a different behavior: it is only
+ # called when setUp succeeds.
+ self.test.setUp = self.test.brokenSetUp
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
+
+ def test_addCleanup_called_in_reverse_order(self):
+ # Cleanup functions added with 'addCleanup' are called in reverse
+ # order.
+ #
+ # One of the main uses of addCleanup is to dynamically create
+ # resources that need some sort of explicit tearDown. Often one
+ # resource will be created in terms of another, e.g.,
+ # self.first = self.makeFirst()
+ # self.second = self.makeSecond(self.first)
+ #
+ # When this happens, we generally want to clean up the second resource
+ # before the first one, since the second depends on the first.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_tearDown_runs_after_cleanup_failure(self):
+ # tearDown runs even if a cleanup function fails.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanups_continue_running_after_error(self):
+ # All cleanups are always run, even if one or two of them fail.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_error_in_cleanups_are_captured(self):
+        # If a cleanup raises an error, we want to record it and fail the
+ # test, even though we go on to run other cleanups.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
+
+ def test_keyboard_interrupt_not_caught(self):
+ # If a cleanup raises KeyboardInterrupt, it gets reraised.
+ def raiseKeyboardInterrupt():
+ raise KeyboardInterrupt()
+ self.test.addCleanup(raiseKeyboardInterrupt)
+ self.assertThat(lambda:self.test.run(self.logging_result),
+ Raises(MatchesException(KeyboardInterrupt)))
+
+ def test_all_errors_from_MultipleExceptions_reported(self):
+ # When a MultipleExceptions exception is caught, all the errors are
+ # reported.
+ def raiseMany():
+ try:
+ 1/0
+ except Exception:
+ exc_info1 = sys.exc_info()
+ try:
+ 1/0
+ except Exception:
+ exc_info2 = sys.exc_info()
+ raise MultipleExceptions(exc_info1, exc_info2)
+ self.test.addCleanup(raiseMany)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(self.logging_result._events[1][2].keys()))
+
+ def test_multipleCleanupErrorsReported(self):
+ # Errors from all failing cleanups are reported as separate backtraces.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(self.logging_result._events[1][2].keys()))
+
+ def test_multipleErrorsCoreAndCleanupReported(self):
+        # Errors from the failing test and from all failing cleanups are
+        # reported, each as a separate traceback detail.
+ self.test = TestAddCleanup.LoggingTest('brokenTest')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.logging_result = ExtendedTestResult()
+ self.test.run(self.logging_result)
+ self.assertEqual(['startTest', 'addError', 'stopTest'],
+ [event[0] for event in self.logging_result._events])
+ self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
+ set(self.logging_result._events[1][2].keys()))
+
+
+class TestWithDetails(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def assertDetailsProvided(self, case, expected_outcome, expected_keys):
+ """Assert that when case is run, details are provided to the result.
+
+ :param case: A TestCase to run.
+ :param expected_outcome: The call that should be made.
+ :param expected_keys: The keys to look for.
+ """
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ expected = [
+ ('startTest', case),
+ (expected_outcome, case),
+ ('stopTest', case),
+ ]
+ self.assertEqual(3, len(result._events))
+ self.assertEqual(expected[0], result._events[0])
+ self.assertEqual(expected[1], result._events[1][0:2])
+ # Checking the TB is right is rather tricky. doctest line matching
+ # would help, but 'meh'.
+ self.assertEqual(sorted(expected_keys),
+ sorted(result._events[1][2].keys()))
+ self.assertEqual(expected[-1], result._events[-1])
+
+ def get_content(self):
+ return content.Content(
+ content.ContentType("text", "foo"), lambda: [_b('foo')])
+
+
+class TestExpectedFailure(TestWithDetails):
+ """Tests for expected failures and unexpected successess."""
+
+ run_test_with = FullStackRunTest
+
+ def make_unexpected_case(self):
+ class Case(TestCase):
+ def test(self):
+ raise testcase._UnexpectedSuccess
+ case = Case('test')
+ return case
+
+ def test_raising__UnexpectedSuccess_py27(self):
+ case = self.make_unexpected_case()
+ result = Python27TestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test_raising__UnexpectedSuccess_extended(self):
+ case = self.make_unexpected_case()
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case, {}),
+ ('stopTest', case),
+ ], result._events)
+
+ def make_xfail_case_xfails(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 0)
+ case = Case('test')
+ return case
+
+ def make_xfail_case_succeeds(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 1)
+ case = Case('test')
+ return case
+
+ def test_expectFailure_KnownFailure_extended(self):
+ case = self.make_xfail_case_xfails()
+ self.assertDetailsProvided(case, "addExpectedFailure",
+ ["foo", "traceback", "reason"])
+
+ def test_expectFailure_KnownFailure_unexpected_success(self):
+ case = self.make_xfail_case_succeeds()
+ self.assertDetailsProvided(case, "addUnexpectedSuccess",
+ ["foo", "reason"])
+
+
+class TestUniqueFactories(TestCase):
+ """Tests for getUniqueString and getUniqueInteger."""
+
+ run_test_with = FullStackRunTest
+
+ def test_getUniqueInteger(self):
+ # getUniqueInteger returns an integer that increments each time you
+ # call it.
+ one = self.getUniqueInteger()
+ self.assertEqual(1, one)
+ two = self.getUniqueInteger()
+ self.assertEqual(2, two)
+
+ def test_getUniqueString(self):
+ # getUniqueString returns the current test id followed by a unique
+ # integer.
+ name_one = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 1), name_one)
+ name_two = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 2), name_two)
+
+ def test_getUniqueString_prefix(self):
+ # If getUniqueString is given an argument, it uses that argument as
+ # the prefix of the unique string, rather than the test id.
+ name_one = self.getUniqueString('foo')
+ self.assertThat(name_one, Equals('foo-1'))
+ name_two = self.getUniqueString('bar')
+ self.assertThat(name_two, Equals('bar-2'))
+
+
+class TestCloneTestWithNewId(TestCase):
+ """Tests for clone_test_with_new_id."""
+
+ run_test_with = FullStackRunTest
+
+ def test_clone_test_with_new_id(self):
+ class FooTestCase(TestCase):
+ def test_foo(self):
+ pass
+ test = FooTestCase('test_foo')
+ oldName = test.id()
+ newName = self.getUniqueString()
+ newTest = clone_test_with_new_id(test, newName)
+ self.assertEqual(newName, newTest.id())
+ self.assertEqual(oldName, test.id(),
+ "the original test instance should be unchanged.")
+
+ def test_cloned_testcase_does_not_share_details(self):
+ """A cloned TestCase does not share the details dict."""
+ class Test(TestCase):
+ def test_foo(self):
+ self.addDetail(
+ 'foo', content.Content('text/plain', lambda: 'foo'))
+ orig_test = Test('test_foo')
+ cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString())
+ orig_test.run(unittest.TestResult())
+ self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
+ self.assertEqual(None, cloned_test.getDetails().get('foo'))
+
+
+class TestDetailsProvided(TestWithDetails):
+
+ run_test_with = FullStackRunTest
+
+ def test_addDetail(self):
+ mycontent = self.get_content()
+ self.addDetail("foo", mycontent)
+ details = self.getDetails()
+ self.assertEqual({"foo": mycontent}, details)
+
+ def test_addError(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ 1/0
+ self.assertDetailsProvided(Case("test"), "addError",
+ ["foo", "traceback"])
+
+ def test_addFailure(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.fail('yo')
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_addSkip(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.skip('yo')
+ self.assertDetailsProvided(Case("test"), "addSkip",
+ ["foo", "reason"])
+
+ def test_addSucccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo"])
+
+ def test_addUnexpectedSuccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ raise testcase._UnexpectedSuccess()
+ self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
+ ["foo"])
+
+ def test_addDetails_from_Mismatch(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_multiple_addDetails_from_Mismatch(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content, "bar": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["bar", "foo", "traceback"])
+
+ def test_addDetails_with_same_name_as_key_from_get_details(self):
+ content = self.get_content()
+ class Mismatch(object):
+ def describe(self):
+ return "Mismatch"
+ def get_details(self):
+ return {"foo": content}
+ class Matcher(object):
+ def match(self, thing):
+ return Mismatch()
+ def __str__(self):
+ return "a description"
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.assertThat("foo", Matcher())
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "foo-1", "traceback"])
+
+ def test_addDetailUniqueName_works(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetailUniqueName("foo", content)
+ self.addDetailUniqueName("foo", content)
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo", "foo-1"])
+
+
+class TestSetupTearDown(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def test_setUpNotCalled(self):
+ class DoesnotcallsetUp(TestCase):
+ def setUp(self):
+ pass
+ def test_method(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcallsetUp('test_method').run(result)
+ self.assertThat(result.errors, HasLength(1))
+ self.assertThat(result.errors[0][1],
+ DocTestMatches(
+ "...ValueError...File...testtools/tests/test_testcase.py...",
+ ELLIPSIS))
+
+ def test_tearDownNotCalled(self):
+ class DoesnotcalltearDown(TestCase):
+ def test_method(self):
+ pass
+ def tearDown(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcalltearDown('test_method').run(result)
+ self.assertThat(result.errors, HasLength(1))
+ self.assertThat(result.errors[0][1],
+ DocTestMatches(
+ "...ValueError...File...testtools/tests/test_testcase.py...",
+ ELLIPSIS))
+
+
+class TestSkipping(TestCase):
+ """Tests for skipping of tests functionality."""
+
+ run_test_with = FullStackRunTest
+
+ def test_skip_causes_skipException(self):
+ self.assertThat(lambda:self.skip("Skip this test"),
+ Raises(MatchesException(self.skipException)))
+
+ def test_can_use_skipTest(self):
+ self.assertThat(lambda:self.skipTest("Skip this test"),
+ Raises(MatchesException(self.skipException)))
+
+ def test_skip_without_reason_works(self):
+ class Test(TestCase):
+ def test(self):
+ raise self.skipException()
+ case = Test("test")
+ result = ExtendedTestResult()
+ case.run(result)
+ self.assertEqual('addSkip', result._events[1][0])
+ self.assertEqual('no reason given.',
+ result._events[1][2]['reason'].as_text())
+
+ def test_skipException_in_setup_calls_result_addSkip(self):
+ class TestThatRaisesInSetUp(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ self.skip("skipping this test")
+ def test_that_passes(self):
+ pass
+ calls = []
+ result = LoggingResult(calls)
+ test = TestThatRaisesInSetUp("test_that_passes")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "skipping this test"), ('stopTest', case)],
+ calls)
+
+ def test_skipException_in_test_method_calls_result_addSkip(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ self.skip("skipping this test")
+ result = Python27TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "skipping this test"), ('stopTest', case)],
+ result._events)
+
+ def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
+ class SkippingTest(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ raise self.skipException("skipping this test")
+ def test_that_raises_skipException(self):
+ pass
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_with_old_result_object_calls_addError(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ raise self.skipException("skipping this test")
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_decorator(self):
+ class SkippingTest(TestCase):
+ @skip("skipping this test")
+ def test_that_is_decorated_with_skip(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skip")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipIf_decorator(self):
+ class SkippingTest(TestCase):
+ @skipIf(True, "skipping this test")
+ def test_that_is_decorated_with_skipIf(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipIf")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipUnless_decorator(self):
+ class SkippingTest(TestCase):
+ @skipUnless(False, "skipping this test")
+ def test_that_is_decorated_with_skipUnless(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipUnless")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+
+class TestOnException(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def test_default_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.onException(an_exc_info)
+ events.append(True)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([True]))
+
+ def test_added_handler_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.append)
+ self.onException(an_exc_info)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([an_exc_info]))
+
+ def test_handler_that_raises_is_not_caught(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.index)
+ self.assertThat(lambda: self.onException(an_exc_info),
+ Raises(MatchesException(ValueError)))
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([]))
+
+
+class TestPatchSupport(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ class Case(TestCase):
+ def test(self):
+ pass
+
+ def test_patch(self):
+ # TestCase.patch masks obj.attribute with the new value.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ self.assertEqual('patched', self.foo)
+
+ def test_patch_restored_after_run(self):
+ # TestCase.patch masks obj.attribute with the new value, but restores
+ # the original value after the test is finished.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.run()
+ self.assertEqual('original', self.foo)
+
+ def test_successive_patches_apply(self):
+ # TestCase.patch can be called multiple times per test. Each time you
+ # call it, it overrides the original value.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.patch(self, 'foo', 'second')
+ self.assertEqual('second', self.foo)
+
+ def test_successive_patches_restored_after_run(self):
+ # TestCase.patch restores the original value, no matter how many times
+ # it was called.
+ self.foo = 'original'
+ test = self.Case('test')
+ test.patch(self, 'foo', 'patched')
+ test.patch(self, 'foo', 'second')
+ test.run()
+ self.assertEqual('original', self.foo)
+
+ def test_patch_nonexistent_attribute(self):
+ # TestCase.patch can be used to patch a non-existent attribute.
+ test = self.Case('test')
+ test.patch(self, 'doesntexist', 'patched')
+ self.assertEqual('patched', self.doesntexist)
+
+ def test_restore_nonexistent_attribute(self):
+        # TestCase.patch can be used to patch a non-existent attribute; after
+        # the test run, the attribute is removed from the object.
+ test = self.Case('test')
+ test.patch(self, 'doesntexist', 'patched')
+ test.run()
+ marker = object()
+ value = getattr(self, 'doesntexist', marker)
+ self.assertIs(marker, value)
+
+
+class TestTestCaseSuper(TestCase):
+
+ run_test_with = FullStackRunTest
+
+ def test_setup_uses_super(self):
+ class OtherBaseCase(unittest.TestCase):
+ setup_called = False
+ def setUp(self):
+ self.setup_called = True
+ super(OtherBaseCase, self).setUp()
+ class OurCase(TestCase, OtherBaseCase):
+ def runTest(self):
+ pass
+ test = OurCase()
+ test.setUp()
+ test.tearDown()
+ self.assertTrue(test.setup_called)
+
+ def test_teardown_uses_super(self):
+ class OtherBaseCase(unittest.TestCase):
+ teardown_called = False
+ def tearDown(self):
+ self.teardown_called = True
+ super(OtherBaseCase, self).tearDown()
+ class OurCase(TestCase, OtherBaseCase):
+ def runTest(self):
+ pass
+ test = OurCase()
+ test.setUp()
+ test.tearDown()
+ self.assertTrue(test.teardown_called)
+
+
+class TestNullary(TestCase):
+
+ def test_repr(self):
+ # The repr() of nullary is the same as the repr() of the wrapped
+ # function.
+ def foo():
+ pass
+ wrapped = Nullary(foo)
+ self.assertEqual(repr(wrapped), repr(foo))
+
+ def test_called_with_arguments(self):
+ # The function is called with the arguments given to Nullary's
+ # constructor.
+ l = []
+ def foo(*args, **kwargs):
+ l.append((args, kwargs))
+ wrapped = Nullary(foo, 1, 2, a="b")
+ wrapped()
+ self.assertEqual(l, [((1, 2), {'a': 'b'})])
+
+ def test_returns_wrapped(self):
+ # Calling Nullary returns whatever the function returns.
+ ret = object()
+ wrapped = Nullary(lambda: ret)
+ self.assertIs(ret, wrapped())
+
+ def test_raises(self):
+ # If the function raises, so does Nullary when called.
+ wrapped = Nullary(lambda: 1/0)
+ self.assertRaises(ZeroDivisionError, wrapped)
+
+
+class TestAttributes(TestCase):
+
+ def test_simple_attr(self):
+ # Adding an attr to a test changes its id().
+ class MyTest(WithAttributes, TestCase):
+ @attr('foo')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]',
+ case.id())
+
+ def test_multiple_attributes(self):
+ class MyTest(WithAttributes, TestCase):
+ # Not sorted here, forward or backwards.
+ @attr('foo', 'quux', 'bar')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual(
+ 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+ case.id())
+
+ def test_multiple_attr_decorators(self):
+ class MyTest(WithAttributes, TestCase):
+ # Not sorted here, forward or backwards.
+ @attr('bar')
+ @attr('quux')
+ @attr('foo')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual(
+ 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+ case.id())
+
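+
+# A hedged sketch of the id suffix behaviour asserted above: however the
+# attributes are declared, they are rendered sorted and comma-separated inside
+# square brackets.  _sketch_attr_suffix is an illustrative helper, not the
+# WithAttributes implementation.
+def _sketch_attr_suffix(attrs):
+    # e.g. _sketch_attr_suffix({'foo', 'quux', 'bar'}) == '[bar,foo,quux]'
+    return '[' + ','.join(sorted(attrs)) + ']'
+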
+
+class TestDecorateTestCaseResult(TestCase):
+
+ def setUp(self):
+ super(TestDecorateTestCaseResult, self).setUp()
+ self.log = []
+
+ def make_result(self, result):
+ self.log.append(('result', result))
+ return LoggingResult(self.log)
+
+ def test___call__(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+ case(None)
+ case('something')
+ self.assertEqual([('result', None),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ ('result', 'something'),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set())
+ ], self.log)
+
+ def test_run(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+ case.run(None)
+ case.run('something')
+ self.assertEqual([('result', None),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ ('result', 'something'),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set())
+ ], self.log)
+
+ def test_before_after_hooks(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result,
+ before_run=lambda result: self.log.append('before'),
+ after_run=lambda result: self.log.append('after'))
+ case.run(None)
+ case(None)
+ self.assertEqual([
+ ('result', None),
+ 'before',
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ 'after',
+ ('result', None),
+ 'before',
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ 'after',
+ ], self.log)
+
+ def test_other_attribute(self):
+ orig = PlaceHolder('foo')
+ orig.thing = 'fred'
+ case = DecorateTestCaseResult(orig, self.make_result)
+ self.assertEqual('fred', case.thing)
+ self.assertRaises(AttributeError, getattr, case, 'other')
+ case.other = 'barbara'
+ self.assertEqual('barbara', orig.other)
+ del case.thing
+ self.assertRaises(AttributeError, getattr, orig, 'thing')
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py
new file mode 100644
index 00000000000..04aa0873ccd
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py
@@ -0,0 +1,2919 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test TestResults and related things."""
+
+__metaclass__ = type
+
+import codecs
+import datetime
+import doctest
+from itertools import chain, combinations
+import os
+import shutil
+import sys
+import tempfile
+import threading
+from unittest import TestSuite
+import warnings
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ PlaceHolder,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestCase,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TestByTestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ testresult,
+ )
+from testtools.compat import (
+ _b,
+ _get_exception_encoding,
+ _r,
+ _u,
+ advance_iterator,
+ str_is_unicode,
+ StringIO,
+ )
+from testtools.content import (
+ Content,
+ content_from_stream,
+ text_content,
+ TracebackContent,
+ )
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.matchers import (
+ AllMatch,
+ Contains,
+ DocTestMatches,
+ Equals,
+ HasLength,
+ MatchesAny,
+ MatchesException,
+ Raises,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ FullStackRunTest,
+ LoggingResult,
+ run_with_stack_hidden,
+ )
+from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ StreamResult as LoggingStreamResult,
+ )
+from testtools.testresult.real import (
+ _details_to_str,
+ _merge_tags,
+ utc,
+ )
+
+
+def make_erroring_test():
+ class Test(TestCase):
+ def error(self):
+ 1/0
+ return Test("error")
+
+
+def make_failing_test():
+ class Test(TestCase):
+ def failed(self):
+ self.fail("yo!")
+ return Test("failed")
+
+
+def make_mismatching_test():
+ class Test(TestCase):
+ def mismatch(self):
+ self.assertEqual(1, 2)
+ return Test("mismatch")
+
+
+def make_unexpectedly_successful_test():
+ class Test(TestCase):
+ def succeeded(self):
+ self.expectFailure("yo!", lambda: None)
+ return Test("succeeded")
+
+
+def make_test():
+ class Test(TestCase):
+ def test(self):
+ pass
+ return Test("test")
+
+
+def make_exception_info(exceptionFactory, *args, **kwargs):
+ try:
+ raise exceptionFactory(*args, **kwargs)
+ except:
+ return sys.exc_info()
+
+
+class Python26Contract(object):
+
+ def test_fresh_result_is_successful(self):
+ # A result is considered successful before any tests are run.
+ result = self.makeResult()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addError_is_failure(self):
+ # addError fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addFailure_is_failure(self):
+ # addFailure fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addSuccess_is_success(self):
+ # addSuccess does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_stop_sets_shouldStop(self):
+ result = self.makeResult()
+ result.stop()
+ self.assertTrue(result.shouldStop)
+
+
+class Python27Contract(Python26Contract):
+
+ def test_addExpectedFailure(self):
+ # Calling addExpectedFailure(test, exc_info) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+
+ def test_addExpectedFailure_is_success(self):
+ # addExpectedFailure does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addSkipped(self):
+ # Calling addSkip(test, reason) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+
+ def test_addSkip_is_success(self):
+ # addSkip does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addUnexpectedSuccess(self):
+ # Calling addUnexpectedSuccess(test) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_was_successful(self):
+ # addUnexpectedSuccess does not fail the test run in Python 2.7.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startStopTestRun(self):
+ # Calling startTestRun completes ok.
+ result = self.makeResult()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_failfast(self):
+ result = self.makeResult()
+ result.failfast = True
+ class Failing(TestCase):
+ def test_a(self):
+ self.fail('a')
+ def test_b(self):
+ self.fail('b')
+ TestSuite([Failing('test_a'), Failing('test_b')]).run(result)
+ self.assertEqual(1, result.testsRun)
+
+
+class TagsContract(Python27Contract):
+ """Tests to ensure correct tagging behaviour.
+
+ See the subunit docs for guidelines on how this is supposed to work.
+ """
+
+ def test_no_tags_by_default(self):
+ # Results initially have no tags.
+ result = self.makeResult()
+ result.startTestRun()
+ self.assertEqual(frozenset(), result.current_tags)
+
+ def test_adding_tags(self):
+ # Tags are added using 'tags' and thus become visible in
+ # 'current_tags'.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), result.current_tags)
+
+ def test_removing_tags(self):
+ # Tags are removed using 'tags'.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.tags(set(), set(['foo']))
+ self.assertEqual(set(), result.current_tags)
+
+ def test_startTestRun_resets_tags(self):
+ # startTestRun makes a new test run, and thus clears all the tags.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTestRun()
+ self.assertEqual(set(), result.current_tags)
+
+ def test_add_tags_within_test(self):
+        # Tags can be added while a test is running.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(['bar']), set())
+ self.assertEqual(set(['foo', 'bar']), result.current_tags)
+
+ def test_tags_added_in_test_are_reverted(self):
+ # Tags added during a test run are then reverted once that test has
+ # finished.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(['bar']), set())
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertEqual(set(['foo']), result.current_tags)
+
+ def test_tags_removed_in_test(self):
+ # Tags can be removed during tests.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(), set(['foo']))
+ self.assertEqual(set(), result.current_tags)
+
+ def test_tags_removed_in_test_are_restored(self):
+ # Tags removed during tests are restored once that test has finished.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(), set(['foo']))
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertEqual(set(['foo']), result.current_tags)
+
+
+class DetailsContract(TagsContract):
+ """Tests for the details API of TestResults."""
+
+ def test_addExpectedFailure_details(self):
+ # Calling addExpectedFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, details={})
+
+ def test_addError_details(self):
+ # Calling addError(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, details={})
+
+ def test_addFailure_details(self):
+ # Calling addFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, details={})
+
+ def test_addSkipped_details(self):
+        # Calling addSkip(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, details={})
+
+ def test_addUnexpectedSuccess_details(self):
+        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self, details={})
+
+ def test_addSuccess_details(self):
+        # Calling addSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self, details={})
+
+
+class FallbackContract(DetailsContract):
+ """When we fallback we take our policy choice to map calls.
+
+ For instance, we map unexpectedSuccess to an error code, not to success.
+ """
+
+ def test_addUnexpectedSuccess_was_successful(self):
+        # addUnexpectedSuccess fails the test run in testtools.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+
+class StartTestRunContract(FallbackContract):
+ """Defines the contract for testtools policy choices.
+
+    That is, things which are not simply extensions to unittest but choices we
+ have made differently.
+ """
+
+ def test_startTestRun_resets_unexpected_success(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_failure(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_errors(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
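+# The contract classes above are mixins: each concrete binding below inherits
+# from TestCase plus the strictest contract its result type honours and
+# supplies a makeResult() factory.  A hedged sketch for a hypothetical result
+# class MyResult (the name is illustrative only):
+#
+#   class TestMyResultContract(TestCase, StartTestRunContract):
+#       def makeResult(self):
+#           return MyResult()
+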
+
+class TestTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TestResult()
+
+
+class TestMultiTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return MultiTestResult(TestResult(), TestResult())
+
+
+class TestTextTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TextTestResult(StringIO())
+
+
+class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ result_semaphore = threading.Semaphore(1)
+ target = TestResult()
+ return ThreadsafeForwardingResult(target, result_semaphore)
+
+
+class TestExtendedTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return ExtendedTestResult()
+
+
+class TestPython26TestResultContract(TestCase, Python26Contract):
+
+ def makeResult(self):
+ return Python26TestResult()
+
+
+class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python26TestResult())
+
+
+class TestPython27TestResultContract(TestCase, Python27Contract):
+
+ def makeResult(self):
+ return Python27TestResult()
+
+
+class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python27TestResult())
+
+
+class TestAdaptedStreamResult(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TestResultDecorator(TestResult())
+
+
+# DetailsContract is used because ExtendedToStreamDecorator follows Python's
+# handling of uxsuccess.
+class TestStreamToExtendedContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToStreamDecorator(
+ StreamToExtendedDecorator(ExtendedTestResult()))
+
+
+class TestStreamResultContract(object):
+
+ def _make_result(self):
+ raise NotImplementedError(self._make_result)
+
+ def test_startTestRun(self):
+ result = self._make_result()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_files(self):
+ # Test parameter combinations when files are being emitted.
+ result = self._make_result()
+ result.startTestRun()
+ self.addCleanup(result.stopTestRun)
+ now = datetime.datetime.now(utc)
+ inputs = list(dict(
+ eof=True,
+ mime_type="text/plain",
+ route_code=_u("1234"),
+ test_id=_u("foo"),
+ timestamp=now,
+ ).items())
+ param_dicts = self._power_set(inputs)
+ for kwargs in param_dicts:
+ result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
+ result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)
+
+ def test_test_status(self):
+ # Tests non-file attachment parameter combinations.
+ result = self._make_result()
+ result.startTestRun()
+ self.addCleanup(result.stopTestRun)
+ now = datetime.datetime.now(utc)
+ args = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
+ 'uxsuccess', 'success', 'fail', 'skip']]
+ inputs = list(dict(
+ runnable=False,
+ test_tags=set(['quux']),
+ route_code=_u("1234"),
+ timestamp=now,
+ ).items())
+ param_dicts = self._power_set(inputs)
+ for kwargs in param_dicts:
+ for arg in args:
+ result.status(test_id=arg[0], test_status=arg[1], **kwargs)
+
+ def _power_set(self, iterable):
+ "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+ s = list(iterable)
+ param_dicts = []
+ for ss in chain.from_iterable(combinations(s, r) for r in range(len(s)+1)):
+ param_dicts.append(dict(ss))
+ return param_dicts
+
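+# Worked example of the parameter expansion used by the contract above
+# (illustrative, mirroring the _power_set docstring): for
+# inputs = [('eof', True), ('route_code', '1')] the helper yields the four
+# dicts {}, {'eof': True}, {'route_code': '1'} and
+# {'eof': True, 'route_code': '1'}, and status() is exercised with each dict
+# in turn.
+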
+
+class TestBaseStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamResult()
+
+
+class TestCopyStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return CopyStreamResult([StreamResult(), StreamResult()])
+
+
+class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return LoggingStreamResult()
+
+
+class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamSummary()
+
+
+class TestStreamTaggerContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamTagger([StreamResult()], add=set(), discard=set())
+
+
+class TestStreamToDictContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamToDict(lambda x:None)
+
+
+class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamToExtendedDecorator(ExtendedTestResult())
+
+
+class TestStreamToQueueContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ queue = Queue()
+ return StreamToQueue(queue, "foo")
+
+
+class TestStreamFailFastContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamFailFast(lambda:None)
+
+
+class TestStreamResultRouterContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamResultRouter(StreamResult())
+
+
+class TestDoubleStreamResultEvents(TestCase):
+
+ def test_startTestRun(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ self.assertEqual([('startTestRun',)], result._events)
+
+ def test_stopTestRun(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ result.stopTestRun()
+ self.assertEqual([('startTestRun',), ('stopTestRun',)], result._events)
+
+ def test_file(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.status(file_name="foo", file_bytes="bar", eof=True, mime_type="text/json",
+ test_id="id", route_code='abc', timestamp=now)
+ self.assertEqual(
+ [('startTestRun',),
+ ('status', 'id', None, None, True, 'foo', 'bar', True, 'text/json', 'abc', now)],
+ result._events)
+
+ def test_status(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.status("foo", "success", test_tags=set(['tag']),
+ runnable=False, route_code='abc', timestamp=now)
+ self.assertEqual(
+ [('startTestRun',),
+ ('status', 'foo', 'success', set(['tag']), False, None, None, False, None, 'abc', now)],
+ result._events)
+
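+# For reference when reading the tuple-heavy assertions below: the logging
+# double records each status() call positionally as
+#   ('status', test_id, test_status, test_tags, runnable,
+#    file_name, file_bytes, eof, mime_type, route_code, timestamp)
+# so unset keyword arguments appear as the None/True/False defaults.
+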
+
+class TestCopyStreamResultCopies(TestCase):
+
+ def setUp(self):
+ super(TestCopyStreamResultCopies, self).setUp()
+ self.target1 = LoggingStreamResult()
+ self.target2 = LoggingStreamResult()
+ self.targets = [self.target1._events, self.target2._events]
+ self.result = CopyStreamResult([self.target1, self.target2])
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))
+
+ def test_stopTestRun(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.targets,
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+
+ def test_status(self):
+ self.result.startTestRun()
+ now = datetime.datetime.now(utc)
+ self.result.status("foo", "success", test_tags=set(['tag']),
+ runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
+ mime_type="text/json", route_code='abc', timestamp=now)
+ self.assertThat(self.targets,
+ AllMatch(Equals([('startTestRun',),
+ ('status', 'foo', 'success', set(['tag']), False, "foo",
+ b'bar', True, "text/json", 'abc', now)
+ ])))
+
+
+class TestStreamTagger(TestCase):
+
+ def test_adding(self):
+ log = LoggingStreamResult()
+ result = StreamTagger([log], add=['foo'])
+ result.startTestRun()
+ result.status()
+ result.status(test_tags=set(['bar']))
+ result.status(test_tags=None)
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+ ('stopTestRun',),
+ ], log._events)
+
+ def test_discarding(self):
+ log = LoggingStreamResult()
+ result = StreamTagger([log], discard=['foo'])
+ result.startTestRun()
+ result.status()
+ result.status(test_tags=None)
+ result.status(test_tags=set(['foo']))
+ result.status(test_tags=set(['bar']))
+ result.status(test_tags=set(['foo', 'bar']))
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+ ('stopTestRun',),
+ ], log._events)
+
+
+class TestStreamToDict(TestCase):
+
+ def test_hung_test(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status('foo', 'inprogress')
+ self.assertEqual([], tests)
+ result.stopTestRun()
+ self.assertEqual([
+ {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
+ 'timestamps': [None, None]}
+ ], tests)
+
+ def test_all_terminal_states_reported(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status('success', 'success')
+ result.status('skip', 'skip')
+ result.status('exists', 'exists')
+ result.status('fail', 'fail')
+ result.status('xfail', 'xfail')
+ result.status('uxsuccess', 'uxsuccess')
+ self.assertThat(tests, HasLength(6))
+ self.assertEqual(
+ ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
+ [test['id'] for test in tests])
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(6))
+
+ def test_files_reported(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(file_name="some log.txt",
+ file_bytes=_b("1234 log message"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status(file_name="another file",
+ file_bytes=_b("""Traceback..."""), test_id="foo.bar")
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(1))
+ test = tests[0]
+ self.assertEqual("foo.bar", test['id'])
+ self.assertEqual("unknown", test['status'])
+ details = test['details']
+ self.assertEqual(
+ _u("1234 log message"), details['some log.txt'].as_text())
+ self.assertEqual(
+ _b("Traceback..."),
+ _b('').join(details['another file'].iter_bytes()))
+ self.assertEqual(
+ "application/octet-stream", repr(details['another file'].content_type))
+
+ def test_bad_mime(self):
+        # Testtools was making bad mime types; this tests that the specific
+ # corruption is catered for.
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(file_name="file", file_bytes=b'a',
+ mime_type='text/plain; charset=utf8, language=python',
+ test_id='id')
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(1))
+ test = tests[0]
+ self.assertEqual("id", test['id'])
+ details = test['details']
+ self.assertEqual(_u("a"), details['file'].as_text())
+ self.assertEqual(
+ "text/plain; charset=\"utf8\"",
+ repr(details['file'].content_type))
+
+ def test_timestamps(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(test_id='foo', test_status='inprogress', timestamp="A")
+ result.status(test_id='foo', test_status='success', timestamp="B")
+ result.status(test_id='bar', test_status='inprogress', timestamp="C")
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(2))
+ self.assertEqual(["A", "B"], tests[0]['timestamps'])
+ self.assertEqual(["C", None], tests[1]['timestamps'])
+
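+
+# Usage sketch for StreamToDict as exercised above (only APIs shown in this
+# module are used; the helper and its stream_events argument are illustrative):
+# one summary dict per test, keyed 'id', 'status', 'tags', 'details' and
+# 'timestamps', delivered when the test ends or at stopTestRun() for tests
+# still in progress.
+def _sketch_collect_dicts(stream_events):
+    tests = []
+    result = StreamToDict(tests.append)
+    result.startTestRun()
+    for kwargs in stream_events:
+        result.status(**kwargs)
+    result.stopTestRun()
+    return tests
+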
+
+class TestExtendedToStreamDecorator(TestCase):
+
+ def test_explicit_time(self):
+ log = LoggingStreamResult()
+ result = ExtendedToStreamDecorator(log)
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status',
+ 'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ None,
+ now),
+ ('status',
+ 'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+ 'success',
+ set(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ None,
+ now),
+ ('stopTestRun',)], log._events)
+
+ def test_wasSuccessful_after_stopTestRun(self):
+ log = LoggingStreamResult()
+ result = ExtendedToStreamDecorator(log)
+ result.startTestRun()
+ result.status(test_id='foo', test_status='fail')
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+
+
+class TestStreamFailFast(TestCase):
+
+ def test_inprogress(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'inprogress')
+
+ def test_exists(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'exists')
+
+ def test_xfail(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'xfail')
+
+ def test_uxsuccess(self):
+ calls = []
+ def hook():
+ calls.append("called")
+ result = StreamFailFast(hook)
+ result.status('foo', 'uxsuccess')
+ result.status('foo', 'uxsuccess')
+ self.assertEqual(['called', 'called'], calls)
+
+ def test_success(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'success')
+
+ def test_fail(self):
+ calls = []
+ def hook():
+ calls.append("called")
+ result = StreamFailFast(hook)
+ result.status('foo', 'fail')
+ result.status('foo', 'fail')
+ self.assertEqual(['called', 'called'], calls)
+
+ def test_skip(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'skip')
+
+
+class TestStreamSummary(TestCase):
+
+ def test_attributes(self):
+ result = StreamSummary()
+ result.startTestRun()
+ self.assertEqual([], result.failures)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.skipped)
+ self.assertEqual([], result.expectedFailures)
+ self.assertEqual([], result.unexpectedSuccesses)
+ self.assertEqual(0, result.testsRun)
+
+ def test_startTestRun(self):
+ result = StreamSummary()
+ result.startTestRun()
+ result.failures.append('x')
+ result.errors.append('x')
+ result.skipped.append('x')
+ result.expectedFailures.append('x')
+ result.unexpectedSuccesses.append('x')
+ result.testsRun = 1
+ result.startTestRun()
+ self.assertEqual([], result.failures)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.skipped)
+ self.assertEqual([], result.expectedFailures)
+ self.assertEqual([], result.unexpectedSuccesses)
+ self.assertEqual(0, result.testsRun)
+
+ def test_wasSuccessful(self):
+        # wasSuccessful returns False if either failures or errors is
+        # non-empty.
+ result = StreamSummary()
+ result.startTestRun()
+ self.assertEqual(True, result.wasSuccessful())
+ result.failures.append('x')
+ self.assertEqual(False, result.wasSuccessful())
+ result.startTestRun()
+ result.errors.append('x')
+ self.assertEqual(False, result.wasSuccessful())
+ result.startTestRun()
+ result.skipped.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+ result.startTestRun()
+ result.expectedFailures.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+ result.startTestRun()
+ result.unexpectedSuccesses.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+
+ def test_stopTestRun(self):
+ result = StreamSummary()
+ # terminal successful codes.
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.status("foo", "success")
+ result.status("bar", "skip")
+ result.status("baz", "exists")
+ result.stopTestRun()
+ self.assertEqual(True, result.wasSuccessful())
+ # Existence is terminal but doesn't count as 'running' a test.
+ self.assertEqual(2, result.testsRun)
+
+ def test_stopTestRun_inprogress_test_fails(self):
+ # Tests inprogress at stopTestRun trigger a failure.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+ self.assertThat(result.errors, HasLength(1))
+ self.assertEqual("foo", result.errors[0][0].id())
+ self.assertEqual("Test did not complete", result.errors[0][1])
+ # interim state detection handles route codes - while duplicate ids in
+        # one run are undesirable, they may happen (e.g. with repeated tests).
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.status("foo", "inprogress", route_code="A")
+ result.status("foo", "success", route_code="A")
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+
+ def test_status_skip(self):
+        # when skip is seen, a synthetic test is reported, with the reason
+        # captured from the 'reason' file attachment, if any.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status(file_name="reason",
+ file_bytes=_b("Missing dependency"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status("foo.bar", "skip")
+ self.assertThat(result.skipped, HasLength(1))
+ self.assertEqual("foo.bar", result.skipped[0][0].id())
+ self.assertEqual(_u("Missing dependency"), result.skipped[0][1])
+
+ def _report_files(self, result):
+ result.status(file_name="some log.txt",
+ file_bytes=_b("1234 log message"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status(file_name="traceback",
+ file_bytes=_b("""Traceback (most recent call last):
+ File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")
+
+ files_message = Equals(_u("""some log.txt: {{{1234 log message}}}
+
+Traceback (most recent call last):
+ File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""))
+
+ def test_status_fail(self):
+        # when fail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+ result = StreamSummary()
+ result.startTestRun()
+ self._report_files(result)
+ result.status("foo.bar", "fail")
+ self.assertThat(result.errors, HasLength(1))
+ self.assertEqual("foo.bar", result.errors[0][0].id())
+ self.assertThat(result.errors[0][1], self.files_message)
+
+ def test_status_xfail(self):
+        # when xfail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+ result = StreamSummary()
+ result.startTestRun()
+ self._report_files(result)
+ result.status("foo.bar", "xfail")
+ self.assertThat(result.expectedFailures, HasLength(1))
+ self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
+ self.assertThat(result.expectedFailures[0][1], self.files_message)
+
+ def test_status_uxsuccess(self):
+ # when uxsuccess is seen, a synthetic test is reported.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status("foo.bar", "uxsuccess")
+ self.assertThat(result.unexpectedSuccesses, HasLength(1))
+ self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
+
+
+class TestTestControl(TestCase):
+
+ def test_default(self):
+ self.assertEqual(False, TestControl().shouldStop)
+
+ def test_stop(self):
+ control = TestControl()
+ control.stop()
+ self.assertEqual(True, control.shouldStop)
+
+
+class TestTestResult(TestCase):
+ """Tests for 'TestResult'."""
+
+ run_tests_with = FullStackRunTest
+
+ def makeResult(self):
+ """Make an arbitrary result for testing."""
+ return TestResult()
+
+ def test_addSkipped(self):
+ # Calling addSkip on a TestResult records the test that was skipped in
+ # its skip_reasons dict.
+ result = self.makeResult()
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for another reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self],
+ _u("Skipped for another reason"):[self]},
+ result.skip_reasons)
+
+ def test_now_datetime_now(self):
+ result = self.makeResult()
+ olddatetime = testresult.real.datetime
+ def restore():
+ testresult.real.datetime = olddatetime
+ self.addCleanup(restore)
+ class Module:
+ pass
+ now = datetime.datetime.now(utc)
+ stubdatetime = Module()
+ stubdatetime.datetime = Module()
+ stubdatetime.datetime.now = lambda tz: now
+ testresult.real.datetime = stubdatetime
+ # Calling _now() looks up the time.
+ self.assertEqual(now, result._now())
+ then = now + datetime.timedelta(0, 1)
+ # Set an explicit datetime, which gets returned from then on.
+ result.time(then)
+ self.assertNotEqual(now, result._now())
+ self.assertEqual(then, result._now())
+ # go back to looking it up.
+ result.time(None)
+ self.assertEqual(now, result._now())
+
+ def test_now_datetime_time(self):
+ result = self.makeResult()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ self.assertEqual(now, result._now())
+
+ def test_traceback_formatting_without_stack_hidden(self):
+ # During the testtools test run, we show our levels of the stack,
+ # because we want to be able to use our test suite to debug our own
+ # code.
+ result = self.makeResult()
+ test = make_erroring_test()
+ test.run(result)
+ self.assertThat(
+ result.errors[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...runtest.py", line ..., in _run_user\n'
+ ' return fn(*args, **kwargs)\n'
+ ' File "...testtools...testcase.py", line ..., in _run_test_method\n'
+ ' return self._get_test_method()()\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
+ ' 1/0\n'
+ 'ZeroDivisionError: ...\n',
+ doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+
+ def test_traceback_formatting_with_stack_hidden(self):
+ result = self.makeResult()
+ test = make_erroring_test()
+ run_with_stack_hidden(True, test.run, result)
+ self.assertThat(
+ result.errors[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
+ ' 1/0\n'
+ 'ZeroDivisionError: ...\n',
+ doctest.ELLIPSIS))
+
+ def test_traceback_formatting_with_stack_hidden_mismatch(self):
+ result = self.makeResult()
+ test = make_mismatching_test()
+ run_with_stack_hidden(True, test.run, result)
+ self.assertThat(
+ result.failures[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in mismatch\n'
+ ' self.assertEqual(1, 2)\n'
+ '...MismatchError: 1 != 2\n',
+ doctest.ELLIPSIS))
+
+ def test_exc_info_to_unicode(self):
+ # subunit upcalls to TestResult._exc_info_to_unicode, so we need to
+ # make sure that it's there.
+ #
+ # See <https://bugs.launchpad.net/testtools/+bug/929063>.
+ test = make_erroring_test()
+ exc_info = make_exception_info(RuntimeError, "foo")
+ result = self.makeResult()
+ text_traceback = result._exc_info_to_unicode(exc_info, test)
+ self.assertEqual(
+ TracebackContent(exc_info, test).as_text(), text_traceback)
+
+
+class TestMultiTestResult(TestCase):
+ """Tests for 'MultiTestResult'."""
+
+ def setUp(self):
+ super(TestMultiTestResult, self).setUp()
+ self.result1 = LoggingResult([])
+ self.result2 = LoggingResult([])
+ self.multiResult = MultiTestResult(self.result1, self.result2)
+
+ def assertResultLogsEqual(self, expectedEvents):
+ """Assert that our test results have received the expected events."""
+ self.assertEqual(expectedEvents, self.result1._events)
+ self.assertEqual(expectedEvents, self.result2._events)
+
+ def test_repr(self):
+ self.assertEqual(
+ '<MultiTestResult (%r, %r)>' % (
+ ExtendedToOriginalDecorator(self.result1),
+ ExtendedToOriginalDecorator(self.result2)),
+ repr(self.multiResult))
+
+ def test_empty(self):
+ # Initializing a `MultiTestResult` doesn't do anything to its
+ # `TestResult`s.
+ self.assertResultLogsEqual([])
+
+ def test_failfast_get(self):
+ # Reading reads from the first one - arbitrary choice.
+ self.assertEqual(False, self.multiResult.failfast)
+ self.result1.failfast = True
+ self.assertEqual(True, self.multiResult.failfast)
+
+ def test_failfast_set(self):
+ # Writing writes to all.
+ self.multiResult.failfast = True
+ self.assertEqual(True, self.result1.failfast)
+ self.assertEqual(True, self.result2.failfast)
+
+ def test_shouldStop(self):
+ self.assertFalse(self.multiResult.shouldStop)
+ self.result2.stop()
+ # NB: result1 is not stopped: MultiTestResult has to combine the
+ # values.
+ self.assertTrue(self.multiResult.shouldStop)
+
+ def test_startTest(self):
+ # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
+ # its `TestResult`s.
+ self.multiResult.startTest(self)
+ self.assertResultLogsEqual([('startTest', self)])
+
+ def test_stop(self):
+ self.assertFalse(self.multiResult.shouldStop)
+ self.multiResult.stop()
+ self.assertResultLogsEqual(['stop'])
+
+ def test_stopTest(self):
+ # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
+ # its `TestResult`s.
+ self.multiResult.stopTest(self)
+ self.assertResultLogsEqual([('stopTest', self)])
+
+ def test_addSkipped(self):
+ # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
+ # results.
+ reason = _u("Skipped for some reason")
+ self.multiResult.addSkip(self, reason)
+ self.assertResultLogsEqual([('addSkip', self, reason)])
+
+ def test_addSuccess(self):
+ # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
+ # all its `TestResult`s.
+ self.multiResult.addSuccess(self)
+ self.assertResultLogsEqual([('addSuccess', self)])
+
+ def test_done(self):
+ # Calling `done` on a `MultiTestResult` calls `done` on all its
+ # `TestResult`s.
+ self.multiResult.done()
+ self.assertResultLogsEqual([('done')])
+
+ def test_addFailure(self):
+ # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
+ # all its `TestResult`s.
+ exc_info = make_exception_info(AssertionError, 'failure')
+ self.multiResult.addFailure(self, exc_info)
+ self.assertResultLogsEqual([('addFailure', self, exc_info)])
+
+ def test_addError(self):
+ # Calling `addError` on a `MultiTestResult` calls `addError` on all
+ # its `TestResult`s.
+ exc_info = make_exception_info(RuntimeError, 'error')
+ self.multiResult.addError(self, exc_info)
+ self.assertResultLogsEqual([('addError', self, exc_info)])
+
+ def test_startTestRun(self):
+ # Calling `startTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.startTestRun()
+ self.assertResultLogsEqual([('startTestRun')])
+
+ def test_stopTestRun(self):
+ # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.stopTestRun()
+ self.assertResultLogsEqual([('stopTestRun')])
+
+ def test_stopTestRun_returns_results(self):
+ # `MultiTestResult.stopTestRun` returns a tuple of all of the return
+        # values of the `stopTestRun`s that it forwards to.
+ class Result(LoggingResult):
+ def stopTestRun(self):
+ super(Result, self).stopTestRun()
+ return 'foo'
+ multi_result = MultiTestResult(Result([]), Result([]))
+ result = multi_result.stopTestRun()
+ self.assertEqual(('foo', 'foo'), result)
+
+ def test_tags(self):
+ # Calling `tags` on a `MultiTestResult` calls `tags` on all its
+ # `TestResult`s.
+ added_tags = set(['foo', 'bar'])
+ removed_tags = set(['eggs'])
+ self.multiResult.tags(added_tags, removed_tags)
+ self.assertResultLogsEqual([('tags', added_tags, removed_tags)])
+
+ def test_time(self):
+ # the time call is dispatched, not eaten by the base class
+ self.multiResult.time('foo')
+ self.assertResultLogsEqual([('time', 'foo')])
+
+
+class TestTextTestResult(TestCase):
+ """Tests for 'TextTestResult'."""
+
+ def setUp(self):
+ super(TestTextTestResult, self).setUp()
+ self.result = TextTestResult(StringIO())
+
+ def getvalue(self):
+ return self.result.stream.getvalue()
+
+ def test__init_sets_stream(self):
+ result = TextTestResult("fp")
+ self.assertEqual("fp", result.stream)
+
+ def reset_output(self):
+ self.result.stream = StringIO()
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertEqual("Tests running...\n", self.getvalue())
+
+ def test_stopTestRun_count_many(self):
+ test = make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.stream = StringIO()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_single(self):
+ test = make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_zero(self):
+ self.result.startTestRun()
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_current_time(self):
+ test = make_test()
+ now = datetime.datetime.now(utc)
+ self.result.time(now)
+ self.result.startTestRun()
+ self.result.startTest(test)
+ now = now + datetime.timedelta(0, 0, 0, 1)
+ self.result.time(now)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_successful(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_failure(self):
+ test = make_failing_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_error(self):
+ test = make_erroring_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_unexpected_success(self):
+ test = make_unexpectedly_successful_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_shows_details(self):
+ self.skip("Disabled per bug 1188420")
+ def run_tests():
+ self.result.startTestRun()
+ make_erroring_test().run(self.result)
+ make_unexpectedly_successful_test().run(self.result)
+ make_failing_test().run(self.result)
+ self.reset_output()
+ self.result.stopTestRun()
+ run_with_stack_hidden(True, run_tests)
+ self.assertThat(self.getvalue(),
+ DocTestMatches("""...======================================================================
+ERROR: testtools.tests.test_testresult.Test.error
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "...testtools...tests...test_testresult.py", line ..., in error
+ 1/0
+ZeroDivisionError:... divi... by zero...
+======================================================================
+FAIL: testtools.tests.test_testresult.Test.failed
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "...testtools...tests...test_testresult.py", line ..., in failed
+ self.fail("yo!")
+AssertionError: yo!
+======================================================================
+UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
+----------------------------------------------------------------------
+...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
+
+
+class TestThreadSafeForwardingResult(TestCase):
+ """Tests for `TestThreadSafeForwardingResult`."""
+
+ def make_results(self, n):
+ events = []
+ target = LoggingResult(events)
+ semaphore = threading.Semaphore(1)
+ return [
+ ThreadsafeForwardingResult(target, semaphore)
+ for i in range(n)], events
+
+ def test_nonforwarding_methods(self):
+ # startTest and stopTest are not forwarded because they need to be
+ # batched.
+ [result], events = self.make_results(1)
+ result.startTest(self)
+ result.stopTest(self)
+ self.assertEqual([], events)
+
+ def test_tags_not_forwarded(self):
+ # Tags need to be batched for each test, so they aren't forwarded
+ # until a test runs.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo']), set(['bar']))
+ self.assertEqual([], events)
+
+ def test_global_tags_simple(self):
+ # Tags specified outside of a test result are global. When a test's
+ # results are finally forwarded, we send through these global tags
+ # *as* test specific tags, because as a multiplexer there should be no
+ # way for a global tag on an input stream to affect tests from other
+ # streams - we can just always issue test local tags.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo']), set())
+ result.time(1)
+ result.startTest(self)
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['foo']), set()),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_global_tags_complex(self):
+ # Multiple calls to tags() in a global context are buffered until the
+        # next test completes and are issued as part of the test context,
+        # because they cannot be issued until the output result is locked.
+        # The sample data shows them being merged together; this is, strictly
+        # speaking, incidental - they could be issued separately (in order) and
+ # still be legitimate.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo', 'bar']), set(['baz', 'qux']))
+ result.tags(set(['cat', 'qux']), set(['bar', 'dog']))
+ result.time(1)
+ result.startTest(self)
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['cat', 'foo', 'qux']), set(['dog', 'bar', 'baz'])),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_local_tags(self):
+ # Any tags set within a test context are forwarded in that test
+ # context when the result is finally forwarded. This means that the
+ # tags for the test are part of the atomic message communicating
+ # everything about that test.
+ [result], events = self.make_results(1)
+ result.time(1)
+ result.startTest(self)
+ result.tags(set(['foo']), set([]))
+ result.tags(set(), set(['bar']))
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_local_tags_dont_leak(self):
+ # A tag set during a test is local to that test and is not set during
+ # the tests that follow.
+ [result], events = self.make_results(1)
+ a, b = PlaceHolder('a'), PlaceHolder('b')
+ result.time(1)
+ result.startTest(a)
+ result.tags(set(['foo']), set([]))
+ result.time(2)
+ result.addSuccess(a)
+ result.stopTest(a)
+ result.time(3)
+ result.startTest(b)
+ result.time(4)
+ result.addSuccess(b)
+ result.stopTest(b)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', a),
+ ('time', 2),
+ ('tags', set(['foo']), set()),
+ ('addSuccess', a),
+ ('stopTest', a),
+ ('time', 3),
+ ('startTest', b),
+ ('time', 4),
+ ('addSuccess', b),
+ ('stopTest', b),
+ ], events)
+
+ def test_startTestRun(self):
+ # Calls to startTestRun are not batched, because we are only
+ # interested in sending tests atomically, not the whole run.
+ [result1, result2], events = self.make_results(2)
+ result1.startTestRun()
+ result2.startTestRun()
+ self.assertEqual(["startTestRun", "startTestRun"], events)
+
+ def test_stopTestRun(self):
+ # Calls to stopTestRun are not batched, because we are only
+ # interested in sending tests atomically, not the whole run.
+ [result1, result2], events = self.make_results(2)
+ result1.stopTestRun()
+ result2.stopTestRun()
+ self.assertEqual(["stopTestRun", "stopTestRun"], events)
+
+ def test_forward_addError(self):
+ # Once we receive an addError event, we forward all of the events for
+ # that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ exc_info = make_exception_info(RuntimeError, 'error')
+ start_time = datetime.datetime.utcfromtimestamp(1.489)
+ end_time = datetime.datetime.utcfromtimestamp(51.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addError(self, exc_info)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addError', self, exc_info),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addFailure(self):
+ # Once we receive an addFailure event, we forward all of the events
+ # for that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ exc_info = make_exception_info(AssertionError, 'failure')
+ start_time = datetime.datetime.utcfromtimestamp(2.489)
+ end_time = datetime.datetime.utcfromtimestamp(3.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addFailure(self, exc_info)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addFailure', self, exc_info),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addSkip(self):
+ # Once we receive an addSkip event, we forward all of the events for
+ # that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ reason = _u("Skipped for some reason")
+ start_time = datetime.datetime.utcfromtimestamp(4.489)
+ end_time = datetime.datetime.utcfromtimestamp(5.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addSkip(self, reason)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addSkip', self, reason),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addSuccess(self):
+ # Once we receive an addSuccess event, we forward all of the events
+ # for that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ start_time = datetime.datetime.utcfromtimestamp(6.489)
+ end_time = datetime.datetime.utcfromtimestamp(7.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addSuccess(self)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_only_one_test_at_a_time(self):
+ # Even if there are multiple ThreadsafeForwardingResults forwarding to
+ # the same target result, the target result only receives the complete
+ # events for one test at a time.
+ [result1, result2], events = self.make_results(2)
+ test1, test2 = self, make_test()
+ start_time1 = datetime.datetime.utcfromtimestamp(1.489)
+ end_time1 = datetime.datetime.utcfromtimestamp(2.476)
+ start_time2 = datetime.datetime.utcfromtimestamp(3.489)
+ end_time2 = datetime.datetime.utcfromtimestamp(4.489)
+ result1.time(start_time1)
+ result2.time(start_time2)
+ result1.startTest(test1)
+ result2.startTest(test2)
+ result1.time(end_time1)
+ result2.time(end_time2)
+ result2.addSuccess(test2)
+ result1.addSuccess(test1)
+ self.assertEqual([
+ # test2 finishes first, and so is flushed first.
+ ('time', start_time2),
+ ('startTest', test2),
+ ('time', end_time2),
+ ('addSuccess', test2),
+ ('stopTest', test2),
+ # test1 finishes next, and thus follows.
+ ('time', start_time1),
+ ('startTest', test1),
+ ('time', end_time1),
+ ('addSuccess', test1),
+ ('stopTest', test1),
+ ], events)
+
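+
+# Usage sketch for the batching behaviour tested above.  Only the
+# ThreadsafeForwardingResult(target, semaphore) signature comes from this
+# module; the fan-in helper itself is illustrative.  Each worker thread gets
+# its own forwarding result, and completed tests are flushed to the shared
+# target one at a time under the semaphore.
+def _sketch_threadsafe_fanin(target, tests):
+    semaphore = threading.Semaphore(1)
+    threads = []
+    for test in tests:
+        forwarder = ThreadsafeForwardingResult(target, semaphore)
+        thread = threading.Thread(target=test.run, args=(forwarder,))
+        threads.append(thread)
+        thread.start()
+    for thread in threads:
+        thread.join()
+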
+
+class TestMergeTags(TestCase):
+
+ def test_merge_unseen_gone_tag(self):
+ # If an incoming "gone" tag isn't currently tagged one way or the
+ # other, add it to the "gone" tags.
+ current_tags = set(['present']), set(['missing'])
+ changing_tags = set(), set(['going'])
+ expected = set(['present']), set(['missing', 'going'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_incoming_gone_tag_with_current_new_tag(self):
+ # If one of the incoming "gone" tags is one of the existing "new"
+ # tags, then it overrides the "new" tag, leaving it marked as "gone".
+ current_tags = set(['present', 'going']), set(['missing'])
+ changing_tags = set(), set(['going'])
+ expected = set(['present']), set(['missing', 'going'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_unseen_new_tag(self):
+ current_tags = set(['present']), set(['missing'])
+ changing_tags = set(['coming']), set()
+ expected = set(['coming', 'present']), set(['missing'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_incoming_new_tag_with_current_gone_tag(self):
+ # If one of the incoming "new" tags is currently marked as "gone",
+ # then it overrides the "gone" tag, leaving it marked as "new".
+ current_tags = set(['present']), set(['coming', 'missing'])
+ changing_tags = set(['coming']), set()
+ expected = set(['coming', 'present']), set(['missing'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
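+
+# A hedged sketch of the merge rule the tests above pin down (not the real
+# _merge_tags): the incoming change wins, so a tag that arrives as "new" is
+# dropped from the current "gone" set and vice versa.
+def _sketch_merge_tags(current, changing):
+    current_new, current_gone = current
+    new, gone = changing
+    return (current_new | new) - gone, (current_gone | gone) - new
+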
+
+class TestStreamResultRouter(TestCase):
+
+ def test_start_stop_test_run_no_fallback(self):
+ result = StreamResultRouter()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_no_fallback_errors(self):
+ self.assertRaises(Exception, StreamResultRouter().status, test_id='f')
+
+ def test_fallback_calls(self):
+ fallback = LoggingStreamResult()
+ result = StreamResultRouter(fallback)
+ result.startTestRun()
+ result.status(test_id='foo')
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ('stopTestRun',),
+ ],
+ fallback._events)
+
+ def test_fallback_no_do_start_stop_run(self):
+ fallback = LoggingStreamResult()
+ result = StreamResultRouter(fallback, do_start_stop_run=False)
+ result.startTestRun()
+ result.status(test_id='foo')
+ result.stopTestRun()
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None)
+ ],
+ fallback._events)
+
+ def test_add_rule_bad_policy(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa',
+ route_prefix='0')
+
+ def test_add_rule_extra_policy_arg(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+ route_prefix='0', foo=1)
+
+ def test_add_rule_missing_prefix(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix')
+
+ def test_add_rule_slash_in_prefix(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+ route_prefix='0/')
+
+ def test_add_rule_route_code_consume_False(self):
+ fallback = LoggingStreamResult()
+ target = LoggingStreamResult()
+ router = StreamResultRouter(fallback)
+ router.add_rule(target, 'route_code_prefix', route_prefix='0')
+ router.status(test_id='foo', route_code='0')
+ router.status(test_id='foo', route_code='0/1')
+ router.status(test_id='foo')
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, '0',
+ None),
+ ('status', 'foo', None, None, True, None, None, False, None, '0/1',
+ None),
+ ],
+ target._events)
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ],
+ fallback._events)
+
+ def test_add_rule_route_code_consume_True(self):
+ fallback = LoggingStreamResult()
+ target = LoggingStreamResult()
+ router = StreamResultRouter(fallback)
+ router.add_rule(
+ target, 'route_code_prefix', route_prefix='0', consume_route=True)
+ router.status(test_id='foo', route_code='0') # -> None
+ router.status(test_id='foo', route_code='0/1') # -> 1
+ router.status(test_id='foo', route_code='1') # -> fallback as-is.
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ('status', 'foo', None, None, True, None, None, False, None, '1',
+ None),
+ ],
+ target._events)
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, '1',
+ None),
+ ],
+ fallback._events)
+
+ def test_add_rule_test_id(self):
+ nontest = LoggingStreamResult()
+ test = LoggingStreamResult()
+ router = StreamResultRouter(test)
+ router.add_rule(nontest, 'test_id', test_id=None)
+ router.status(test_id='foo', file_name="bar", file_bytes=b'')
+ router.status(file_name="bar", file_bytes=b'')
+ self.assertEqual([
+ ('status', 'foo', None, None, True, 'bar', b'', False, None, None,
+ None),], test._events)
+ self.assertEqual([
+ ('status', None, None, None, True, 'bar', b'', False, None, None,
+ None),], nontest._events)
+
+ def test_add_rule_do_start_stop_run(self):
+ nontest = LoggingStreamResult()
+ router = StreamResultRouter()
+ router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+ router.startTestRun()
+ router.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('stopTestRun',),
+ ], nontest._events)
+
+ def test_add_rule_do_start_stop_run_after_startTestRun(self):
+ nontest = LoggingStreamResult()
+ router = StreamResultRouter()
+ router.startTestRun()
+ router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+ router.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('stopTestRun',),
+ ], nontest._events)
+
+
+class TestStreamToQueue(TestCase):
+
+ def make_result(self):
+ queue = Queue()
+ return queue, StreamToQueue(queue, "foo")
+
+ def test_status(self):
+ def check_event(event_dict, route=None, time=None):
+ self.assertEqual("status", event_dict['event'])
+ self.assertEqual("test", event_dict['test_id'])
+ self.assertEqual("fail", event_dict['test_status'])
+ self.assertEqual(set(["quux"]), event_dict['test_tags'])
+ self.assertEqual(False, event_dict['runnable'])
+ self.assertEqual("file", event_dict['file_name'])
+ self.assertEqual(_b("content"), event_dict['file_bytes'])
+ self.assertEqual(True, event_dict['eof'])
+ self.assertEqual("quux", event_dict['mime_type'])
+ self.assertEqual("test", event_dict['test_id'])
+ self.assertEqual(route, event_dict['route_code'])
+ self.assertEqual(time, event_dict['timestamp'])
+ queue, result = self.make_result()
+ result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+ file_name="file", file_bytes=_b("content"), eof=True,
+ mime_type="quux", route_code=None, timestamp=None)
+ self.assertEqual(1, queue.qsize())
+ a_time = datetime.datetime.now(utc)
+ result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+ file_name="file", file_bytes=_b("content"), eof=True,
+ mime_type="quux", route_code="bar", timestamp=a_time)
+ self.assertEqual(2, queue.qsize())
+ check_event(queue.get(False), route="foo", time=None)
+ check_event(queue.get(False), route="foo/bar", time=a_time)
+
+ def testStartTestRun(self):
+ queue, result = self.make_result()
+ result.startTestRun()
+ self.assertEqual(
+ {'event':'startTestRun', 'result':result}, queue.get(False))
+ self.assertTrue(queue.empty())
+
+ def testStopTestRun(self):
+ queue, result = self.make_result()
+ result.stopTestRun()
+ self.assertEqual(
+ {'event':'stopTestRun', 'result':result}, queue.get(False))
+ self.assertTrue(queue.empty())
+
+
+class TestExtendedToOriginalResultDecoratorBase(TestCase):
+
+ def make_26_result(self):
+ self.result = Python26TestResult()
+ self.make_converter()
+
+ def make_27_result(self):
+ self.result = Python27TestResult()
+ self.make_converter()
+
+ def make_converter(self):
+ self.converter = ExtendedToOriginalDecorator(self.result)
+
+ def make_extended_result(self):
+ self.result = ExtendedTestResult()
+ self.make_converter()
+
+ def check_outcome_details(self, outcome):
+ """Call an outcome with a details dict to be passed through."""
+        # This dict is /not/ convertible - that's deliberate, as it should
+ # not hit the conversion code path.
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, details)], self.result._events)
+
+ def get_details_and_string(self):
+ """Get a details dict and expected string."""
+ text1 = lambda: [_b("1\n2\n")]
+ text2 = lambda: [_b("3\n4\n")]
+ bin1 = lambda: [_b("5\n")]
+ details = {'text 1': Content(ContentType('text', 'plain'), text1),
+ 'text 2': Content(ContentType('text', 'strange'), text2),
+ 'bin 1': Content(ContentType('application', 'binary'), bin1)}
+ return (details,
+ ("Binary content:\n"
+ " bin 1 (application/binary)\n"
+ "\n"
+ "text 1: {{{\n"
+ "1\n"
+ "2\n"
+ "}}}\n"
+ "\n"
+ "text 2: {{{\n"
+ "3\n"
+ "4\n"
+ "}}}\n"))
+
+ def check_outcome_details_to_exec_info(self, outcome, expected=None):
+ """Call an outcome with a details dict to be made into exc_info."""
+        # The conversion is done using RemoteError and the string contents
+ # of the text types in the details dict.
+ if not expected:
+ expected = outcome
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ err = self.converter._details_to_exc_info(details)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_details_to_nothing(self, outcome, expected=None):
+ """Call an outcome with a details dict to be swallowed."""
+ if not expected:
+ expected = outcome
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_details_to_string(self, outcome):
+ """Call an outcome with a details dict to be stringified."""
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, err_str)], self.result._events)
+
+ def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
+ """Call an outcome with a details dict to have an arg extracted."""
+ details, _ = self.get_details_and_string()
+ if extra_detail:
+ details.update(extra_detail)
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, arg)], self.result._events)
+
+ def check_outcome_exc_info(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome on a fallback works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ if not expected:
+ expected = outcome
+ getattr(self.converter, outcome)(self)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string_nothing(self, outcome, expected):
+ """Check that calling outcome with a string calls expected."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string(self, outcome):
+ """Check that calling outcome with a string works."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(outcome, self, "foo")], self.result._events)
+
+
+class TestExtendedToOriginalResultDecorator(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_failfast_py26(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.failfast)
+ self.converter.failfast = True
+ self.assertFalse(safe_hasattr(self.converter.decorated, 'failfast'))
+
+ def test_failfast_py27(self):
+ self.make_27_result()
+ self.assertEqual(False, self.converter.failfast)
+ # setting it should write it to the backing result
+ self.converter.failfast = True
+ self.assertEqual(True, self.converter.decorated.failfast)
+
+ def test_progress_py26(self):
+ self.make_26_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_py27(self):
+ self.make_27_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_pyextended(self):
+ self.make_extended_result()
+ self.converter.progress(1, 2)
+ self.assertEqual([('progress', 1, 2)], self.result._events)
+
+ def test_shouldStop(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.shouldStop)
+ self.converter.decorated.stop()
+ self.assertEqual(True, self.converter.shouldStop)
+
+ def test_startTest_py26(self):
+ self.make_26_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_py27(self):
+ self.make_27_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTestRun_py26(self):
+ self.make_26_result()
+ self.converter.startTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_startTestRun_py27(self):
+ self.make_27_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_startTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_stopTest_py26(self):
+ self.make_26_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_py27(self):
+ self.make_27_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTestRun_py26(self):
+ self.make_26_result()
+ self.converter.stopTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_stopTestRun_py27(self):
+ self.make_27_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_stopTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_tags_py26(self):
+ self.make_26_result()
+ self.converter.tags(set([1]), set([2]))
+
+ def test_tags_py27(self):
+ self.make_27_result()
+ self.converter.tags(set([1]), set([2]))
+
+ def test_tags_pyextended(self):
+ self.make_extended_result()
+ self.converter.tags(set([1]), set([2]))
+ self.assertEqual([('tags', set([1]), set([2]))], self.result._events)
+
+ def test_time_py26(self):
+ self.make_26_result()
+ self.converter.time(1)
+
+ def test_time_py27(self):
+ self.make_27_result()
+ self.converter.time(1)
+
+ def test_time_pyextended(self):
+ self.make_extended_result()
+ self.converter.time(1)
+ self.assertEqual([('time', 1)], self.result._events)
+
+
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addError'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addFailure'
+
+
+class TestExtendedToOriginalAddExpectedFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addExpectedFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
+
+
+
+class TestExtendedToOriginalAddSkip(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSkip'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py27_no_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_string(self.outcome)
+
+ def test_outcome_Extended_py27_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_arg(self.outcome, 'foo',
+ {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSuccess'
+ expected = 'addSuccess'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_nothing(self.outcome, self.expected)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, self.expected)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalAddUnexpectedSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addUnexpectedSuccess'
+ expected = 'addFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalResultOtherAttributes(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_other_attribute(self):
+ class OtherExtendedResult:
+ def foo(self):
+ return 2
+ bar = 1
+ self.result = OtherExtendedResult()
+ self.make_converter()
+ self.assertEqual(1, self.converter.bar)
+ self.assertEqual(2, self.converter.foo())
+
+
+class TestNonAsciiResults(TestCase):
+ """Test all kinds of tracebacks are cleanly interpreted as unicode
+
+ Currently only uses weak "contains" assertions, would be good to be much
+ stricter about the expected output. This would add a few failures for the
+ current release of IronPython for instance, which gets some traceback
+ lines muddled.
+ """
+
+ _sample_texts = (
+ _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
+ _u("\u5357\u7121"), # In ISO 2022 encodings
+ _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
+ )
+
+ _is_pypy = "__pypy__" in sys.builtin_module_names
+ # Everything but Jython shows syntax errors on the current character
+ _error_on_character = os.name != "java" and not _is_pypy
+
+ def _run(self, stream, test):
+ """Run the test, the same as in testtools.run but not to stdout"""
+ result = TextTestResult(stream)
+ result.startTestRun()
+ try:
+ return test.run(result)
+ finally:
+ result.stopTestRun()
+
+ def _write_module(self, name, encoding, contents):
+ """Create Python module on disk with contents in given encoding"""
+ try:
+ # Need to pre-check that the coding is valid or codecs.open drops
+ # the file without closing it which breaks non-refcounted pythons
+ codecs.lookup(encoding)
+ except LookupError:
+ self.skip("Encoding unsupported by implementation: %r" % encoding)
+ f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
+ try:
+ f.write(contents)
+ finally:
+ f.close()
+
+ def _test_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+ """Create and run a test case in a seperate module"""
+ self._setup_external_case(testline, coding, modulelevel, suffix)
+ return self._run_external_case()
+
+ def _setup_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+ """Create a test case in a seperate module"""
+ _, prefix, self.modname = self.id().rsplit(".", 2)
+ self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
+ self.addCleanup(shutil.rmtree, self.dir)
+ self._write_module(self.modname, coding,
+ # Older Python 2 versions don't see a coding declaration in a
+            # docstring, so it has to be in a comment, but then we can't
+            # work around bug: <http://ironpython.codeplex.com/workitem/26940>
+ "# coding: %s\n"
+ "import testtools\n"
+ "%s\n"
+ "class Test(testtools.TestCase):\n"
+ " def runTest(self):\n"
+ " %s\n" % (coding, modulelevel, testline))
+
+ def _run_external_case(self):
+ """Run the prepared test case in a seperate module"""
+ sys.path.insert(0, self.dir)
+ self.addCleanup(sys.path.remove, self.dir)
+ module = __import__(self.modname)
+ self.addCleanup(sys.modules.pop, self.modname)
+ stream = StringIO()
+ self._run(stream, module.Test())
+ return stream.getvalue()
+
+ def _silence_deprecation_warnings(self):
+ """Shut up DeprecationWarning for this test only"""
+ warnings.simplefilter("ignore", DeprecationWarning)
+ self.addCleanup(warnings.filters.remove, warnings.filters[0])
+
+ def _get_sample_text(self, encoding="unicode_internal"):
+ if encoding is None and str_is_unicode:
+ encoding = "unicode_internal"
+ for u in self._sample_texts:
+ try:
+ b = u.encode(encoding)
+ if u == b.decode(encoding):
+ if str_is_unicode:
+ return u, u
+ return u, b
+ except (LookupError, UnicodeError):
+ pass
+ self.skip("Could not find a sample text for encoding: %r" % encoding)
+
+ def _as_output(self, text):
+ return text
+
+ def test_non_ascii_failure_string(self):
+ """Assertion contents can be non-ascii and should get decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_non_ascii_failure_string_via_exec(self):
+ """Assertion via exec can be non-ascii and still gets decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case(
+ testline='exec ("self.fail(%s)")' % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_control_characters_in_failure_string(self):
+ """Control characters in assertions should be escaped"""
+ textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
+ self.expectFailure("Defense against the beeping horror unimplemented",
+ self.assertNotIn, self._as_output("\a\a\a"), textoutput)
+ self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
+
+ def _local_os_error_matcher(self):
+ if sys.version_info > (3, 3):
+ return MatchesAny(Contains("FileExistsError: "),
+ Contains("PermissionError: "))
+ elif os.name != "nt" or sys.version_info < (2, 5):
+ return Contains(self._as_output("OSError: "))
+ else:
+ return Contains(self._as_output("WindowsError: "))
+
+ def test_os_error(self):
+ """Locale error messages from the OS shouldn't break anything"""
+ textoutput = self._test_external_case(
+ modulelevel="import os",
+ testline="os.mkdir('/')")
+ self.assertThat(textoutput, self._local_os_error_matcher())
+
+ def test_assertion_text_shift_jis(self):
+ """A terminal raw backslash in an encoded string is weird but fine"""
+ example_text = _u("\u5341")
+ textoutput = self._test_external_case(
+ coding="shift_jis",
+ testline="self.fail('%s')" % example_text)
+ if str_is_unicode:
+ output_text = example_text
+ else:
+ output_text = example_text.encode("shift_jis").decode(
+ _get_exception_encoding(), "replace")
+ self.assertIn(self._as_output("AssertionError: %s" % output_text),
+ textoutput)
+
+ def test_file_comment_iso2022_jp(self):
+ """Control character escapes must be preserved if valid encoding"""
+ example_text, _ = self._get_sample_text("iso2022_jp")
+ textoutput = self._test_external_case(
+ coding="iso2022_jp",
+ testline="self.fail('Simple') # %s" % example_text)
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unicode_exception(self):
+ """Exceptions that can be formated losslessly as unicode should be"""
+ example_text, _ = self._get_sample_text()
+ exception_class = (
+ "class FancyError(Exception):\n"
+ # A __unicode__ method does nothing on py3k but the default works
+ " def __unicode__(self):\n"
+ " return self.args[0]\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise FancyError(%s)" % _r(example_text))
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unprintable_exception(self):
+ """A totally useless exception instance still prints something"""
+ exception_class = (
+ "class UnprintableError(Exception):\n"
+ " def __str__(self):\n"
+ " raise RuntimeError\n"
+ " def __unicode__(self):\n"
+ " raise RuntimeError\n"
+ " def __repr__(self):\n"
+ " raise RuntimeError\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise UnprintableError")
+ self.assertIn(self._as_output(
+ "UnprintableError: <unprintable UnprintableError object>\n"),
+ textoutput)
+
+ def test_string_exception(self):
+ """Raise a string rather than an exception instance if supported"""
+ if sys.version_info > (2, 6):
+ self.skip("No string exceptions in Python 2.6 or later")
+ elif sys.version_info > (2, 5):
+ self._silence_deprecation_warnings()
+ textoutput = self._test_external_case(testline="raise 'plain str'")
+ self.assertIn(self._as_output("\nplain str\n"), textoutput)
+
+ def test_non_ascii_dirname(self):
+ """Script paths in the traceback can be non-ascii"""
+ text, raw = self._get_sample_text(sys.getfilesystemencoding())
+ textoutput = self._test_external_case(
+ # Avoid bug in Python 3 by giving a unicode source encoding rather
+ # than just ascii which raises a SyntaxError with no other details
+ coding="utf-8",
+ testline="self.fail('Simple')",
+ suffix=raw)
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_syntax_error(self):
+ """Syntax errors should still have fancy special-case formatting"""
+ textoutput = self._test_external_case("exec ('f(a, b c)')")
+ self.assertIn(self._as_output(
+ ' File "<string>", line 1\n'
+ ' f(a, b c)\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: '
+ ), textoutput)
+
+ def test_syntax_error_malformed(self):
+ """Syntax errors with bogus parameters should break anything"""
+ textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
+ self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
+
+ def test_syntax_error_import_binary(self):
+ """Importing a binary file shouldn't break SyntaxError formatting"""
+ if sys.version_info < (2, 5):
+ # Python 2.4 assumes the file is latin-1 and tells you off
+ self._silence_deprecation_warnings()
+ self._setup_external_case("import bad")
+ f = open(os.path.join(self.dir, "bad.py"), "wb")
+ try:
+ f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
+ finally:
+ f.close()
+ textoutput = self._run_external_case()
+ matches_error = MatchesAny(
+ Contains('\nTypeError: '), Contains('\nSyntaxError: '))
+ self.assertThat(textoutput, matches_error)
+
+ def test_syntax_error_line_iso_8859_1(self):
+ """Syntax error on a latin-1 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-1")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-1",
+ "# coding: iso-8859-1\n! = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' ! = 0 # %s\n'
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_iso_8859_5(self):
+ """Syntax error on a iso-8859-5 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-5")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-5",
+ "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' %% = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_euc_jp(self):
+ """Syntax error on a euc_jp line shows the line decoded"""
+ text, raw = self._get_sample_text("euc_jp")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "euc_jp",
+ "# coding: euc_jp\n$ = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ # pypy uses cpython's multibyte codecs so has their behavior here
+ if self._is_pypy:
+ self._error_on_character = True
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' $ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_utf_8(self):
+ """Syntax error on a utf-8 line shows the line decoded"""
+ text, raw = self._get_sample_text("utf-8")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ 'bad.py", line 1\n'
+ ' ^ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ text), textoutput)
+
+
+class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
+ """Test that running under unittest produces clean ascii strings"""
+
+ def _run(self, stream, test):
+ from unittest import TextTestRunner as _Runner
+ return _Runner(stream).run(test)
+
+ def _as_output(self, text):
+ if str_is_unicode:
+ return text
+ return text.encode("utf-8")
+
+
+class TestDetailsToStr(TestCase):
+
+ def test_no_details(self):
+ string = _details_to_str({})
+ self.assertThat(string, Equals(''))
+
+ def test_binary_content(self):
+ content = content_from_stream(
+ StringIO('foo'), content_type=ContentType('image', 'jpeg'))
+ string = _details_to_str({'attachment': content})
+ self.assertThat(
+ string, Equals("""\
+Binary content:
+ attachment (image/jpeg)
+"""))
+
+ def test_single_line_content(self):
+ content = text_content('foo')
+ string = _details_to_str({'attachment': content})
+ self.assertThat(string, Equals('attachment: {{{foo}}}\n'))
+
+ def test_multi_line_text_content(self):
+ content = text_content('foo\nbar\nbaz')
+ string = _details_to_str({'attachment': content})
+ self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n'))
+
+ def test_special_text_content(self):
+ content = text_content('foo')
+ string = _details_to_str({'attachment': content}, special='attachment')
+ self.assertThat(string, Equals('foo\n'))
+
+ def test_multiple_text_content(self):
+ string = _details_to_str(
+ {'attachment': text_content('foo\nfoo'),
+ 'attachment-1': text_content('bar\nbar')})
+ self.assertThat(
+ string, Equals('attachment: {{{\n'
+ 'foo\n'
+ 'foo\n'
+ '}}}\n'
+ '\n'
+ 'attachment-1: {{{\n'
+ 'bar\n'
+ 'bar\n'
+ '}}}\n'))
+
+ def test_empty_attachment(self):
+ string = _details_to_str({'attachment': text_content('')})
+ self.assertThat(
+ string, Equals("""\
+Empty attachments:
+ attachment
+"""))
+
+ def test_lots_of_different_attachments(self):
+ jpg = lambda x: content_from_stream(
+ StringIO(x), ContentType('image', 'jpeg'))
+ attachments = {
+ 'attachment': text_content('foo'),
+ 'attachment-1': text_content('traceback'),
+ 'attachment-2': jpg('pic1'),
+ 'attachment-3': text_content('bar'),
+ 'attachment-4': text_content(''),
+ 'attachment-5': jpg('pic2'),
+ }
+ string = _details_to_str(attachments, special='attachment-1')
+ self.assertThat(
+ string, Equals("""\
+Binary content:
+ attachment-2 (image/jpeg)
+ attachment-5 (image/jpeg)
+Empty attachments:
+ attachment-4
+
+attachment: {{{foo}}}
+attachment-3: {{{bar}}}
+
+traceback
+"""))
+
+
+class TestByTestResultTests(TestCase):
+
+ def setUp(self):
+ super(TestByTestResultTests, self).setUp()
+ self.log = []
+ self.result = TestByTestResult(self.on_test)
+ now = iter(range(5))
+ self.result._now = lambda: advance_iterator(now)
+
+ def assertCalled(self, **kwargs):
+ defaults = {
+ 'test': self,
+ 'tags': set(),
+ 'details': None,
+ 'start_time': 0,
+ 'stop_time': 1,
+ }
+ defaults.update(kwargs)
+ self.assertEqual([defaults], self.log)
+
+ def on_test(self, **kwargs):
+ self.log.append(kwargs)
+
+ def test_no_tests_nothing_reported(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertEqual([], self.log)
+
+ def test_add_success(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success')
+
+ def test_add_success_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_global_tags(self):
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo']))
+
+ def test_local_tags(self):
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.tags(['bar'], [])
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo', 'bar']))
+
+ def test_add_error(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addError(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='error',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_error_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addError(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='error', details=details)
+
+ def test_add_failure(self):
+ self.result.startTest(self)
+ try:
+ self.fail("intentional failure")
+ except self.failureException:
+ failure = sys.exc_info()
+ self.result.addFailure(self, failure)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='failure',
+ details={'traceback': TracebackContent(failure, self)})
+
+ def test_add_failure_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='failure', details=details)
+
+ def test_add_xfail(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addExpectedFailure(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='xfail',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_xfail_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addExpectedFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='xfail', details=details)
+
+ def test_add_unexpected_success(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addUnexpectedSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_add_skip_reason(self):
+ self.result.startTest(self)
+ reason = self.getUniqueString()
+ self.result.addSkip(self, reason)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='skip', details={'reason': text_content(reason)})
+
+ def test_add_skip_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSkip(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='skip', details=details)
+
+ def test_twice(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self, details={'foo': 'bar'})
+ self.result.stopTest(self)
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertEqual(
+ [{'test': self,
+ 'status': 'success',
+ 'start_time': 0,
+ 'stop_time': 1,
+ 'tags': set(),
+ 'details': {'foo': 'bar'}},
+ {'test': self,
+ 'status': 'success',
+ 'start_time': 2,
+ 'stop_time': 3,
+ 'tags': set(),
+ 'details': None},
+ ],
+ self.log)
+
+
+class TestTagger(TestCase):
+
+ def test_tags_tests(self):
+ result = ExtendedTestResult()
+ tagger = Tagger(result, set(['foo']), set(['bar']))
+ test1, test2 = self, make_test()
+ tagger.startTest(test1)
+ tagger.addSuccess(test1)
+ tagger.stopTest(test1)
+ tagger.startTest(test2)
+ tagger.addSuccess(test2)
+ tagger.stopTest(test2)
+ self.assertEqual(
+ [('startTest', test1),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', test1),
+ ('stopTest', test1),
+ ('startTest', test2),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', test2),
+ ('stopTest', test2),
+ ], result._events)
+
+
+class TestTimestampingStreamResult(TestCase):
+
+ def test_startTestRun(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.startTestRun()
+ self.assertEqual([('startTestRun',)], result.targets[0]._events)
+
+ def test_stopTestRun(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.stopTestRun()
+ self.assertEqual([('stopTestRun',)], result.targets[0]._events)
+
+ def test_status_no_timestamp(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.status(test_id="A", test_status="B", test_tags="C",
+ runnable="D", file_name="E", file_bytes=b"F", eof=True,
+ mime_type="G", route_code="H")
+ events = result.targets[0]._events
+ self.assertThat(events, HasLength(1))
+ self.assertThat(events[0], HasLength(11))
+ self.assertEqual(
+ ("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"),
+ events[0][:10])
+ self.assertNotEqual(None, events[0][10])
+ self.assertIsInstance(events[0][10], datetime.datetime)
+
+ def test_status_timestamp(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.status(timestamp="F")
+ self.assertEqual("F", result.targets[0]._events[0][10])
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py
new file mode 100644
index 00000000000..e2c33062b2d
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py
@@ -0,0 +1,279 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test ConcurrentTestSuite and related things."""
+
+__metaclass__ = type
+
+import doctest
+from functools import partial
+import sys
+import unittest
+
+from extras import try_import
+
+from testtools import (
+ ConcurrentTestSuite,
+ ConcurrentStreamTestSuite,
+ iterate_tests,
+ PlaceHolder,
+ TestByTestResult,
+ TestCase,
+ )
+from testtools.compat import _b, _u
+from testtools.matchers import DocTestMatches
+from testtools.testsuite import FixtureSuite, iterate_tests, sorted_tests
+from testtools.tests.helpers import LoggingResult
+from testtools.testresult.doubles import StreamResult as LoggingStream
+
+FunctionFixture = try_import('fixtures.FunctionFixture')
+
+class Sample(TestCase):
+ def __hash__(self):
+ return id(self)
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+
+
+class TestConcurrentTestSuiteRun(TestCase):
+
+ def test_broken_test(self):
+ log = []
+ def on_test(test, status, start_time, stop_time, tags, details):
+ log.append((test.id(), status, set(details.keys())))
+ class BrokenTest(object):
+ # Simple break - no result parameter to run()
+ def __call__(self):
+ pass
+ run = __call__
+ original_suite = unittest.TestSuite([BrokenTest()])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(TestByTestResult(on_test))
+ self.assertEqual([('broken-runner', 'error', set(['traceback']))], log)
+
+ def test_trivial(self):
+ log = []
+ result = LoggingResult(log)
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(result)
+ # log[0] is the timestamp for the first test starting.
+ test1 = log[1][1]
+ test2 = log[-1][1]
+ self.assertIsInstance(test1, Sample)
+ self.assertIsInstance(test2, Sample)
+ self.assertNotEqual(test1.id(), test2.id())
+
+ def test_wrap_result(self):
+ # ConcurrentTestSuite has a hook for wrapping the per-thread result.
+ wrap_log = []
+
+ def wrap_result(thread_safe_result, thread_number):
+ wrap_log.append(
+ (thread_safe_result.result.decorated, thread_number))
+ return thread_safe_result
+
+ result_log = []
+ result = LoggingResult(result_log)
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(
+ original_suite, self.split_suite, wrap_result=wrap_result)
+ suite.run(result)
+ self.assertEqual(
+ [(result, 0),
+ (result, 1),
+ ], wrap_log)
+ # Smoke test to make sure everything ran OK.
+ self.assertNotEqual([], result_log)
+
+ def split_suite(self, suite):
+ return list(iterate_tests(suite))
+
+
+class TestConcurrentStreamTestSuiteRun(TestCase):
+
+ def test_trivial(self):
+ result = LoggingStream()
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ cases = lambda:[(test1, '0'), (test2, '1')]
+ suite = ConcurrentStreamTestSuite(cases)
+ suite.run(result)
+ def freeze(set_or_none):
+ if set_or_none is None:
+ return set_or_none
+ return frozenset(set_or_none)
+        # Ignore event order: we're testing that the code is all glued
+        # together, which just means we can pump events through and they get
+        # route codes added appropriately.
+ self.assertEqual(set([
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method1',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ '0',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method1',
+ 'success',
+ frozenset(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ '0',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method2',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ '1',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method2',
+ 'success',
+ frozenset(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ '1',
+ None,
+ ),
+ ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,)
+ for event in result._events))
+
+ def test_broken_runner(self):
+ # If the object called breaks, the stream is informed about it
+ # regardless.
+ class BrokenTest(object):
+ # broken - no result parameter!
+ def __call__(self):
+ pass
+ def run(self):
+ pass
+ result = LoggingStream()
+ cases = lambda:[(BrokenTest(), '0')]
+ suite = ConcurrentStreamTestSuite(cases)
+ suite.run(result)
+ events = result._events
+ # Check the traceback loosely.
+ self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\
+Traceback (most recent call last):
+ File "...testtools/testsuite.py", line ..., in _run_test
+ test.run(process_result)
+TypeError: run() takes ...1 ...argument...2...given...
+""", doctest.ELLIPSIS))
+ events = [event[0:10] + (None,) for event in events]
+ events[1] = events[1][:6] + (None,) + events[1][7:]
+ self.assertEqual([
+ ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None),
+ ('status', "broken-runner-'0'", None, None, True, 'traceback', None,
+ False,
+ 'text/x-traceback; charset="utf8"; language="python"',
+ '0',
+ None),
+ ('status', "broken-runner-'0'", None, None, True, 'traceback', b'', True,
+ 'text/x-traceback; charset="utf8"; language="python"', '0', None),
+ ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None)
+ ], events)
+
+ def split_suite(self, suite):
+ tests = list(enumerate(iterate_tests(suite)))
+ return [(test, _u(str(pos))) for pos, test in tests]
+
+
+class TestFixtureSuite(TestCase):
+
+ def setUp(self):
+ super(TestFixtureSuite, self).setUp()
+ if FunctionFixture is None:
+ self.skip("Need fixtures")
+
+ def test_fixture_suite(self):
+ log = []
+ class Sample(TestCase):
+ def test_one(self):
+ log.append(1)
+ def test_two(self):
+ log.append(2)
+ fixture = FunctionFixture(
+ lambda: log.append('setUp'),
+ lambda fixture: log.append('tearDown'))
+ suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
+ suite.run(LoggingResult([]))
+ self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
+
+ def test_fixture_suite_sort(self):
+ log = []
+ class Sample(TestCase):
+ def test_one(self):
+ log.append(1)
+ def test_two(self):
+ log.append(2)
+ fixture = FunctionFixture(
+ lambda: log.append('setUp'),
+ lambda fixture: log.append('tearDown'))
+ suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')])
+ self.assertRaises(ValueError, suite.sort_tests)
+
+
+class TestSortedTests(TestCase):
+
+ def test_sorts_custom_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ class Subclass(unittest.TestSuite):
+ def sort_tests(self):
+ self._tests = sorted_tests(self, True)
+ input_suite = Subclass([b, a])
+ suite = sorted_tests(input_suite)
+ self.assertEqual([a, b], list(iterate_tests(suite)))
+ self.assertEqual([input_suite], list(iter(suite)))
+
+ def test_custom_suite_without_sort_tests_works(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ class Subclass(unittest.TestSuite):pass
+ input_suite = Subclass([b, a])
+ suite = sorted_tests(input_suite)
+ self.assertEqual([b, a], list(iterate_tests(suite)))
+ self.assertEqual([input_suite], list(iter(suite)))
+
+ def test_sorts_simple_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ suite = sorted_tests(unittest.TestSuite([b, a]))
+ self.assertEqual([a, b], list(iterate_tests(suite)))
+
+ def test_duplicate_simple_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ c = PlaceHolder('a')
+ self.assertRaises(
+ ValueError, sorted_tests, unittest.TestSuite([a, b, c]))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py
new file mode 100644
index 00000000000..4305c624a86
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+from __future__ import with_statement
+
+import sys
+
+from testtools import (
+ ExpectedException,
+ TestCase,
+ )
+from testtools.matchers import (
+ AfterPreprocessing,
+ Equals,
+ EndsWith,
+ )
+
+
+class TestExpectedException(TestCase):
+ """Test the ExpectedException context manager."""
+
+ def test_pass_on_raise(self):
+ with ExpectedException(ValueError, 'tes.'):
+ raise ValueError('test')
+
+ def test_pass_on_raise_matcher(self):
+ with ExpectedException(
+ ValueError, AfterPreprocessing(str, Equals('test'))):
+ raise ValueError('test')
+
+ def test_raise_on_text_mismatch(self):
+ try:
+ with ExpectedException(ValueError, 'tes.'):
+ raise ValueError('mismatch')
+ except AssertionError:
+ e = sys.exc_info()[1]
+ self.assertEqual("'mismatch' does not match /tes./", str(e))
+ else:
+ self.fail('AssertionError not raised.')
+
+ def test_raise_on_general_mismatch(self):
+ matcher = AfterPreprocessing(str, Equals('test'))
+ value_error = ValueError('mismatch')
+ try:
+ with ExpectedException(ValueError, matcher):
+ raise value_error
+ except AssertionError:
+ e = sys.exc_info()[1]
+ self.assertEqual(matcher.match(value_error).describe(), str(e))
+ else:
+ self.fail('AssertionError not raised.')
+
+ def test_raise_on_error_mismatch(self):
+ try:
+ with ExpectedException(TypeError, 'tes.'):
+ raise ValueError('mismatch')
+ except ValueError:
+ e = sys.exc_info()[1]
+ self.assertEqual('mismatch', str(e))
+ else:
+ self.fail('ValueError not raised.')
+
+ def test_raise_if_no_exception(self):
+ try:
+ with ExpectedException(TypeError, 'tes.'):
+ pass
+ except AssertionError:
+ e = sys.exc_info()[1]
+ self.assertEqual('TypeError not raised.', str(e))
+ else:
+ self.fail('AssertionError not raised.')
+
+ def test_pass_on_raise_any_message(self):
+ with ExpectedException(ValueError):
+ raise ValueError('whatever')
+
+ def test_annotate(self):
+ def die():
+ with ExpectedException(ValueError, msg="foo"):
+ pass
+ exc = self.assertRaises(AssertionError, die)
+ self.assertThat(exc.args[0], EndsWith(': foo'))
+
+ def test_annotated_matcher(self):
+ def die():
+ with ExpectedException(ValueError, 'bar', msg="foo"):
+ pass
+ exc = self.assertRaises(AssertionError, die)
+ self.assertThat(exc.args[0], EndsWith(': foo'))
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py
new file mode 100644
index 00000000000..9e92e0cb8b1
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/testsuite.py
@@ -0,0 +1,317 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test suites and related things."""
+
+__metaclass__ = type
+__all__ = [
+ 'ConcurrentTestSuite',
+ 'ConcurrentStreamTestSuite',
+ 'filter_by_ids',
+ 'iterate_tests',
+ 'sorted_tests',
+ ]
+
+import sys
+import threading
+import unittest
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+import testtools
+
+
+def iterate_tests(test_suite_or_case):
+ """Iterate through all of the test cases in 'test_suite_or_case'."""
+ try:
+ suite = iter(test_suite_or_case)
+ except TypeError:
+ yield test_suite_or_case
+ else:
+ for test in suite:
+ for subtest in iterate_tests(test):
+ yield subtest
+
+
+class ConcurrentTestSuite(unittest.TestSuite):
+ """A TestSuite whose run() calls out to a concurrency strategy."""
+
+ def __init__(self, suite, make_tests, wrap_result=None):
+ """Create a ConcurrentTestSuite to execute suite.
+
+ :param suite: A suite to run concurrently.
+ :param make_tests: A helper function to split the tests in the
+ ConcurrentTestSuite into some number of concurrently executing
+ sub-suites. make_tests must take a suite, and return an iterable
+            of TestCase-like objects, each of which must have a run(result)
+ method.
+ :param wrap_result: An optional function that takes a thread-safe
+ result and a thread number and must return a ``TestResult``
+ object. If not provided, then ``ConcurrentTestSuite`` will just
+ use a ``ThreadsafeForwardingResult`` wrapped around the result
+ passed to ``run()``.
+ """
+ super(ConcurrentTestSuite, self).__init__([suite])
+ self.make_tests = make_tests
+ if wrap_result:
+ self._wrap_result = wrap_result
+
+ def _wrap_result(self, thread_safe_result, thread_number):
+ """Wrap a thread-safe result before sending it test results.
+
+ You can either override this in a subclass or pass your own
+ ``wrap_result`` in to the constructor. The latter is preferred.
+ """
+ return thread_safe_result
+
+ def run(self, result):
+ """Run the tests concurrently.
+
+ This calls out to the provided make_tests helper, and then serialises
+ the results so that result only sees activity from one TestCase at
+ a time.
+
+        ConcurrentTestSuite provides no special mechanism to stop the tests
+        returned by make_tests; it is up to the tests returned by make_tests
+        to honour the shouldStop attribute on the result object they are run
+        with, which will be set if an exception is raised in the thread in
+        which ConcurrentTestSuite.run is called.
+ """
+ tests = self.make_tests(self)
+ try:
+ threads = {}
+ queue = Queue()
+ semaphore = threading.Semaphore(1)
+ for i, test in enumerate(tests):
+ process_result = self._wrap_result(
+ testtools.ThreadsafeForwardingResult(result, semaphore), i)
+ reader_thread = threading.Thread(
+ target=self._run_test, args=(test, process_result, queue))
+ threads[test] = reader_thread, process_result
+ reader_thread.start()
+ while threads:
+ finished_test = queue.get()
+ threads[finished_test][0].join()
+ del threads[finished_test]
+ except:
+ for thread, process_result in threads.values():
+ process_result.stop()
+ raise
+
+ def _run_test(self, test, process_result, queue):
+ try:
+ try:
+ test.run(process_result)
+ except Exception as e:
+ # The run logic itself failed.
+ case = testtools.ErrorHolder(
+ "broken-runner",
+ error=sys.exc_info())
+ case.run(process_result)
+ finally:
+ queue.put(test)
+
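+# Illustrative usage sketch (editor's note, not part of the upstream testtools
+# source): splitting a plain unittest suite so that each test case runs in its
+# own thread, as exercised by TestConcurrentTestSuiteRun above. The module
+# name loaded below is hypothetical.
+#
+#   import unittest
+#   from testtools import ConcurrentTestSuite
+#   from testtools.testsuite import iterate_tests
+#
+#   def split_per_test(suite):
+#       # One concurrently-run "sub-suite" (here: a bare test case) per test.
+#       return list(iterate_tests(suite))
+#
+#   inner = unittest.defaultTestLoader.loadTestsFromName('myproject.tests')
+#   result = unittest.TestResult()
+#   ConcurrentTestSuite(inner, split_per_test).run(result)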
+
+class ConcurrentStreamTestSuite(object):
+ """A TestSuite whose run() parallelises."""
+
+ def __init__(self, make_tests):
+ """Create a ConcurrentTestSuite to execute tests returned by make_tests.
+
+ :param make_tests: A helper function that should return some number
+ of concurrently executable test suite / test case objects.
+ make_tests must take no parameters and return an iterable of
+ tuples. Each tuple must be of the form (case, route_code), where
+ case is a TestCase-like object with a run(result) method, and
+ route_code is either None or a unicode string.
+ """
+ super(ConcurrentStreamTestSuite, self).__init__()
+ self.make_tests = make_tests
+
+ def run(self, result):
+ """Run the tests concurrently.
+
+ This calls out to the provided make_tests helper to determine the
+ concurrency to use and to assign routing codes to each worker.
+
+        ConcurrentStreamTestSuite provides no special mechanism to stop the
+        tests returned by make_tests; it is up to the tests themselves to
+        honour the shouldStop attribute on the result object they are run
+        with, which will be set if the test run is to be aborted.
+
+ The tests are run with an ExtendedToStreamDecorator wrapped around a
+ StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from
+ the queue and forwards them to result. Tests can therefore be either
+ original unittest tests (or compatible tests), or new tests that emit
+ StreamResult events directly.
+
+ :param result: A StreamResult instance. The caller is responsible for
+ calling startTestRun on this instance prior to invoking suite.run,
+ and stopTestRun subsequent to the run method returning.
+ """
+ tests = self.make_tests()
+ try:
+ threads = {}
+ queue = Queue()
+ for test, route_code in tests:
+ to_queue = testtools.StreamToQueue(queue, route_code)
+ process_result = testtools.ExtendedToStreamDecorator(
+ testtools.TimestampingStreamResult(to_queue))
+ runner_thread = threading.Thread(
+ target=self._run_test,
+ args=(test, process_result, route_code))
+ threads[to_queue] = runner_thread, process_result
+ runner_thread.start()
+ while threads:
+ event_dict = queue.get()
+ event = event_dict.pop('event')
+ if event == 'status':
+ result.status(**event_dict)
+ elif event == 'stopTestRun':
+ thread = threads.pop(event_dict['result'])[0]
+ thread.join()
+ elif event == 'startTestRun':
+ pass
+ else:
+ raise ValueError('unknown event type %r' % (event,))
+ except:
+ for thread, process_result in threads.values():
+ # Signal to each TestControl in the ExtendedToStreamDecorator
+ # that the thread should stop running tests and cleanup
+ process_result.stop()
+ raise
+
+ def _run_test(self, test, process_result, route_code):
+ process_result.startTestRun()
+ try:
+ try:
+ test.run(process_result)
+ except Exception as e:
+ # The run logic itself failed.
+ case = testtools.ErrorHolder(
+ "broken-runner-'%s'" % (route_code,),
+ error=sys.exc_info())
+ case.run(process_result)
+ finally:
+ process_result.stopTestRun()
+
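+# Illustrative usage sketch (editor's note, not part of the upstream testtools
+# source), mirroring TestConcurrentStreamTestSuiteRun.test_trivial in
+# test_testsuite.py above: make_tests returns (case, route_code) tuples and
+# the caller owns startTestRun/stopTestRun on the StreamResult. MyTest is a
+# hypothetical TestCase subclass.
+#
+#   from testtools import ConcurrentStreamTestSuite, StreamResult
+#
+#   cases = lambda: [(MyTest('test_one'), '0'), (MyTest('test_two'), '1')]
+#   result = StreamResult()  # any StreamResult implementation will do
+#   result.startTestRun()
+#   try:
+#       ConcurrentStreamTestSuite(cases).run(result)
+#   finally:
+#       result.stopTestRun()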
+
+class FixtureSuite(unittest.TestSuite):
+
+ def __init__(self, fixture, tests):
+ super(FixtureSuite, self).__init__(tests)
+ self._fixture = fixture
+
+ def run(self, result):
+ self._fixture.setUp()
+ try:
+ super(FixtureSuite, self).run(result)
+ finally:
+ self._fixture.cleanUp()
+
+ def sort_tests(self):
+ self._tests = sorted_tests(self, True)
+
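+# Illustrative sketch (editor's note, not part of the upstream testtools
+# source), mirroring TestFixtureSuite.test_fixture_suite above: the fixture is
+# set up once, the wrapped tests run, then the fixture is cleaned up. Requires
+# the 'fixtures' package; connect_db and MyTest are hypothetical names.
+#
+#   import unittest
+#   from fixtures import FunctionFixture
+#
+#   fixture = FunctionFixture(lambda: connect_db(),   # hypothetical setup
+#                             lambda db: db.close())  # cleanup gets setup's return value
+#   suite = FixtureSuite(fixture, [MyTest('test_one'), MyTest('test_two')])
+#   suite.run(unittest.TestResult())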
+
+def _flatten_tests(suite_or_case, unpack_outer=False):
+ try:
+ tests = iter(suite_or_case)
+ except TypeError:
+ # Not iterable, assume it's a test case.
+ return [(suite_or_case.id(), suite_or_case)]
+ if (type(suite_or_case) in (unittest.TestSuite,) or
+ unpack_outer):
+ # Plain old test suite (or any others we may add).
+ result = []
+ for test in tests:
+ # Recurse to flatten.
+ result.extend(_flatten_tests(test))
+ return result
+ else:
+ # Find any old actual test and grab its id.
+ suite_id = None
+ tests = iterate_tests(suite_or_case)
+ for test in tests:
+ suite_id = test.id()
+ break
+ # If it has a sort_tests method, call that.
+ if safe_hasattr(suite_or_case, 'sort_tests'):
+ suite_or_case.sort_tests()
+ return [(suite_id, suite_or_case)]
+
+
+def filter_by_ids(suite_or_case, test_ids):
+ """Remove tests from suite_or_case where their id is not in test_ids.
+
+ :param suite_or_case: A test suite or test case.
+ :param test_ids: Something that supports the __contains__ protocol.
+    :return: suite_or_case, unless suite_or_case was a case that itself
+      fails the predicate, in which case a new unittest.TestSuite with
+      no contents is returned.
+
+    This helper exists to provide backwards compatibility with older versions
+ of Python (currently all versions :)) that don't have a native
+ filter_by_ids() method on Test(Case|Suite).
+
+ For subclasses of TestSuite, filtering is done by:
+ - attempting to call suite.filter_by_ids(test_ids)
+ - if there is no method, iterating the suite and identifying tests to
+ remove, then removing them from _tests, manually recursing into
+ each entry.
+
+ For objects with an id() method - TestCases, filtering is done by:
+ - attempting to return case.filter_by_ids(test_ids)
+ - if there is no such method, checking for case.id() in test_ids
+ and returning case if it is, or TestSuite() if it is not.
+
+ For anything else, it is not filtered - it is returned as-is.
+
+    To provide compatibility with this routine for a custom TestSuite, just
+ define a filter_by_ids() method that will return a TestSuite equivalent to
+ the original minus any tests not in test_ids.
+    Similarly, to provide compatibility for a custom TestCase that does
+    something unusual, define filter_by_ids to return a new TestCase object
+    that will only run test_ids that are in the provided container. If none
+    would run, return an empty TestSuite().
+
+ The contract for this function does not require mutation - each filtered
+    object can choose to return a new object with the filtered tests. However,
+    because existing custom TestSuite classes in the wild do not have this
+    method, we need a way to copy their state correctly, which is tricky:
+ thus the backwards-compatible code paths attempt to mutate in place rather
+ than guessing how to reconstruct a new suite.
+ """
+ # Compatible objects
+ if safe_hasattr(suite_or_case, 'filter_by_ids'):
+ return suite_or_case.filter_by_ids(test_ids)
+ # TestCase objects.
+ if safe_hasattr(suite_or_case, 'id'):
+ if suite_or_case.id() in test_ids:
+ return suite_or_case
+ else:
+ return unittest.TestSuite()
+ # Standard TestSuites or derived classes [assumed to be mutable].
+ if isinstance(suite_or_case, unittest.TestSuite):
+ filtered = []
+ for item in suite_or_case:
+ filtered.append(filter_by_ids(item, test_ids))
+ suite_or_case._tests[:] = filtered
+ # Everything else:
+ return suite_or_case
+
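+# Illustrative sketch (editor's note, not part of the upstream testtools
+# source): pruning a loaded suite down to a chosen set of test ids before
+# running it. The module name below is hypothetical.
+#
+#   import unittest
+#   from testtools.testsuite import filter_by_ids, iterate_tests
+#
+#   suite = unittest.defaultTestLoader.loadTestsFromName('myproject.tests')
+#   wanted = set(t.id() for t in iterate_tests(suite) if t.id().endswith('smoke'))
+#   suite = filter_by_ids(suite, wanted)
+#   unittest.TextTestRunner().run(suite)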
+
+def sorted_tests(suite_or_case, unpack_outer=False):
+ """Sort suite_or_case while preserving non-vanilla TestSuites."""
+    # A duplicate test id can induce a TypeError in Python 3.3.
+    # Detect duplicate test ids and raise an exception when one is found.
+ seen = set()
+ for test_case in iterate_tests(suite_or_case):
+ test_id = test_case.id()
+ if test_id not in seen:
+ seen.add(test_id)
+ else:
+ raise ValueError('Duplicate test id detected: %s' % (test_id,))
+ tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
+ tests.sort()
+ return unittest.TestSuite([test for (sort_key, test) in tests])
diff --git a/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py
new file mode 100644
index 00000000000..0f39d8f5b6e
--- /dev/null
+++ b/src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/utils.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2008-2010 testtools developers. See LICENSE for details.
+
+"""Utilities for dealing with stuff in unittest.
+
+Legacy - deprecated - use testtools.testsuite.iterate_tests
+"""
+
+import warnings
+warnings.warn("Please import iterate_tests from testtools.testsuite - "
+ "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2)
+
+from testtools.testsuite import iterate_tests
+