author     Michael Cahill <michael.cahill@wiredtiger.com>  2013-12-20 10:27:28 +1100
committer  Michael Cahill <michael.cahill@wiredtiger.com>  2013-12-20 10:27:28 +1100
commit     fa69e2a994a7c351c6d81cd96a271c5abaf04780 (patch)
tree       21e7b7dd739ab7db068475d457335fef40a95194
parent     e2fcf1a851c7a9d966cf4e8609fdd6cbbbcf16a6 (diff)
download   mongo-fa69e2a994a7c351c6d81cd96a271c5abaf04780.tar.gz
Update test/3rdparty with the packages required to run the test suite in parallel mode. Change the short command line flag to "-j", matching make.
--HG--
rename : test/3rdparty/testscenarios-0.2/.bzrignore => test/3rdparty/testscenarios-0.4/.bzrignore
rename : test/3rdparty/testscenarios-0.2/Apache-2.0 => test/3rdparty/testscenarios-0.4/Apache-2.0
rename : test/3rdparty/testscenarios-0.2/BSD => test/3rdparty/testscenarios-0.4/BSD
rename : test/3rdparty/testscenarios-0.2/COPYING => test/3rdparty/testscenarios-0.4/COPYING
rename : test/3rdparty/testscenarios-0.2/GOALS => test/3rdparty/testscenarios-0.4/GOALS
rename : test/3rdparty/testscenarios-0.2/HACKING => test/3rdparty/testscenarios-0.4/HACKING
rename : test/3rdparty/testscenarios-0.2/MANIFEST.in => test/3rdparty/testscenarios-0.4/MANIFEST.in
rename : test/3rdparty/testscenarios-0.2/Makefile => test/3rdparty/testscenarios-0.4/Makefile
rename : test/3rdparty/testscenarios-0.2/doc/__init__.py => test/3rdparty/testscenarios-0.4/doc/__init__.py
rename : test/3rdparty/testscenarios-0.2/doc/example.py => test/3rdparty/testscenarios-0.4/doc/example.py
rename : test/3rdparty/testscenarios-0.2/doc/test_sample.py => test/3rdparty/testscenarios-0.4/doc/test_sample.py
rename : test/3rdparty/testtools-0.9.12/doc/conf.py => test/3rdparty/testtools-0.9.34/doc/conf.py
rename : test/3rdparty/testtools-0.9.12/doc/make.bat => test/3rdparty/testtools-0.9.34/doc/make.bat
rename : test/3rdparty/testtools-0.9.12/testtools/_compat2x.py => test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
rename : test/3rdparty/testtools-0.9.12/testtools/_spinner.py => test/3rdparty/testtools-0.9.34/testtools/_spinner.py
rename : test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py => test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py
rename : test/3rdparty/testtools-0.9.12/testtools/monkey.py => test/3rdparty/testtools-0.9.34/testtools/monkey.py
rename : test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py => test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
rename : test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py => test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
rename : test/3rdparty/testtools-0.9.12/testtools/utils.py => test/3rdparty/testtools-0.9.34/testtools/utils.py
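For context, a minimal sketch of how the vendored packages combine to run a unittest suite across worker processes; this is illustrative only, not the actual test/suite/run.py change (which is only summarized in the file list below), and the discovery path and worker count are assumptions:

    import unittest
    from concurrencytest import ConcurrentTestSuite, fork_for_tests

    # Hypothetical suite location; the real run.py does its own test loading.
    suite = unittest.TestLoader().discover('test/suite')
    # Roughly what the new "-j 4" flag asks for: fork four worker processes.
    concurrent = ConcurrentTestSuite(suite, fork_for_tests(4))
    unittest.TextTestRunner(verbosity=2).run(concurrent)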
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/PKG-INFO22
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO22
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt7
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt1
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt2
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt1
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/concurrencytest.py144
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/setup.cfg5
-rw-r--r--test/3rdparty/concurrencytest-0.1.2/setup.py33
-rw-r--r--test/3rdparty/extras-0.0.3/.gitignore35
-rw-r--r--test/3rdparty/extras-0.0.3/LICENSE26
-rw-r--r--test/3rdparty/extras-0.0.3/MANIFEST.in6
-rw-r--r--test/3rdparty/extras-0.0.3/Makefile30
-rw-r--r--test/3rdparty/extras-0.0.3/NEWS27
-rw-r--r--test/3rdparty/extras-0.0.3/PKG-INFO68
-rw-r--r--test/3rdparty/extras-0.0.3/README.rst57
-rw-r--r--test/3rdparty/extras-0.0.3/extras.egg-info/PKG-INFO68
-rw-r--r--test/3rdparty/extras-0.0.3/extras.egg-info/SOURCES.txt15
-rw-r--r--test/3rdparty/extras-0.0.3/extras.egg-info/dependency_links.txt1
-rw-r--r--test/3rdparty/extras-0.0.3/extras.egg-info/top_level.txt1
-rw-r--r--test/3rdparty/extras-0.0.3/extras/__init__.py (renamed from test/3rdparty/testtools-0.9.12/testtools/helpers.py)22
-rw-r--r--test/3rdparty/extras-0.0.3/extras/tests/__init__.py17
-rw-r--r--test/3rdparty/extras-0.0.3/extras/tests/test_extras.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_helpers.py)62
-rw-r--r--test/3rdparty/extras-0.0.3/setup.cfg10
-rwxr-xr-xtest/3rdparty/extras-0.0.3/setup.py43
-rw-r--r--test/3rdparty/python-subunit-0.0.16/MANIFEST.in20
-rw-r--r--test/3rdparty/python-subunit-0.0.16/NEWS493
-rw-r--r--test/3rdparty/python-subunit-0.0.16/PKG-INFO483
-rw-r--r--test/3rdparty/python-subunit-0.0.16/README468
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-1to242
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-2to147
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-filter165
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-ls60
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-notify48
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-stats32
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit-tags27
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit2gtk240
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml36
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit59
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/filters/tap2subunit26
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py1320
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py185
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/details.py119
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py206
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py133
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py106
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/python/subunit/run.py131
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py729
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py63
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py21
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py7
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py146
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py106
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py35
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py112
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py64
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py346
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py78
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py85
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py387
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py1362
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py436
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py566
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py495
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO483
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt44
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt1
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt2
-rw-r--r--test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt1
-rw-r--r--test/3rdparty/python-subunit-0.0.16/setup.cfg5
-rwxr-xr-xtest/3rdparty/python-subunit-0.0.16/setup.py66
-rw-r--r--test/3rdparty/testscenarios-0.2/lib/testscenarios/scenarios.py78
-rw-r--r--test/3rdparty/testscenarios-0.4/.bzrignore (renamed from test/3rdparty/testscenarios-0.2/.bzrignore)0
-rw-r--r--test/3rdparty/testscenarios-0.4/Apache-2.0 (renamed from test/3rdparty/testscenarios-0.2/Apache-2.0)0
-rw-r--r--test/3rdparty/testscenarios-0.4/BSD (renamed from test/3rdparty/testscenarios-0.2/BSD)0
-rw-r--r--test/3rdparty/testscenarios-0.4/COPYING (renamed from test/3rdparty/testscenarios-0.2/COPYING)0
-rw-r--r--test/3rdparty/testscenarios-0.4/GOALS (renamed from test/3rdparty/testscenarios-0.2/GOALS)0
-rw-r--r--test/3rdparty/testscenarios-0.4/HACKING (renamed from test/3rdparty/testscenarios-0.2/HACKING)0
-rw-r--r--test/3rdparty/testscenarios-0.4/MANIFEST.in (renamed from test/3rdparty/testscenarios-0.2/MANIFEST.in)0
-rw-r--r--test/3rdparty/testscenarios-0.4/Makefile (renamed from test/3rdparty/testscenarios-0.2/Makefile)0
-rw-r--r--test/3rdparty/testscenarios-0.4/NEWS (renamed from test/3rdparty/testscenarios-0.2/NEWS)39
-rw-r--r--test/3rdparty/testscenarios-0.4/PKG-INFO (renamed from test/3rdparty/testscenarios-0.2/PKG-INFO)251
-rw-r--r--test/3rdparty/testscenarios-0.4/README (renamed from test/3rdparty/testscenarios-0.2/README)68
-rw-r--r--test/3rdparty/testscenarios-0.4/doc/__init__.py (renamed from test/3rdparty/testscenarios-0.2/doc/__init__.py)0
-rw-r--r--test/3rdparty/testscenarios-0.4/doc/example.py (renamed from test/3rdparty/testscenarios-0.2/doc/example.py)0
-rw-r--r--test/3rdparty/testscenarios-0.4/doc/test_sample.py (renamed from test/3rdparty/testscenarios-0.2/doc/test_sample.py)0
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO335
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt25
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt1
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt1
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt1
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py (renamed from test/3rdparty/testscenarios-0.2/lib/testscenarios/__init__.py)16
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py167
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py (renamed from test/3rdparty/testscenarios-0.2/lib/testscenarios/testcase.py)28
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py (renamed from test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/__init__.py)5
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py (renamed from test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_scenarios.py)88
-rw-r--r--test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py (renamed from test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_testcase.py)34
-rw-r--r--test/3rdparty/testscenarios-0.4/setup.cfg5
-rwxr-xr-xtest/3rdparty/testscenarios-0.4/setup.py (renamed from test/3rdparty/testscenarios-0.2/setup.py)10
-rw-r--r--test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst219
-rw-r--r--test/3rdparty/testtools-0.9.12/setup.cfg4
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/__init__.py83
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/matchers.py1059
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py19
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/testresult/real.py658
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py1071
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py80
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py1507
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py75
-rw-r--r--test/3rdparty/testtools-0.9.12/testtools/testsuite.py101
-rw-r--r--test/3rdparty/testtools-0.9.34/.gitignore (renamed from test/3rdparty/testtools-0.9.12/.bzrignore)7
-rw-r--r--test/3rdparty/testtools-0.9.34/LICENSE (renamed from test/3rdparty/testtools-0.9.12/LICENSE)2
-rw-r--r--test/3rdparty/testtools-0.9.34/MANIFEST.in (renamed from test/3rdparty/testtools-0.9.12/MANIFEST.in)6
-rw-r--r--test/3rdparty/testtools-0.9.34/Makefile (renamed from test/3rdparty/testtools-0.9.12/Makefile)4
-rw-r--r--test/3rdparty/testtools-0.9.34/NEWS (renamed from test/3rdparty/testtools-0.9.12/NEWS)593
-rw-r--r--test/3rdparty/testtools-0.9.34/PKG-INFO (renamed from test/3rdparty/testtools-0.9.12/PKG-INFO)16
-rw-r--r--test/3rdparty/testtools-0.9.34/README.rst (renamed from test/3rdparty/testtools-0.9.12/README)12
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/Makefile (renamed from test/3rdparty/testtools-0.9.12/doc/Makefile)0
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt (renamed from test/3rdparty/testtools-0.9.12/doc/_static/placeholder.txt)0
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt (renamed from test/3rdparty/testtools-0.9.12/doc/_templates/placeholder.txt)0
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/conf.py (renamed from test/3rdparty/testtools-0.9.12/doc/conf.py)0
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst454
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst (renamed from test/3rdparty/testtools-0.9.12/doc/for-test-authors.rst)248
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/hacking.rst (renamed from test/3rdparty/testtools-0.9.12/doc/hacking.rst)67
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/index.rst (renamed from test/3rdparty/testtools-0.9.12/doc/index.rst)5
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/make.bat (renamed from test/3rdparty/testtools-0.9.12/doc/make.bat)0
-rw-r--r--test/3rdparty/testtools-0.9.34/doc/overview.rst (renamed from test/3rdparty/testtools-0.9.12/doc/overview.rst)9
-rw-r--r--test/3rdparty/testtools-0.9.34/setup.cfg10
-rwxr-xr-xtest/3rdparty/testtools-0.9.34/setup.py (renamed from test/3rdparty/testtools-0.9.12/setup.py)59
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools.egg-info/PKG-INFO113
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools.egg-info/SOURCES.txt84
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools.egg-info/dependency_links.txt1
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools.egg-info/not-zip-safe1
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools.egg-info/requires.txt2
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools.egg-info/top_level.txt1
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/__init__.py125
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/_compat2x.py (renamed from test/3rdparty/testtools-0.9.12/testtools/_compat2x.py)0
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/_compat3x.py (renamed from test/3rdparty/testtools-0.9.12/testtools/_compat3x.py)2
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/_spinner.py (renamed from test/3rdparty/testtools-0.9.12/testtools/_spinner.py)0
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/compat.py (renamed from test/3rdparty/testtools-0.9.12/testtools/compat.py)78
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/content.py (renamed from test/3rdparty/testtools-0.9.12/testtools/content.py)195
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/content_type.py (renamed from test/3rdparty/testtools-0.9.12/testtools/content_type.py)8
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py (renamed from test/3rdparty/testtools-0.9.12/testtools/deferredruntest.py)2
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py (renamed from test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py)0
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/helpers.py48
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py119
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py326
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py228
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py259
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py104
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py126
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py192
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py368
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py175
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/monkey.py (renamed from test/3rdparty/testtools-0.9.12/testtools/monkey.py)0
-rwxr-xr-xtest/3rdparty/testtools-0.9.34/testtools/run.py (renamed from test/3rdparty/testtools-0.9.12/testtools/run.py)121
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/runtest.py (renamed from test/3rdparty/testtools-0.9.12/testtools/runtest.py)7
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tags.py34
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/testcase.py (renamed from test/3rdparty/testtools-0.9.12/testtools/testcase.py)294
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py49
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py (renamed from test/3rdparty/testtools-0.9.12/testtools/testresult/doubles.py)63
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/testresult/real.py1776
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/__init__.py)9
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/helpers.py)45
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py29
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py42
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py396
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py209
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py227
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py82
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py192
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py243
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py254
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py132
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_compat.py)223
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_content.py)126
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_content_type.py)16
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_deferredruntest.py)18
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_distutilscmd.py)24
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_fixturesupport.py)11
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py30
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py)0
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py248
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py)0
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_spinner.py)11
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py84
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_testcase.py)342
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py2919
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py279
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py (renamed from test/3rdparty/testtools-0.9.12/testtools/tests/test_with_with.py)15
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/testsuite.py317
-rw-r--r--test/3rdparty/testtools-0.9.34/testtools/utils.py (renamed from test/3rdparty/testtools-0.9.12/testtools/utils.py)0
-rw-r--r--test/suite/run.py14
-rw-r--r--test/suite/wttest.py2
194 files changed, 24281 insertions, 5490 deletions
diff --git a/test/3rdparty/concurrencytest-0.1.2/PKG-INFO b/test/3rdparty/concurrencytest-0.1.2/PKG-INFO
new file mode 100644
index 00000000000..edb06bf7a42
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 1.1
+Name: concurrencytest
+Version: 0.1.2
+Summary: testtools extension for running unittest suites concurrently
+Home-page: https://github.com/cgoldberg/concurrencytest
+Author: Corey Goldberg
+Author-email: cgoldberg _at_ gmail.com
+License: GNU GPLv3
+Download-URL: http://pypi.python.org/pypi/concurrencytest
+Description: UNKNOWN
+Keywords: test,testtools,unittest,concurrency,parallel
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Testing
diff --git a/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO
new file mode 100644
index 00000000000..edb06bf7a42
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 1.1
+Name: concurrencytest
+Version: 0.1.2
+Summary: testtools extension for running unittest suites concurrently
+Home-page: https://github.com/cgoldberg/concurrencytest
+Author: Corey Goldberg
+Author-email: cgoldberg _at_ gmail.com
+License: GNU GPLv3
+Download-URL: http://pypi.python.org/pypi/concurrencytest
+Description: UNKNOWN
+Keywords: test,testtools,unittest,concurrency,parallel
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Testing
diff --git a/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..bf9f692ad1f
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/SOURCES.txt
@@ -0,0 +1,7 @@
+concurrencytest.py
+setup.py
+concurrencytest.egg-info/PKG-INFO
+concurrencytest.egg-info/SOURCES.txt
+concurrencytest.egg-info/dependency_links.txt
+concurrencytest.egg-info/requires.txt
+concurrencytest.egg-info/top_level.txt \ No newline at end of file
diff --git a/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt
new file mode 100644
index 00000000000..537ebcbac33
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/requires.txt
@@ -0,0 +1,2 @@
+python-subunit
+testtools \ No newline at end of file
diff --git a/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt
new file mode 100644
index 00000000000..cfc96e6db71
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.egg-info/top_level.txt
@@ -0,0 +1 @@
+concurrencytest
diff --git a/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py
new file mode 100644
index 00000000000..b3cb52d48d3
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/concurrencytest.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+#
+# Modified by: Corey Goldberg, 2013
+# License: GPLv2+
+#
+# Original code from:
+# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
+# Copyright (C) 2005-2011 Canonical Ltd
+# License: GPLv2+
+
+"""Python testtools extension for running unittest suites concurrently.
+
+The `testtools` project provides a ConcurrentTestSuite class, but does
+not provide a `make_tests` implementation needed to use it.
+
+This allows you to parallelize a test run across a configurable number
+of worker processes. While this can speed up CPU-bound test runs, it is
+mainly useful for IO-bound tests that spend most of their time waiting for
+data to arrive from someplace else and can benefit from concurrency.
+
+Unix only.
+"""
+
+import os
+import sys
+import traceback
+import unittest
+from itertools import cycle
+from multiprocessing import cpu_count
+
+from subunit import ProtocolTestCase, TestProtocolClient
+from subunit.test_results import AutoTimingTestResultDecorator
+
+from testtools import ConcurrentTestSuite, iterate_tests
+
+
+__all__ = [
+ 'ConcurrentTestSuite',
+ 'fork_for_tests',
+ 'partition_tests',
+]
+
+
+CPU_COUNT = cpu_count()
+
+
+def fork_for_tests(concurrency_num=CPU_COUNT):
+ """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
+
+ :param concurrency_num: number of processes to use.
+ """
+ def do_fork(suite):
+ """Take suite and start up multiple runners by forking (Unix only).
+
+ :param suite: TestSuite object.
+
+ :return: An iterable of TestCase-like objects which can each have
+ run(result) called on them to feed tests to result.
+ """
+ result = []
+ test_blocks = partition_tests(suite, concurrency_num)
+ # Clear the tests from the original suite so it doesn't keep them alive
+ suite._tests[:] = []
+ for process_tests in test_blocks:
+ process_suite = unittest.TestSuite(process_tests)
+ # Also clear each split list so new suite has only reference
+ process_tests[:] = []
+ c2pread, c2pwrite = os.pipe()
+ pid = os.fork()
+ if pid == 0:
+ try:
+ stream = os.fdopen(c2pwrite, 'wb', 1)
+ os.close(c2pread)
+ # Leave stderr and stdout open so we can see test noise
+ # Close stdin so that the child goes away if it decides to
+ # read from stdin (otherwise its a roulette to see what
+ # child actually gets keystrokes for pdb etc).
+ sys.stdin.close()
+ subunit_result = AutoTimingTestResultDecorator(
+ TestProtocolClient(stream)
+ )
+ process_suite.run(subunit_result)
+ except:
+ # Try and report traceback on stream, but exit with error
+ # even if stream couldn't be created or something else
+ # goes wrong. The traceback is formatted to a string and
+ # written in one go to avoid interleaving lines from
+ # multiple failing children.
+ try:
+ stream.write(traceback.format_exc())
+ finally:
+ os._exit(1)
+ os._exit(0)
+ else:
+ os.close(c2pwrite)
+ stream = os.fdopen(c2pread, 'rb', 1)
+ test = ProtocolTestCase(stream)
+ result.append(test)
+ return result
+ return do_fork
+
+
+def partition_tests(suite, count):
+ """Partition suite into count lists of tests."""
+ # This just assigns tests in a round-robin fashion. On one hand this
+ # splits up blocks of related tests that might run faster if they shared
+ # resources, but on the other it avoids assigning blocks of slow tests to
+ # just one partition. So the slowest partition shouldn't be much slower
+ # than the fastest.
+ partitions = [list() for _ in range(count)]
+ tests = iterate_tests(suite)
+ for partition, test in zip(cycle(partitions), tests):
+ partition.append(test)
+ return partitions
+
+
+if __name__ == '__main__':
+ import time
+
+ class SampleTestCase(unittest.TestCase):
+ """Dummy tests that sleep for demo."""
+
+ def test_me_1(self):
+ time.sleep(0.5)
+
+ def test_me_2(self):
+ time.sleep(0.5)
+
+ def test_me_3(self):
+ time.sleep(0.5)
+
+ def test_me_4(self):
+ time.sleep(0.5)
+
+ # Load tests from SampleTestCase defined above
+ suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
+ runner = unittest.TextTestRunner()
+
+ # Run tests sequentially
+ runner.run(suite)
+
+ # Run same tests across 4 processes
+ concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
+ runner.run(concurrent_suite)
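The round-robin assignment performed by partition_tests() above can be illustrated with a small standalone sketch (placeholder test names and three partitions assumed):

    from itertools import cycle

    tests = ['t1', 't2', 't3', 't4', 't5']
    partitions = [[] for _ in range(3)]
    for partition, test in zip(cycle(partitions), tests):
        partition.append(test)
    # partitions == [['t1', 't4'], ['t2', 't5'], ['t3']]; each worker ends up
    # with roughly the same number of tests regardless of suite ordering.
    print(partitions)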
diff --git a/test/3rdparty/concurrencytest-0.1.2/setup.cfg b/test/3rdparty/concurrencytest-0.1.2/setup.cfg
new file mode 100644
index 00000000000..861a9f55426
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/test/3rdparty/concurrencytest-0.1.2/setup.py b/test/3rdparty/concurrencytest-0.1.2/setup.py
new file mode 100644
index 00000000000..447dc4110fe
--- /dev/null
+++ b/test/3rdparty/concurrencytest-0.1.2/setup.py
@@ -0,0 +1,33 @@
+
+"""setup/install script for concurrencytest"""
+
+
+import os
+from setuptools import setup
+
+
+setup(
+ name='concurrencytest',
+ version='0.1.2',
+ py_modules=['concurrencytest'],
+ install_requires=['python-subunit', 'testtools'],
+ author='Corey Goldberg',
+ author_email='cgoldberg _at_ gmail.com',
+ description='testtools extension for running unittest suites concurrently',
+ url='https://github.com/cgoldberg/concurrencytest',
+ download_url='http://pypi.python.org/pypi/concurrencytest',
+ keywords='test testtools unittest concurrency parallel'.split(),
+ license='GNU GPLv3',
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
+ 'Operating System :: POSIX',
+ 'Operating System :: Unix',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 3',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Software Development :: Testing',
+ ]
+)
diff --git a/test/3rdparty/extras-0.0.3/.gitignore b/test/3rdparty/extras-0.0.3/.gitignore
new file mode 100644
index 00000000000..cfc114cbe95
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/.gitignore
@@ -0,0 +1,35 @@
+*.py[co]
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+MANIFEST
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+
+#Translations
+*.mo
+
+#Mr Developer
+.mr.developer.cfg
+
+# editors
+*.swp
+*~
+
+# Testrepository
+.testrepository
diff --git a/test/3rdparty/extras-0.0.3/LICENSE b/test/3rdparty/extras-0.0.3/LICENSE
new file mode 100644
index 00000000000..4dfca452e1a
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2010-2012 the extras authors.
+
+The extras authors are:
+ * Jonathan Lange
+ * Martin Pool
+ * Robert Collins
+
+and are collectively referred to as "extras developers".
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/test/3rdparty/extras-0.0.3/MANIFEST.in b/test/3rdparty/extras-0.0.3/MANIFEST.in
new file mode 100644
index 00000000000..da2696e2430
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/MANIFEST.in
@@ -0,0 +1,6 @@
+include LICENSE
+include Makefile
+include MANIFEST.in
+include NEWS
+include README.rst
+include .gitignore
diff --git a/test/3rdparty/extras-0.0.3/Makefile b/test/3rdparty/extras-0.0.3/Makefile
new file mode 100644
index 00000000000..270e8d11546
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/Makefile
@@ -0,0 +1,30 @@
+# See README.rst for copyright and licensing details.
+
+PYTHON=python
+SOURCES=$(shell find extras -name "*.py")
+
+check:
+ PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run extras.tests.test_suite
+
+TAGS: ${SOURCES}
+ ctags -e -R extras/
+
+tags: ${SOURCES}
+ ctags -R extras/
+
+clean:
+ rm -f TAGS tags
+ find extras -name "*.pyc" -exec rm '{}' \;
+
+### Documentation ###
+
+apidocs:
+ # pydoctor emits deprecation warnings under Ubuntu 10.10 LTS
+ PYTHONWARNINGS='ignore::DeprecationWarning' \
+ pydoctor --make-html --add-package extras \
+ --docformat=restructuredtext --project-name=extras \
+ --project-url=https://launchpad.net/extras
+
+
+.PHONY: apidocs
+.PHONY: check clean
diff --git a/test/3rdparty/extras-0.0.3/NEWS b/test/3rdparty/extras-0.0.3/NEWS
new file mode 100644
index 00000000000..60713b8efa6
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/NEWS
@@ -0,0 +1,27 @@
+extras NEWS
++++++++++++
+
+Changes and improvements to extras_, grouped by release.
+
+NEXT
+~~~~
+
+0.0.3
+~~~~~
+
+* Extras setup.py would break on older testtools releases, which could break
+ installs of newer testtools due to extras then failing to install.
+ (Robert Collins)
+
+0.0.2
+~~~~~
+
+* Fix Makefile to not have cruft leftover from testtools.
+
+0.0.1
+~~~~~
+
+* Initial extraction from testtools.
+
+
+.. _extras: http://pypi.python.org/pypi/extras
diff --git a/test/3rdparty/extras-0.0.3/PKG-INFO b/test/3rdparty/extras-0.0.3/PKG-INFO
new file mode 100644
index 00000000000..645b7c7e619
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/PKG-INFO
@@ -0,0 +1,68 @@
+Metadata-Version: 1.1
+Name: extras
+Version: 0.0.3
+Summary: Useful extra bits for Python - things that should be in the standard library
+Home-page: https://github.com/testing-cabal/extras
+Author: Testing cabal
+Author-email: testtools-dev@lists.launchpad.net
+License: UNKNOWN
+Description: ======
+ extras
+ ======
+
+ extras is a set of extensions to the Python standard library, originally
+ written to make the code within testtools cleaner, but now split out for
+ general use outside of a testing context.
+
+
+ Documentation
+ -------------
+
+ pydoc extras is your friend. extras currently contains the following functions:
+
+ * try_import
+
+ * try_imports
+
+ * safe_hasattr
+
+ Which do what their name suggests.
+
+
+ Licensing
+ ---------
+
+ This project is distributed under the MIT license and copyright is owned by
+ the extras authors. See LICENSE for details.
+
+
+ Required Dependencies
+ ---------------------
+
+ * Python 2.6+ or 3.0+
+
+
+ Bug reports and patches
+ -----------------------
+
+ Please report bugs using github issues at <https://github.com/testing-cabal/extras>.
+ Patches can also be submitted via github. You can mail the authors directly
+ via the mailing list testtools-dev@lists.launchpad.net. (Note that Launchpad
+ discards email from unknown addresses - be sure to sign up for a Launchpad
+ account before mailing the list, or your mail will be silently discarded).
+
+
+ History
+ -------
+
+ extras used to be testtools.helpers, and was factored out when folk wanted to
+ use it separately.
+
+
+ Thanks
+ ------
+
+ * Martin Pool
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
diff --git a/test/3rdparty/extras-0.0.3/README.rst b/test/3rdparty/extras-0.0.3/README.rst
new file mode 100644
index 00000000000..7d3f10ba93c
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/README.rst
@@ -0,0 +1,57 @@
+======
+extras
+======
+
+extras is a set of extensions to the Python standard library, originally
+written to make the code within testtools cleaner, but now split out for
+general use outside of a testing context.
+
+
+Documentation
+-------------
+
+pydoc extras is your friend. extras currently contains the following functions:
+
+* try_import
+
+* try_imports
+
+* safe_hasattr
+
+Which do what their name suggests.
+
+
+Licensing
+---------
+
+This project is distributed under the MIT license and copyright is owned by
+the extras authors. See LICENSE for details.
+
+
+Required Dependencies
+---------------------
+
+ * Python 2.6+ or 3.0+
+
+
+Bug reports and patches
+-----------------------
+
+Please report bugs using github issues at <https://github.com/testing-cabal/extras>.
+Patches can also be submitted via github. You can mail the authors directly
+via the mailing list testtools-dev@lists.launchpad.net. (Note that Launchpad
+discards email from unknown addresses - be sure to sign up for a Launchpad
+account before mailing the list, or your mail will be silently discarded).
+
+
+History
+-------
+
+extras used to be testtools.helpers, and was factored out when folk wanted to
+use it separately.
+
+
+Thanks
+------
+
+ * Martin Pool
diff --git a/test/3rdparty/extras-0.0.3/extras.egg-info/PKG-INFO b/test/3rdparty/extras-0.0.3/extras.egg-info/PKG-INFO
new file mode 100644
index 00000000000..645b7c7e619
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/extras.egg-info/PKG-INFO
@@ -0,0 +1,68 @@
+Metadata-Version: 1.1
+Name: extras
+Version: 0.0.3
+Summary: Useful extra bits for Python - things that should be in the standard library
+Home-page: https://github.com/testing-cabal/extras
+Author: Testing cabal
+Author-email: testtools-dev@lists.launchpad.net
+License: UNKNOWN
+Description: ======
+ extras
+ ======
+
+ extras is a set of extensions to the Python standard library, originally
+ written to make the code within testtools cleaner, but now split out for
+ general use outside of a testing context.
+
+
+ Documentation
+ -------------
+
+ pydoc extras is your friend. extras currently contains the following functions:
+
+ * try_import
+
+ * try_imports
+
+ * safe_hasattr
+
+ Which do what their name suggests.
+
+
+ Licensing
+ ---------
+
+ This project is distributed under the MIT license and copyright is owned by
+ the extras authors. See LICENSE for details.
+
+
+ Required Dependencies
+ ---------------------
+
+ * Python 2.6+ or 3.0+
+
+
+ Bug reports and patches
+ -----------------------
+
+ Please report bugs using github issues at <https://github.com/testing-cabal/extras>.
+ Patches can also be submitted via github. You can mail the authors directly
+ via the mailing list testtools-dev@lists.launchpad.net. (Note that Launchpad
+ discards email from unknown addresses - be sure to sign up for a Launchpad
+ account before mailing the list, or your mail will be silently discarded).
+
+
+ History
+ -------
+
+ extras used to be testtools.helpers, and was factored out when folk wanted to
+ use it separately.
+
+
+ Thanks
+ ------
+
+ * Martin Pool
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
diff --git a/test/3rdparty/extras-0.0.3/extras.egg-info/SOURCES.txt b/test/3rdparty/extras-0.0.3/extras.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..7abc10f3073
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/extras.egg-info/SOURCES.txt
@@ -0,0 +1,15 @@
+.gitignore
+LICENSE
+MANIFEST.in
+Makefile
+NEWS
+README.rst
+setup.cfg
+setup.py
+extras/__init__.py
+extras.egg-info/PKG-INFO
+extras.egg-info/SOURCES.txt
+extras.egg-info/dependency_links.txt
+extras.egg-info/top_level.txt
+extras/tests/__init__.py
+extras/tests/test_extras.py \ No newline at end of file
diff --git a/test/3rdparty/extras-0.0.3/extras.egg-info/dependency_links.txt b/test/3rdparty/extras-0.0.3/extras.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/extras.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/test/3rdparty/extras-0.0.3/extras.egg-info/top_level.txt b/test/3rdparty/extras-0.0.3/extras.egg-info/top_level.txt
new file mode 100644
index 00000000000..8c35a2295a8
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/extras.egg-info/top_level.txt
@@ -0,0 +1 @@
+extras
diff --git a/test/3rdparty/testtools-0.9.12/testtools/helpers.py b/test/3rdparty/extras-0.0.3/extras/__init__.py
index dbf66719edf..2d34b5258de 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/helpers.py
+++ b/test/3rdparty/extras-0.0.3/extras/__init__.py
@@ -1,4 +1,8 @@
-# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
+
+"""Extensions to the Python standard library."""
+
+import sys
__all__ = [
'safe_hasattr',
@@ -6,7 +10,19 @@ __all__ = [
'try_imports',
]
-import sys
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 0, 3, 'final', 0)
def try_import(name, alternative=None, error_callback=None):
@@ -85,3 +101,5 @@ def safe_hasattr(obj, attr, _marker=object()):
properties.
"""
return getattr(obj, attr, _marker) is not _marker
+
+
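The __version__ tuple documented above is reduced to a release string by get_version() in the package's setup.py (shown later in this diff); the same logic as a one-off sketch:

    # Only the major/minor/micro components contribute to the version string.
    __version__ = (0, 0, 3, 'final', 0)
    version = '.'.join(str(component) for component in __version__[0:3])
    print(version)  # 0.0.3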
diff --git a/test/3rdparty/extras-0.0.3/extras/tests/__init__.py b/test/3rdparty/extras-0.0.3/extras/tests/__init__.py
new file mode 100644
index 00000000000..e0d7d4a34d6
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/extras/tests/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
+
+"""Tests for extras."""
+
+from unittest import TestSuite, TestLoader
+
+
+def test_suite():
+ from extras.tests import (
+ test_extras,
+ )
+ modules = [
+ test_extras,
+ ]
+ loader = TestLoader()
+ suites = map(loader.loadTestsFromModule, modules)
+ return TestSuite(suites)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_helpers.py b/test/3rdparty/extras-0.0.3/extras/tests/test_extras.py
index 55de34b7e72..be1ed1c69f6 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_helpers.py
+++ b/test/3rdparty/extras-0.0.3/extras/tests/test_extras.py
@@ -1,25 +1,18 @@
-# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
from testtools import TestCase
-from testtools.helpers import (
- try_import,
- try_imports,
- )
from testtools.matchers import (
- AllMatch,
- AfterPreprocessing,
Equals,
Is,
Not,
)
-from testtools.tests.helpers import (
- FullStackRunTest,
- hide_testtools_stack,
- is_stack_hidden,
+
+from extras import (
safe_hasattr,
+ try_import,
+ try_imports,
)
-
def check_error_callback(test, function, arg, expected_error_count,
expect_result):
"""General test template for error_callback argument.
@@ -193,48 +186,3 @@ class TestTryImports(TestCase):
0, True)
-import testtools.matchers
-import testtools.runtest
-import testtools.testcase
-
-
-def StackHidden(is_hidden):
- return AllMatch(
- AfterPreprocessing(
- lambda module: safe_hasattr(module, '__unittest'),
- Equals(is_hidden)))
-
-
-class TestStackHiding(TestCase):
-
- modules = [
- testtools.matchers,
- testtools.runtest,
- testtools.testcase,
- ]
-
- run_tests_with = FullStackRunTest
-
- def setUp(self):
- super(TestStackHiding, self).setUp()
- self.addCleanup(hide_testtools_stack, is_stack_hidden())
-
- def test_shown_during_testtools_testsuite(self):
- self.assertThat(self.modules, StackHidden(False))
-
- def test_is_stack_hidden_consistent_true(self):
- hide_testtools_stack(True)
- self.assertEqual(True, is_stack_hidden())
-
- def test_is_stack_hidden_consistent_false(self):
- hide_testtools_stack(False)
- self.assertEqual(False, is_stack_hidden())
-
- def test_show_stack(self):
- hide_testtools_stack(False)
- self.assertThat(self.modules, StackHidden(False))
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/extras-0.0.3/setup.cfg b/test/3rdparty/extras-0.0.3/setup.cfg
new file mode 100644
index 00000000000..92ee5499429
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/setup.cfg
@@ -0,0 +1,10 @@
+[test]
+test_module = extras.tests
+buffer = 1
+catch = 1
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/test/3rdparty/extras-0.0.3/setup.py b/test/3rdparty/extras-0.0.3/setup.py
new file mode 100755
index 00000000000..c384a765801
--- /dev/null
+++ b/test/3rdparty/extras-0.0.3/setup.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""Distutils installer for extras."""
+
+from setuptools import setup
+import os.path
+
+import extras
+testtools_cmd = extras.try_import('testtools.TestCommand')
+
+
+def get_version():
+ """Return the version of extras that we are building."""
+ version = '.'.join(
+ str(component) for component in extras.__version__[0:3])
+ return version
+
+
+def get_long_description():
+ readme_path = os.path.join(
+ os.path.dirname(__file__), 'README.rst')
+ return open(readme_path).read()
+
+
+cmdclass = {}
+
+if testtools_cmd is not None:
+ cmdclass['test'] = testtools_cmd
+
+
+setup(name='extras',
+ author='Testing cabal',
+ author_email='testtools-dev@lists.launchpad.net',
+ url='https://github.com/testing-cabal/extras',
+      description=('Useful extra bits for Python - things that should be '
+ 'in the standard library'),
+ long_description=get_long_description(),
+ version=get_version(),
+ classifiers=["License :: OSI Approved :: MIT License"],
+ packages=[
+ 'extras',
+ 'extras.tests',
+ ],
+ cmdclass=cmdclass)
diff --git a/test/3rdparty/python-subunit-0.0.16/MANIFEST.in b/test/3rdparty/python-subunit-0.0.16/MANIFEST.in
new file mode 100644
index 00000000000..eb989816283
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/MANIFEST.in
@@ -0,0 +1,20 @@
+exclude .bzrignore
+exclude aclocal.m4
+prune autom4te.cache
+prune c
+prune c++
+prune compile
+exclude configure*
+exclude depcomp
+exclude INSTALL
+exclude install-sh
+exclude lib*
+exclude ltmain.sh
+prune m4
+exclude Makefile*
+exclude missing
+prune perl
+exclude py-compile
+prune shell
+exclude stamp-h1
+include NEWS
diff --git a/test/3rdparty/python-subunit-0.0.16/NEWS b/test/3rdparty/python-subunit-0.0.16/NEWS
new file mode 100644
index 00000000000..59af931ea2f
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/NEWS
@@ -0,0 +1,493 @@
+---------------------
+subunit release notes
+---------------------
+
+NEXT (In development)
+---------------------
+
+0.0.16
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Perl files should now honour perl system config.
+ (Benedikt Morbach, #1233198)
+
+* Python 3.1 and 3.2 have an inconsistent memoryview implementation which
+ required a workaround for NUL byte detection. (Robert Collins, #1216246)
+
+* The test suite was failing 6 tests due to testtools changing its output
+ formatting of exceptions. (Robert Collins)
+
+* V2 parser errors now set appropriate mime types for the encapsulated packet
+ data and the error message. (Robert Collins)
+
+* When tests fail to import ``python subunit.run -l ...`` will now write a
+ subunit file attachment listing the failed imports and exit 2, rather than
+ listing the stub objects from the importer and exiting 0.
+ (Robert Collins, #1245672)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Most filters will now accept a file path argument instead of only reading
+ from stdin. (Robert Collins, #409206)
+
+0.0.15
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Clients of subunit did not expect memoryview objects in StreamResult events.
+ (Robert Collins)
+
+* Memoryview and struct were mutually incompatible in 2.7.3 and 3.2.
+ (Robert Collins, #1216163)
+
+0.0.14
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Memoryview detection was broken and thus its use was never really tested.
+ (Robert Collins, 1216101)
+
+* TestProtocol2's tag tests were set sort order dependent.
+ (Robert Collins, #1025392)
+
+* TestTestProtocols' test_tags_both was set sort order dependent.
+ (Robert Collins, #1025392)
+
+* TestTestProtocols' test_*_details were dictionary sort order dependent.
+ (Robert Collins, #1025392)
+
+* TestSubUnitTags's test_add_tag was also set sort order dependent.
+ (Robert Collins, #1025392)
+
+0.0.13
+------
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* subunit should now build with automake 1.11 again. (Robert Collins)
+
+* `subunit-stats` no longer outputs encapsulated stdout as subunit.
+ (Robert Collins, #1171987)
+
+* The logic for `subunit.run` is now importable via python -
+ `subunit.run.main`. (Robert Collins, #606770)
+
+BUG FIXES
+~~~~~~~~~
+
+* Removed GPL files that were (C) non Subunit Developers - they are
+ incompatible for binary distribution, which affects redistributors.
+ (Robert Collins, #1185591)
+
+0.0.12
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Subunit v2 packets with both file content and route code were not being
+ parsed correctly - they would incorrectly emit a parser error, due to trying
+ to parse the route code length from the first byes of the file content.
+ (Robert Collins, 1172815)
+
+0.0.11
+------
+
+v2 protocol draft included in this release. The v2 protocol trades off human
+readability for a massive improvement in robustness, the ability to represent
+concurrent tests in a single stream, cheaper parsing, and that provides
+significantly better in-line debugging support and structured forwarding
+of non-test data (such as stdout or stdin data).
+
+This change includes two new filters (subunit-1to2 and subunit-2to1). Use
+these filters to convert old streams to v2 and convert v2 streams to v1.
+
+All the other filters now only parse and emit v2 streams. V2 is still in
+draft format, so if you want to delay and wait for v2 to be finalised, you
+should use subunit-2to1 before any serialisation steps take place.
+With the ability to encapsulate multiple non-test streams, another significant
+change is that filters which emit subunit now encapsulate any non-subunit they
+encounter, labelling it 'stdout'. This permits multiplexing such streams and
+detangling the stdout streams from each input.
+
+The subunit libraries (Python etc) have not changed their behaviour: they
+still emit v1 from their existing API calls. New API's are being added
+and applications should migrate once their language has those API's available.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* ``subunit.run`` now replaces sys.stdout to ensure that stdout is unbuffered
+ - without this pdb output is not reliably visible when stdout is a pipe
+ as it usually is. (Robert Collins)
+
+* v2 protocol draft included in this release. (Python implementation only so
+ far). (Robert Collins)
+
+* Two new Python classes -- ``StreamResultToBytes`` and
+ ``ByteStreamToStreamResult`` handle v2 generation and parsing.
+ (Robert Collins)
+
+0.0.10
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* make_stream_binary is now public for reuse. (Robert Collins)
+
+* NAME was not defined in the protocol BNF. (Robert Collins)
+
+* UnsupportedOperation is available in the Python2.6 io library, so ask
+ forgiveness rather than permission for obtaining it. (Robert Collins)
+
+* Streams with no fileno() attribute are now supported, but they are not
+ checked for being in binary mode: be sure to take care of that if using
+ the library yourself. (Robert Collins)
+
+0.0.9
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* All the source files are now included in the distribution tarball.
+ (Arfrever Frehtes Taifersar Arahesis, Robert Collins, #996275)
+
+* ``python/subunit/tests/test_run.py`` and ``python/subunit/filters.py`` were
+ not included in the 0.0.8 tarball. (Robert Collins)
+
+* Test ids which include non-ascii unicode characters are now supported.
+ (Robert Collins, #1029866)
+
+* The ``failfast`` option to ``subunit.run`` will now work. The dependency on
+ testtools has been raised to 0.9.23 to permit this.
+ (Robert Collins, #1090582)
+
+0.0.8
+-----
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Perl module now correctly outputs "failure" instead of "fail". (Stewart Smith)
+
+* Shell functions now output timestamps. (Stewart Smith, Robert Collins)
+
+* 'subunit2csv' script that converts subunit output to CSV format.
+ (Jonathan Lange)
+
+* ``TagCollapsingDecorator`` now correctly distinguishes between local and
+ global tags. (Jonathan Lange)
+
+* ``TestResultFilter`` always forwards ``time:`` events.
+ (Benji York, Brad Crittenden)
+
+BUG FIXES
+~~~~~~~~~
+
+* Add 'subunit --no-xfail', which will omit expected failures from the subunit
+ stream. (John Arbash Meinel, #623642)
+
+* Add 'subunit -F/--only-genuine-failures' which sets all of '--no-skips',
+ '--no-xfail', '--no-passthrough, '--no-success', and gives you just the
+ failure stream. (John Arbash Meinel)
+
+* Python2.6 support was broken by the fixup feature.
+ (Arfrever Frehtes Taifersar Arahesis, #987490)
+
+* Python3 support regressed in trunk.
+ (Arfrever Frehtes Taifersar Arahesis, #987514)
+
+* Python3 support was insufficiently robust in detecting unicode streams.
+ (Robert Collins, Arfrever Frehtes Taifersar Arahesis)
+
+* Tag support has been implemented for TestProtocolClient.
+ (Robert Collins, #518016)
+
+* Tags can now be filtered. (Jonathan Lange, #664171)
+
+* Test suite works with latest testtools (but not older ones - formatting
+ changes only). (Robert Collins)
+
+0.0.7
+-----
+
+The Subunit Python test runner ``python -m subunit.run`` can now report the
+test ids and also filter via a test id list file thanks to improvements in
+``testtools.run``. See the testtools manual, or testrepository - a major
+user of such functionality.
+
+Additionally the protocol now has a keyword uxsuccess for Unexpected Success
+reporting. Older parsers will report tests with this status code as 'lost
+connection'.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Add ``TimeCollapsingDecorator`` which collapses multiple sequential time()
+ calls into just the first and last. (Jonathan Lange)
+
+* Add ``TagCollapsingDecorator`` which collapses many tags() calls into one
+ where possible. (Jonathan Lange, Robert Collins)
+
+* Force flush of writes to stdout in c/tests/test_child.
+ (Jelmer Vernooij, #687611)
+
+* Provisional Python 3.x support.
+ (Robert Collins, Tres Seaver, Martin[gz], #666819)
+
+* ``subunit.chunked.Decoder`` Python class takes a new ``strict`` option,
+ which defaults to ``True``. When ``False``, the ``Decoder`` will accept
+ incorrect input that is still unambiguous. i.e. subunit will not barf if
+ a \r is missing from the input. (Martin Pool)
+
+* ``subunit-filter`` preserves the relative ordering of ``time:`` statements,
+ so you can now use filtered streams to gather data about how long it takes
+ to run a test. (Jonathan Lange, #716554)
+
+* ``subunit-ls`` now handles a stream with time: instructions that start
+ partway through the stream (which may lead to strange times) more gracefully.
+ (Robert Collins, #785954)
+
+* ``subunit-ls`` should handle the new test outcomes in Python2.7 better.
+ (Robert Collins, #785953)
+
+* ``TestResultFilter`` now collapses sequential calls to time().
+ (Jonathan Lange, #567150)
+
+* ``TestResultDecorator.tags()`` now actually works, and is no longer a buggy
+ copy/paste of ``TestResultDecorator.time()``. (Jonathan Lange, #681828)
+
+* ``TestResultFilter`` now supports a ``fixup_expected_failures``
+ argument. (Jelmer Vernooij, #755241)
+
+* The ``subunit.run`` Python module supports ``-l`` and ``--load-list`` as
+ per ``testtools.run``. This required a dependency bump due to a small
+ API change in ``testtools``. (Robert Collins)
+
+* The help for subunit-filter was confusing about the behaviour of ``-f`` /
+ ``--no-failure``. (Robert Collins, #703392)
+
+* The Python2.7 / testtools addUnexpectedSuccess API is now supported. This
+ required adding a new status code to the protocol. (Robert Collins, #654474)
+
+CHANGES
+~~~~~~~
+
+* testtools 0.9.11 or newer is now needed (due to the Python 3 support).
+ (Robert Collins)
+
+0.0.6
+-----
+
+This release of subunit fixes a number of unicode related bugs. This depends on
+testtools 0.9.4 and will not function without it. Thanks to Tres Seaver there
+is also an optional native setup.py file for use with easy_install and the
+like.
+
+BUG FIXES
+~~~~~~~~~
+
+* Be consistent about delivering unicode content to testtools StringException
+ class which has become (appropriately) conservative. (Robert Collins)
+
+* Fix incorrect reference to subunit_test_failf in c/README.
+ (Brad Hards, #524341)
+
+* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
+ is purely cosmetic as the parameters are passed down with no interpretation.
+ (Robert Collins, #537611)
+
+* Old style tracebacks with no encoding info are now treated as UTF8 rather
+ than some-random-codec-like-ascii. (Robert Collins)
+
+* On windows, ProtocolTestCase and TestProtocolClient will set their streams to
+ binary mode by calling into msvcrt; this avoids having their input or output
+ mangled by the default line ending translation on that platform.
+ (Robert Collins, Martin [gz], #579296)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Subunit now has a setup.py for python deployments that are not using
+ distribution packages. (Tres Seaver, #538181)
+
+* Subunit now supports test discovery by building on the testtools support for
+ it. You can take advantage of it with "python -m subunit.run discover [path]"
+ and see "python -m subunit.run discover --help" for more options.
+
+* Subunit now uses the improved unicode support in testtools when outputting
+ non-details based test information; this should consistently UTF8 encode such
+ strings.
+
+* The Python TestProtocolClient now flushes output on startTest and stopTest.
+ (Martin [gz]).
+
+
+0.0.5
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* make check was failing if subunit wasn't installed due to a missing include
+ path for the test program test_child.
+
+* make distcheck was failing due to a missing $(top_srcdir) rune.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* New filter `subunit-notify` that will show a notification window with test
+ statistics when the test run finishes.
+
+* subunit.run will now pipe its output to the command in the
+ SUBUNIT_FORMATTER environment variable, if set.
+
+0.0.4
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* subunit2junitxml -f required a value; this is now fixed and -f acts as a
+ boolean switch with no parameter.
+
+* Building with autoconf 2.65 is now supported.
+
+
+0.0.3
+-----
+
+ CHANGES:
+
+ * License change, by unanimous agreement of contributors to BSD/Apache
+ License Version 2.0. This makes Subunit compatible with more testing
+ frameworks.
+
+ IMPROVEMENTS:
+
+ * CPPUnit is now directly supported: subunit builds a cppunit listener
+ ``libcppunit-subunit``.
+
+ * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
+ from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
+ serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
+ The ``ProtocolTestCase`` parser now calls outcomes using an extended
+ API that permits attaching arbitrary MIME resources such as text files,
+ log entries and so on. This extended API is being developed with the
+ Python testing community, and is in flux. ``TestResult`` objects that
+ do not support the API will be detected and transparently downgraded
+ back to the regular Python unittest API.
+
+ * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
+ variable when installing.
+
+ * Multipart test outcomes are tentatively supported; the exact protocol
+ for them, both serialiser and object is not yet finalised. Testers and
+ early adopters are sought. As part of this and also in an attempt to
+ provide a more precise focus on the wire protocol and toolchain,
+ Subunit now depends on testtools (http://launchpad.net/testtools)
+ release 0.9.0 or newer.
+
+ * subunit2junitxml supports a new option, --forward which causes it
+ to forward the raw subunit stream in a similar manner to tee. This
+ is used with the -o option to both write an XML report and get some
+ other subunit filter to process the stream.
+
+ * The C library now has ``subunit_test_skip``.
+
+ BUG FIXES:
+
+ * Install progress_model.py correctly.
+
+ * Non-gcc builds will no longer try to use gcc specific flags.
+ (Thanks trondn-norbye)
+
+ API CHANGES:
+
+ INTERNALS:
+
+0.0.2
+-----
+
+ CHANGES:
+
+ IMPROVEMENTS:
+
+ * A number of filters now support ``--no-passthrough`` to cause all
+ non-subunit content to be discarded. This is useful when precise control
+ over what is output is required - such as with subunit2junitxml.
+
+ * A small perl parser is now included, and a new ``subunit-diff`` tool
+ using that is included. (Jelmer Vernooij)
+
+ * Subunit streams can now include optional, incremental lookahead
+ information about progress. This allows reporters to make estimates
+ about completion, when such information is available. See the README
+ under ``progress`` for more details.
+
+ * ``subunit-filter`` now supports regex filtering via ``--with`` and
+ ``--without`` options. (Martin Pool)
+
+ * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
+ test stream.
+
+ * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
+ test reporter to be used, which has a textual progress bar. This requires
+ a recent bzrlib as a minor bugfix was required in bzrlib to support this.
+
+ * ``subunit2junitxml`` has been added. This filter converts a subunit
+ stream to a single JUnit style XML stream using the pyjunitxml
+ python library.
+
+ * The shell functions support skipping via ``subunit_skip_test`` now.
+
+ BUG FIXES:
+
+ * ``xfail`` outcomes are now passed to python TestResult's via
+ addExpectedFailure if it is present on the TestResult. Python 2.6 and
+ earlier which do not have this function will have ``xfail`` outcomes
+ passed through as success outcomes as earlier versions of subunit did.
+
+ API CHANGES:
+
+ * tags are no longer passed around in python via the ``TestCase.tags``
+ attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called,
+ and like in the protocol, if called while a test is active only applies
+ to that test. (Robert Collins)
+
+ * ``TestResultFilter`` takes a new optional constructor parameter
+ ``filter_predicate``. (Martin Pool)
+
+ * When a progress: directive is encountered in a subunit stream, the
+ python bindings now call the ``progress(offset, whence)`` method on
+ ``TestResult``.
+
+ * When a time: directive is encountered in a subunit stream, the python
+ bindings now call the ``time(seconds)`` method on ``TestResult``.
+
+ INTERNALS:
+
+ * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
+ users of subunit will want to wrap their ``TestProtocolClient`` objects
+ in this decorator to get test timing data for performance analysis, as
+ sketched at the end of these notes.
+
+ * (python) ExecTestCase supports passing arguments to test scripts.
+
+ * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
+ which can be used to call some code on every event, without having to
+ implement all the event methods.
+
+ * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
+ causes a timestamp to be output to the stream.
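+
+A minimal sketch of the decorator usage described in the INTERNALS notes
+above (``mypackage.tests`` is a placeholder module name, not something from
+these notes)::
+
+  import sys
+  import unittest
+
+  from subunit import TestProtocolClient
+  from subunit.test_results import AutoTimingTestResultDecorator
+
+  # TestProtocolClient wants a byte-oriented stream; use sys.stdout.buffer
+  # where it exists (Python 3) and plain sys.stdout otherwise.
+  stream = getattr(sys.stdout, 'buffer', sys.stdout)
+
+  # Wrapping the client in AutoTimingTestResultDecorator makes every event
+  # carry timing data, as suggested above.
+  result = AutoTimingTestResultDecorator(TestProtocolClient(stream))
+
+  suite = unittest.TestLoader().loadTestsFromName('mypackage.tests')
+  suite.run(result)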
diff --git a/test/3rdparty/python-subunit-0.0.16/PKG-INFO b/test/3rdparty/python-subunit-0.0.16/PKG-INFO
new file mode 100644
index 00000000000..de79389b594
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/PKG-INFO
@@ -0,0 +1,483 @@
+Metadata-Version: 1.0
+Name: python-subunit
+Version: 0.0.16
+Summary: Python implementation of subunit test streaming protocol
+Home-page: http://launchpad.net/subunit
+Author: Robert Collins
+Author-email: subunit-dev@lists.launchpad.net
+License: UNKNOWN
+Description:
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2013 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+ Subunit
+ -------
+
+ Subunit is a streaming protocol for test results.
+
+ There are two major revisions of the protocol. Version 1 was trivially human
+ readable but had significant defects as far as highly parallel testing was
+ concerned - it had no room for doing discovery and execution in parallel,
+ required substantial buffering when multiplexing and was fragile - a corrupt
+ byte could cause an entire stream to be misparsed. Version 1.1 added
+ encapsulation of binary streams which mitigated some of the issues but the
+ core remained.
+
+ Version 2 shares many of the good characteristics of Version 1 - it can be
+ embedded into a regular text stream (e.g. from a build system) and it still
+ models xUnit style test execution. It also fixes many of the issues with
+ Version 1 - Version 2 can be multiplexed without excessive buffering (in
+ time or space), it has a well defined recovery mechanism for dealing with
+ corrupted streams (e.g. where two processes write to the same stream
+ concurrently, or where the stream generator suffers a bug).
+
+ More details on both protocol versions can be found in the 'Protocol' section
+ of this document.
+
+ Subunit comes with command line filters to process a subunit stream and
+ language bindings for python, C, C++ and shell. Bindings are easy to write
+ for other languages.
+
+ A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole, and tests running on multiple machines
+ can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+ other can be run separately and then aggregated, rather than interfering
+ with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+ deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+ Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+ Integration with other tools
+ ----------------------------
+
+ Subunit's language bindings act as integration with various test runners like
+ 'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+ (typically a few lines) will allow Subunit to be used in more sophisticated
+ ways.
+
+ Python
+ ======
+
+ Subunit has excellent Python support: most of the filters and tools are written
+ in python and there are facilities for using Subunit to increase test isolation
+ seamlessly within a test suite.
+
+ The most common way is to run an existing python test suite and have it output
+ subunit via the ``subunit.run`` module::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+ For more information on the Python support Subunit offers, please see
+ ``pydoc subunit``, or the source in ``python/subunit/``.
+
+ C
+ =
+
+ Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+ has included subunit support in their project for some years now. See
+ 'c/README' for more details.
+
+ C++
+ ===
+
+ The C library is includable and usable directly from C++. A TestListener for
+ CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+ shell
+ =====
+
+ There are two sets of shell tools. There are filters, which accept a subunit
+ stream on stdin and output processed data (or a transformed stream) on stdout.
+
+ Then there are unittest facilities similar to those for C: shell bindings
+ consisting of simple functions to output protocol elements, and a patch for
+ adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+ details.
+
+ Filter recipes
+ --------------
+
+ To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+ The xUnit test model
+ --------------------
+
+ Subunit implements a slightly modified xUnit test model. The stock standard
+ model is that there are tests, which have an id(), can be run, and when run
+ start, emit an outcome (like success or failure) and then finish.
+
+ Subunit extends this with the idea of test enumeration (find out about tests
+ a runner has without running them), tags (allow users to describe tests in
+ ways the test framework doesn't apply any semantic value to), file attachments
+ (allow arbitrary data to make analysing a failure easy) and timestamps.
+
+ The protocol
+ ------------
+
+ Version 2, or v2, is new and still under development, but is intended to
+ supersede version 1 in the very near future. Subunit's bundled tools accept
+ only version 2 and only emit version 2, but the new filters subunit-1to2 and
+ subunit-2to1 can be used to interoperate with older third party libraries.
+
+ Version 2
+ =========
+
+ Version 2 is a binary protocol consisting of independent packets that can be
+ embedded in the output from tools like make - as long as each packet has no
+ other bytes mixed in with it (which 'make -j N>1' has a tendency of doing).
+ Version 2 is currently in draft form, and early adopters should be willing
+ to either discard stored results (if protocol changes are made), or bulk
+ convert them back to v1 and then to a newer edition of v2.
+
+ The protocol synchronises at the start of the stream, after a packet, or
+ after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+ directly after the end of the prior packet.
+
+ Subunit is intended to be transported over a reliable streaming protocol such
+ as TCP. As such it does not concern itself with out of order delivery of
+ packets. However, because of the possibility of corruption due to either
+ bugs in the sender, or due to mixed up data from concurrent writes to the same
+ fd when being embedded, subunit strives to recover reasonably gracefully from
+ damaged data.
+
+ A key design goal for Subunit version 2 is to allow processing and multiplexing
+ without forcing buffering for semantic correctness, as buffering tends to hide
+ hung or otherwise misbehaving tests. That said, limited time based buffering
+ for network efficiency is a good idea - this is ultimately an implementation
+ choice. Line buffering is also discouraged for subunit streams, as dropping
+ into a debugger or other tool may require interactive traffic even if line
+ buffering would not otherwise be a problem.
+
+ In version two there are two conceptual events - a test status event and a file
+ attachment event. Events may have timestamps, and the path of multiplexers that
+ an event is routed through is recorded to permit sending actions back to the
+ source (such as new tests to run or stdin for driving debuggers and other
+ interactive input). Test status events are used to enumerate tests, to report
+ tests and test helpers as they run. Tests may have tags, used to allow
+ tunnelling extra meanings through subunit without requiring parsing of
+ arbitrary file attachments. Things that are not standalone tests get marked
+ as such by setting the 'Runnable' flag to false. (For instance, individual
+ assertions in TAP are not runnable tests, only the top level TAP test script
+ is runnable).
+
+ File attachments are used to provide rich detail about the nature of a failure.
+ File attachments can also be used to encapsulate stdout and stderr both during
+ and outside tests.
+
+ Most numbers are stored in network byte order - Most Significant Byte first
+ encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+ byte's top 2 high order bits encode the total number of octets in the number.
+ This encoding can encode values from 0 to 2**30-1, enough to encode a
+ nanosecond. Numbers that are not variable length encoded are still stored in
+ MSB order.
+
+ prefix octets max max
+ +-------+--------+---------+------------+
+ | 00 | 1 | 2**6-1 | 63 |
+ | 01 | 2 | 2**14-1 | 16383 |
+ | 10 | 3 | 2**22-1 | 4194303 |
+ | 11 | 4 | 2**30-1 | 1073741823 |
+ +-------+--------+---------+------------+
+
+ All variable length elements of the packet are stored with a length prefix
+ number allowing them to be skipped over for consumers that don't need to
+ interpret them.
+
+ UTF-8 strings are stored with no terminating NUL and should not have any embedded
+ NULs (implementations SHOULD validate any such strings that they process and
+ take some remedial action, such as discarding the packet as corrupt).
+
+ In short the structure of a packet is:
+ PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+ FILECONTENT? ROUTING_CODE? CRC32
+
+ In more detail...
+
+ Packets are identified by a single byte signature - 0xB3, which is never legal
+ in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+ bit set and the second not, which is the UTF-8 signature for a continuation
+ byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+ the 1 and 0 for a continuation byte.
+
+ If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
+ a legal character, consider either recoding the text to UTF-8, or using
+ subunit's 'file' packets to embed the text stream in subunit, rather than the
+ other way around.
+
+ Following the signature byte comes a 16-bit flags field, which includes a
+ 4-bit version field - if the version is not 0x2 then the packet cannot be
+ read. It is recommended to signal an error at this point (e.g. by emitting
+ a synthetic error packet and returning to the top level loop to look for
+ new packets, or exiting with an error). If recovery is desired, treat the
+ packet signature as an opaque byte and scan for a new synchronisation point.
+ NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+ as they are an 8-bit safe container format, so recovery from this situation
+ may involve an arbitrary number of false positives until an actual packet
+ is encountered: and even then it may still be false, failing after passing
+ the version check due to coincidence.
+
+ Flags are stored in network byte order too.
+ +-------------------------+------------------------+
+ | High byte | Low byte |
+ | 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
+ | VERSION |feature bits| |
+ +------------+------------+------------------------+
+
+ Valid version values are:
+ 0x2 - version 2
+
+ Feature bits:
+ Bit 11 - mask 0x0800 - Test id present.
+ Bit 10 - mask 0x0400 - Routing code present.
+ Bit 9 - mask 0x0200 - Timestamp present.
+ Bit 8 - mask 0x0100 - Test is 'runnable'.
+ Bit 7 - mask 0x0080 - Tags are present.
+ Bit 6 - mask 0x0040 - File content is present.
+ Bit 5 - mask 0x0020 - File MIME type is present.
+ Bit 4 - mask 0x0010 - EOF marker.
+ Bit 3 - mask 0x0008 - Must be zero in version 2.
+
+ Test status gets three bits:
+ Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+ 000 - undefined / no test
+ 001 - Enumeration / existence
+ 002 - In progress
+ 003 - Success
+ 004 - Unexpected Success
+ 005 - Skipped
+ 006 - Failed
+ 007 - Expected failure
+
+ After the flags field is a number field giving the length in bytes for the
+ entire packet including the signature and the checksum. This length must
+ be less than 4MiB - 4194303 bytes. The encoding can obviously record a larger
+ number but one of the goals is to avoid requiring large buffers, or causing
+ large latency in the packet forward/processing pipeline. Larger file
+ attachments can be communicated in multiple packets, and the overhead in such a
+ 4MiB packet is approximately 0.2%.
+
+ The rest of the packet is a series of optional features as specified by the set
+ feature bits in the flags field. When absent they are entirely absent.
+
+ Forwarding and multiplexing of packets can be done without interpreting the
+ remainder of the packet until the routing code and checksum (which are both at
+ the end of the packet). Additionally, routers can often avoid copying or moving
+ the bulk of the packet, as long as the routing code size increase doesn't force
+ the length encoding to take up a new byte (which will only happen to packets
+ less than or equal to 16KiB in length) - large packets are very efficient to
+ route.
+
+ Timestamp when present is a 32 bit unsigned integer for seconds, and a variable
+ length number for nanoseconds, representing UTC time since Unix Epoch in
+ seconds and nanoseconds.
+
+ Test id when present is a UTF-8 string. The test id should uniquely identify
+ runnable tests such that they can be selected individually. For tests and other
+ actions which cannot be individually run (such as test
+ fixtures/layers/subtests) uniqueness is not required (though being human
+ meaningful is highly recommended).
+
+ Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+ There are no restrictions on tag content (other than the restrictions on UTF-8
+ strings in subunit in general). Tags have no ordering.
+
+ When a MIME type is present, it defines the MIME type for the file across all
+ packets for the same file (routing code + testid + name uniquely identifies a file,
+ reset when EOF is flagged). If a file never has a MIME type set, it should be
+ treated as application/octet-stream.
+
+ File content when present is a UTF-8 string for the name followed by the length
+ in bytes of the content, and then the content octets.
+
+ If present, the routing code is a UTF-8 string. The routing code is used to
+ determine which test backend a test was running on when doing data analysis,
+ and to route stdin to the test process if interaction is required.
+
+ Multiplexers SHOULD add a routing code if none is present, and prefix any
+ existing routing code with a routing code ('/' separated) if one is already
+ present. For example, a multiplexer might label each stream it is multiplexing
+ with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+ code '3' from stream '0' would adjust the route code when forwarding the packet
+ to be '0/3'.
+
+ Following the end of the packet is a CRC-32 checksum of the contents of the
+ packet including the signature.
+
+ Example packets
+ ~~~~~~~~~~~~~~~
+
+ Trivial test "foo" enumeration packet, with test id, runnable set,
+ status=enumeration. Spaces below are to visually break up signature / flags /
+ length / testid / crc32
+
+ b3 2901 0c 03666f6f 08555f1b
+
+
+ Version 1 (and 1.1)
+ ===================
+
+ Version 1 (and 1.1) are mostly human readable protocols.
+
+ Sample subunit wire contents
+ ----------------------------
+
+ The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+ When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+ Subunit protocol description
+ ============================
+
+ This description is being ported to an EBNF style. Currently it is only partly in
+ that style, but should be fairly clear all the same. When in doubt, refer to the
+ source (and ideally help fix up the description!). Generally the protocol is
+ line orientated and consists of either directives and their parameters, or
+ when outside a DETAILS region unexpected lines which are not interpreted by
+ the parser - they should be forwarded unaltered.
+
+ test|testing|test:|testing: test LABEL
+ success|success:|successful|successful: test LABEL
+ success|success:|successful|successful: test LABEL DETAILS
+ failure: test LABEL
+ failure: test LABEL DETAILS
+ error: test LABEL
+ error: test LABEL DETAILS
+ skip[:] test LABEL
+ skip[:] test LABEL DETAILS
+ xfail[:] test LABEL
+ xfail[:] test LABEL DETAILS
+ uxsuccess[:] test LABEL
+ uxsuccess[:] test LABEL DETAILS
+ progress: [+|-]X
+ progress: push
+ progress: pop
+ tags: [-]TAG ...
+ time: YYYY-MM-DD HH:MM:SSZ
+
+ LABEL: UTF8*
+ NAME: UTF8*
+ DETAILS ::= BRACKETED | MULTIPART
+ BRACKETED ::= '[' CR UTF8-lines ']' CR
+ MULTIPART ::= '[ multipart' CR PART* ']' CR
+ PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+ PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+ PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+ unexpected output on stdout -> stdout.
+ exit w/0 or last test completing -> error
+
+ Tags given outside a test are applied to all following tests
+ Tags given after a test: line and before the result line for the same test
+ apply only to that test, and inherit the current global tags.
+ A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+ applying to a single test, or to cancel a global tag.
+
+ The progress directive is used to provide progress information about a stream
+ so that stream consumers can provide completion estimates, progress bars and so
+ on. Stream generators that know how many tests will be present in the stream
+ should output "progress: COUNT". Stream filters that add tests should output
+ "progress: +COUNT", and those that remove tests should output
+ "progress: -COUNT". An absolute count should reset the progress indicators in
+ use - it indicates that two separate streams from different generators have
+ been trivially concatenated together, and there is no knowledge of how many
+ more complete streams are incoming. Smart concatenation could scan each stream
+ for their count and sum them, or alternatively translate absolute counts into
+ relative counts inline. It is recommended that outputters avoid absolute counts
+ unless necessary. The push and pop directives are used to provide local regions
+ for progress reporting. This fits with hierarchically operating test
+ environments - such as those that organise tests into suites - the top-most
+ runner can report on the number of suites, and each suite surround its output
+ with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+ the progress of the restored level by one step. Encountering progress
+ directives between the start and end of a test pair indicates that a previous
+ test was interrupted and did not cleanly terminate: it should be implicitly
+ closed with an error (the same as when a stream ends with no closing test
+ directive for the most recently started test).
+
+ The time directive acts as a clock event - it sets the time for all future
+ events. The value should be a valid ISO8601 time.
+
+ The skip, xfail and uxsuccess outcomes are not supported by all testing
+ environments. In Python the testtools (https://launchpad.net/testtools)
+ library is used to translate these automatically if an older Python version
+ that does not support them is in use. See the testtools documentation for the
+ translation policy.
+
+ skip is used to indicate a test was discovered but not executed. xfail is used
+ to indicate a test that errored in some expected fashion (also know as "TODO"
+ tests in some frameworks). uxsuccess is used to indicate and unexpected success
+ where a test though to be failing actually passes. It is complementary to
+ xfail.
+
+ Hacking on subunit
+ ------------------
+
+ Releases
+ ========
+
+ * Update versions in configure.ac and python/subunit/__init__.py.
+ * Make PyPI and regular tarball releases. Upload the regular one to LP, the
+ PyPI one to PyPI.
+ * Push a tagged commit.
+
+
+Keywords: python test streaming
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Testing
diff --git a/test/3rdparty/python-subunit-0.0.16/README b/test/3rdparty/python-subunit-0.0.16/README
new file mode 100644
index 00000000000..4fa9444ea6f
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/README
@@ -0,0 +1,468 @@
+
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2013 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+Subunit
+-------
+
+Subunit is a streaming protocol for test results.
+
+There are two major revisions of the protocol. Version 1 was trivially human
+readable but had significant defects as far as highly parallel testing was
+concerned - it had no room for doing discovery and execution in parallel,
+required substantial buffering when multiplexing and was fragile - a corrupt
+byte could cause an entire stream to be misparsed. Version 1.1 added
+encapsulation of binary streams which mitigated some of the issues but the
+core remained.
+
+Version 2 shares many of the good characteristics of Version 1 - it can be
+embedded into a regular text stream (e.g. from a build system) and it still
+models xUnit style test execution. It also fixes many of the issues with
+Version 1 - Version 2 can be multiplexed without excessive buffering (in
+time or space), it has a well defined recovery mechanism for dealing with
+corrupted streams (e.g. where two processes write to the same stream
+concurrently, or where the stream generator suffers a bug).
+
+More details on both protocol versions can be found in the 'Protocol' section
+of this document.
+
+Subunit comes with command line filters to process a subunit stream and
+language bindings for python, C, C++ and shell. Bindings are easy to write
+for other languages.
+
+A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole, and tests running on multiple machines
+ can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+ other can be run separately and then aggregated, rather than interfering
+ with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+ deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+Integration with other tools
+----------------------------
+
+Subunit's language bindings act as integration with various test runners like
+'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+(typically a few lines) will allow Subunit to be used in more sophisticated
+ways.
+
+Python
+======
+
+Subunit has excellent Python support: most of the filters and tools are written
+in python and there are facilities for using Subunit to increase test isolation
+seamlessly within a test suite.
+
+The most common way is to run an existing python test suite and have it output
+subunit via the ``subunit.run`` module::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+For more information on the Python support Subunit offers, please see
+``pydoc subunit``, or the source in ``python/subunit/``.
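+
+Going the other way - replaying a stored stream through the standard
+``unittest`` machinery - can be done with the ``ProtocolTestCase`` parser.
+A minimal sketch (the file name is a placeholder, and the stream is assumed
+to be in the version 1 format that ``ProtocolTestCase`` reads)::
+
+  import unittest
+
+  import subunit
+
+  # Replay a recorded stream into a plain TestResult and summarise it.
+  with open('results.subunit', 'rb') as stream:
+      result = unittest.TestResult()
+      subunit.ProtocolTestCase(stream).run(result)
+
+  print('%d tests, %d failures, %d errors' % (
+      result.testsRun, len(result.failures), len(result.errors)))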
+
+C
+=
+
+Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+has included subunit support in their project for some years now. See
+'c/README' for more details.
+
+C++
+===
+
+The C library is includable and usable directly from C++. A TestListener for
+CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+shell
+=====
+
+There are two sets of shell tools. There are filters, which accept a subunit
+stream on stdin and output processed data (or a transformed stream) on stdout.
+
+Then there are unittest facilities similar to those for C: shell bindings
+consisting of simple functions to output protocol elements, and a patch for
+adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+details.
+
+Filter recipes
+--------------
+
+To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+The xUnit test model
+--------------------
+
+Subunit implements a slightly modified xUnit test model. The stock standard
+model is that there are tests, which have an id(), can be run, and when run
+start, emit an outcome (like success or failure) and then finish.
+
+Subunit extends this with the idea of test enumeration (find out about tests
+a runner has without running them), tags (allow users to describe tests in
+ways the test framework doesn't apply any semantic value to), file attachments
+(allow arbitrary data to make analysing a failure easy) and timestamps.
+
+The protocol
+------------
+
+Version 2, or v2, is new and still under development, but is intended to
+supersede version 1 in the very near future. Subunit's bundled tools accept
+only version 2 and only emit version 2, but the new filters subunit-1to2 and
+subunit-2to1 can be used to interoperate with older third party libraries.
+
+Version 2
+=========
+
+Version 2 is a binary protocol consisting of independent packets that can be
+embedded in the output from tools like make - as long as each packet has no
+other bytes mixed in with it (which 'make -j N>1' has a tendency of doing).
+Version 2 is currently in draft form, and early adopters should be willing
+to either discard stored results (if protocol changes are made), or bulk
+convert them back to v1 and then to a newer edition of v2.
+
+The protocol synchronises at the start of the stream, after a packet, or
+after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+directly after the end of the prior packet.
+
+Subunit is intended to be transported over a reliable streaming protocol such
+as TCP. As such it does not concern itself with out of order delivery of
+packets. However, because of the possibility of corruption due to either
+bugs in the sender, or due to mixed up data from concurrent writes to the same
+fd when being embedded, subunit strives to recover reasonably gracefully from
+damaged data.
+
+A key design goal for Subunit version 2 is to allow processing and multiplexing
+without forcing buffering for semantic correctness, as buffering tends to hide
+hung or otherwise misbehaving tests. That said, limited time based buffering
+for network efficiency is a good idea - this is ultimately an implementation
+choice. Line buffering is also discouraged for subunit streams, as dropping
+into a debugger or other tool may require interactive traffic even if line
+buffering would not otherwise be a problem.
+
+In version two there are two conceptual events - a test status event and a file
+attachment event. Events may have timestamps, and the path of multiplexers that
+an event is routed through is recorded to permit sending actions back to the
+source (such as new tests to run or stdin for driving debuggers and other
+interactive input). Test status events are used to enumerate tests, to report
+tests and test helpers as they run. Tests may have tags, used to allow
+tunnelling extra meanings through subunit without requiring parsing of
+arbitrary file attachments. Things that are not standalone tests get marked
+as such by setting the 'Runnable' flag to false. (For instance, individual
+assertions in TAP are not runnable tests, only the top level TAP test script
+is runnable).
+
+File attachments are used to provide rich detail about the nature of a failure.
+File attachments can also be used to encapsulate stdout and stderr both during
+and outside tests.
+
+Most numbers are stored in network byte order - Most Significant Byte first
+encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+byte's top 2 high order bits encode the total number of octets in the number.
+This encoding can encode values from 0 to 2**30-1, enough to encode a
+nanosecond. Numbers that are not variable length encoded are still stored in
+MSB order.
+
+ prefix octets max max
++-------+--------+---------+------------+
+| 00 | 1 | 2**6-1 | 63 |
+| 01 | 2 | 2**14-1 | 16383 |
+| 10 | 3 | 2**22-1 | 4194303 |
+| 11 | 4 | 2**30-1 | 1073741823 |
++-------+--------+---------+------------+
+
+All variable length elements of the packet are stored with a length prefix
+number allowing them to be skipped over for consumers that don't need to
+interpret them.
+
+UTF-8 strings are stored with no terminating NUL and should not have any embedded
+NULs (implementations SHOULD validate any such strings that they process and
+take some remedial action, such as discarding the packet as corrupt).
+
+In short the structure of a packet is:
+PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+ FILECONTENT? ROUTING_CODE? CRC32
+
+In more detail...
+
+Packets are identified by a single byte signature - 0xB3, which is never legal
+in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+bit set and the second not, which is the UTF-8 signature for a continuation
+byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+the 1 and 0 for a continuation byte.
+
+If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
+a legal character, consider either recoding the text to UTF-8, or using
+subunit's 'file' packets to embed the text stream in subunit, rather than the
+other way around.
+
+Following the signature byte comes a 16-bit flags field, which includes a
+4-bit version field - if the version is not 0x2 then the packet cannot be
+read. It is recommended to signal an error at this point (e.g. by emitting
+a synthetic error packet and returning to the top level loop to look for
+new packets, or exiting with an error). If recovery is desired, treat the
+packet signature as an opaque byte and scan for a new synchronisation point.
+NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+as they are an 8-bit safe container format, so recovery from this situation
+may involve an arbitrary number of false positives until an actual packet
+is encountered: and even then it may still be false, failing after passing
+the version check due to coincidence.
+
+Flags are stored in network byte order too.
++-------------------------+------------------------+
+| High byte | Low byte |
+| 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
+| VERSION |feature bits| |
++------------+------------+------------------------+
+
+Valid version values are:
+0x2 - version 2
+
+Feature bits:
+Bit 11 - mask 0x0800 - Test id present.
+Bit 10 - mask 0x0400 - Routing code present.
+Bit 9 - mask 0x0200 - Timestamp present.
+Bit 8 - mask 0x0100 - Test is 'runnable'.
+Bit 7 - mask 0x0080 - Tags are present.
+Bit 6 - mask 0x0040 - File content is present.
+Bit 5 - mask 0x0020 - File MIME type is present.
+Bit 4 - mask 0x0010 - EOF marker.
+Bit 3 - mask 0x0008 - Must be zero in version 2.
+
+Test status gets three bits:
+Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+000 - undefined / no test
+001 - Enumeration / existence
+002 - In progress
+003 - Success
+004 - Unexpected Success
+005 - Skipped
+006 - Failed
+007 - Expected failure
+
+After the flags field is a number field giving the length in bytes for the
+entire packet including the signature and the checksum. This length must
+be less than 4MiB - 4194303 bytes. The encoding can obviously record a larger
+number but one of the goals is to avoid requiring large buffers, or causing
+large latency in the packet forward/processing pipeline. Larger file
+attachments can be communicated in multiple packets, and the overhead in such a
+4MiB packet is approximately 0.2%.
+
+The rest of the packet is a series of optional features as specified by the set
+feature bits in the flags field. When absent they are entirely absent.
+
+Forwarding and multiplexing of packets can be done without interpreting the
+remainder of the packet until the routing code and checksum (which are both at
+the end of the packet). Additionally, routers can often avoid copying or moving
+the bulk of the packet, as long as the routing code size increase doesn't force
+the length encoding to take up a new byte (which will only happen to packets
+less than or equal to 16KiB in length) - large packets are very efficient to
+route.
+
+Timestamp when present is a 32 bit unsigned integer for seconds, and a variable
+length number for nanoseconds, representing UTC time since Unix Epoch in
+seconds and nanoseconds.
+
+Test id when present is a UTF-8 string. The test id should uniquely identify
+runnable tests such that they can be selected individually. For tests and other
+actions which cannot be individually run (such as test
+fixtures/layers/subtests) uniqueness is not required (though being human
+meaningful is highly recommended).
+
+Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+There are no restrictions on tag content (other than the restrictions on UTF-8
+strings in subunit in general). Tags have no ordering.
+
+When a MIME type is present, it defines the MIME type for the file across all
+packets for the same file (routing code + testid + name uniquely identifies a file,
+reset when EOF is flagged). If a file never has a MIME type set, it should be
+treated as application/octet-stream.
+
+File content when present is a UTF-8 string for the name followed by the length
+in bytes of the content, and then the content octets.
+
+If present, the routing code is a UTF-8 string. The routing code is used to
+determine which test backend a test was running on when doing data analysis,
+and to route stdin to the test process if interaction is required.
+
+Multiplexers SHOULD add a routing code if none is present, and prefix any
+existing routing code with a routing code ('/' separated) if one is already
+present. For example, a multiplexer might label each stream it is multiplexing
+with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+code '3' from stream '0' would adjust the route code when forwarding the packet
+to be '0/3'.
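+
+The prefixing rule is small enough to show directly (a sketch; the function
+name is invented for illustration)::
+
+  def prefix_route_code(local_label, existing_code=None):
+      # A multiplexer labels each inbound stream ('0', '1', ...) and, when a
+      # packet already carries a route code, prepends its own label with '/'.
+      if existing_code is None:
+          return local_label
+      return local_label + '/' + existing_code
+
+  assert prefix_route_code('0', '3') == '0/3'
+  assert prefix_route_code('1') == '1'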
+
+Following the end of the packet is a CRC-32 checksum of the contents of the
+packet including the signature.
+
+Example packets
+~~~~~~~~~~~~~~~
+
+Trivial test "foo" enumeration packet, with test id, runnable set,
+status=enumeration. Spaces below are to visually break up signature / flags /
+length / testid / crc32
+
+b3 2901 0c 03666f6f 08555f1b
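+
+Pulling that example apart in Python 3 (a sketch for illustration; the CRC
+comparison assumes the checksum is the standard zlib CRC-32, so the printed
+values are something to verify rather than a guarantee)::
+
+  import binascii
+  import struct
+  import zlib
+
+  packet = binascii.unhexlify('b329010c03666f6f08555f1b')
+
+  flags = struct.unpack('>H', packet[1:3])[0]
+  print('signature', hex(packet[0]))           # 0xb3
+  print('version', flags >> 12)                # 2
+  print('test id present', bool(flags & 0x0800))
+  print('runnable', bool(flags & 0x0100))
+  print('status', flags & 0x0007)              # 1 == enumeration/existence
+  print('length', packet[3])                   # 12 == whole packet, CRC included
+  name_len = packet[4]
+  print('test id', packet[5:5 + name_len].decode('utf8'))  # 'foo'
+  print('stored crc', hex(struct.unpack('>I', packet[-4:])[0]))
+  print('computed crc', hex(zlib.crc32(packet[:-4]) & 0xffffffff))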
+
+
+Version 1 (and 1.1)
+===================
+
+Version 1 (and 1.1) are mostly human readable protocols.
+
+Sample subunit wire contents
+----------------------------
+
+The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+Subunit protocol description
+============================
+
+This description is being ported to an EBNF style. Currently it is only partly in
+that style, but should be fairly clear all the same. When in doubt, refer to the
+source (and ideally help fix up the description!). Generally the protocol is
+line orientated and consists of either directives and their parameters, or
+when outside a DETAILS region unexpected lines which are not interpreted by
+the parser - they should be forwarded unaltered.
+
+test|testing|test:|testing: test LABEL
+success|success:|successful|successful: test LABEL
+success|success:|successful|successful: test LABEL DETAILS
+failure: test LABEL
+failure: test LABEL DETAILS
+error: test LABEL
+error: test LABEL DETAILS
+skip[:] test LABEL
+skip[:] test LABEL DETAILS
+xfail[:] test LABEL
+xfail[:] test LABEL DETAILS
+uxsuccess[:] test LABEL
+uxsuccess[:] test LABEL DETAILS
+progress: [+|-]X
+progress: push
+progress: pop
+tags: [-]TAG ...
+time: YYYY-MM-DD HH:MM:SSZ
+
+LABEL: UTF8*
+NAME: UTF8*
+DETAILS ::= BRACKETED | MULTIPART
+BRACKETED ::= '[' CR UTF8-lines ']' CR
+MULTIPART ::= '[ multipart' CR PART* ']' CR
+PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+unexpected output on stdout -> stdout.
+exit w/0 or last test completing -> error
+
+Tags given outside a test are applied to all following tests
+Tags given after a test: line and before the result line for the same test
+apply only to that test, and inherit the current global tags.
+A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+applying to a single test, or to cancel a global tag.
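+
+For example, in a fragment following the grammar above (illustrative only)::
+
+  tags: global-tag
+  test: foo
+  tags: local-tag -global-tag
+  success: foo
+  test: bar
+  success: bar
+
+Here ``foo`` ends up tagged only with ``local-tag`` (the '-' cancels the
+inherited global tag for that one test), while ``bar`` still carries
+``global-tag``.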
+
+The progress directive is used to provide progress information about a stream
+so that stream consumers can provide completion estimates, progress bars and so
+on. Stream generators that know how many tests will be present in the stream
+should output "progress: COUNT". Stream filters that add tests should output
+"progress: +COUNT", and those that remove tests should output
+"progress: -COUNT". An absolute count should reset the progress indicators in
+use - it indicates that two separate streams from different generators have
+been trivially concatenated together, and there is no knowledge of how many
+more complete streams are incoming. Smart concatenation could scan each stream
+for their count and sum them, or alternatively translate absolute counts into
+relative counts inline. It is recommended that outputters avoid absolute counts
+unless necessary. The push and pop directives are used to provide local regions
+for progress reporting. This fits with hierarchically operating test
+environments - such as those that organise tests into suites - the top-most
+runner can report on the number of suites, and each suite surround its output
+with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+the progress of the restored level by one step. Encountering progress
+directives between the start and end of a test pair indicates that a previous
+test was interrupted and did not cleanly terminate: it should be implicitly
+closed with an error (the same as when a stream ends with no closing test
+directive for the most recently started test).
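+
+A small illustrative stream (not from the original text) using the push and
+pop form::
+
+  progress: 3
+  test: a
+  success: a
+  progress: push
+  progress: 2
+  test: b.child1
+  success: b.child1
+  test: b.child2
+  success: b.child2
+  progress: pop
+  test: c
+  success: c
+
+The outer level advertises three steps; the region between ``push`` and
+``pop`` reports its own two tests, and the ``pop`` also advances the outer
+level by one step.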
+
+The time directive acts as a clock event - it sets the time for all future
+events. The value should be a valid ISO8601 time.
+
+The skip, xfail and uxsuccess outcomes are not supported by all testing
+environments. In Python the testtools (https://launchpad.net/testtools)
+library is used to translate these automatically if an older Python version
+that does not support them is in use. See the testtools documentation for the
+translation policy.
+
+skip is used to indicate a test was discovered but not executed. xfail is used
+to indicate a test that errored in some expected fashion (also known as "TODO"
+tests in some frameworks). uxsuccess is used to indicate an unexpected success
+where a test thought to be failing actually passes. It is complementary to
+xfail.
+
+Hacking on subunit
+------------------
+
+Releases
+========
+
+* Update versions in configure.ac and python/subunit/__init__.py.
+* Make PyPI and regular tarball releases. Upload the regular one to LP, the
+ PyPI one to PyPI.
+* Push a tagged commit.
+
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2 b/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2
new file mode 100755
index 00000000000..9725820cfc5
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-1to2
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Convert a version 1 subunit stream to version 2 stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import ExtendedToStreamDecorator
+
+from subunit import StreamResultToBytes
+from subunit.filters import find_stream, run_tests_from_stream
+
+
+def make_options(description):
+ parser = OptionParser(description=__doc__)
+ return parser
+
+
+def main():
+ parser = make_options(__doc__)
+ (options, args) = parser.parse_args()
+ run_tests_from_stream(find_stream(sys.stdin, args),
+ ExtendedToStreamDecorator(StreamResultToBytes(sys.stdout)))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1 b/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1
new file mode 100755
index 00000000000..0072307f0fb
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-2to1
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Convert a version 2 subunit stream to a version 1 stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import ByteStreamToStreamResult, TestProtocolClient
+from subunit.filters import find_stream, run_tests_from_stream
+
+
+def make_options(description):
+ parser = OptionParser(description=__doc__)
+ return parser
+
+
+def main():
+ parser = make_options(__doc__)
+ (options, args) = parser.parse_args()
+ case = ByteStreamToStreamResult(
+ find_stream(sys.stdin, args), non_subunit_name='stdout')
+ result = StreamToExtendedDecorator(TestProtocolClient(sys.stdout))
+ # What about stdout chunks?
+ result.startTestRun()
+ case.run(result)
+ result.stopTestRun()
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter b/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter
new file mode 100755
index 00000000000..e9e2bb06325
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-filter
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 200-2013 Robert Collins <robertc@robertcollins.net>
+# (C) 2009 Martin Pool
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to include/exclude tests.
+
+The default is to strip successful tests.
+
+Tests can be filtered by Python regular expressions with --with and --without,
+which match both the test name and the error text (if any). The result
+contains tests which match any of the --with expressions and none of the
+--without expressions. For case-insensitive matching prepend '(?i)'.
+Remember to quote shell metacharacters.
+"""
+
+from optparse import OptionParser
+import sys
+import re
+
+from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
+
+from subunit import (
+ DiscardStream,
+ ProtocolTestCase,
+ StreamResultToBytes,
+ read_test_list,
+ )
+from subunit.filters import filter_by_result, find_stream
+from subunit.test_results import (
+ and_predicates,
+ make_tag_filter,
+ TestResultFilter,
+ )
+
+
+def make_options(description):
+ parser = OptionParser(description=__doc__)
+ parser.add_option("--error", action="store_false",
+ help="include errors", default=False, dest="error")
+ parser.add_option("-e", "--no-error", action="store_true",
+ help="exclude errors", dest="error")
+ parser.add_option("--failure", action="store_false",
+ help="include failures", default=False, dest="failure")
+ parser.add_option("-f", "--no-failure", action="store_true",
+ help="exclude failures", dest="failure")
+ parser.add_option("--passthrough", action="store_false",
+ help="Forward non-subunit input as 'stdout'.", default=False,
+ dest="no_passthrough")
+ parser.add_option("--no-passthrough", action="store_true",
+ help="Discard all non subunit input.", default=False,
+ dest="no_passthrough")
+ parser.add_option("-s", "--success", action="store_false",
+ help="include successes", dest="success")
+ parser.add_option("--no-success", action="store_true",
+ help="exclude successes", default=True, dest="success")
+ parser.add_option("--no-skip", action="store_true",
+ help="exclude skips", dest="skip")
+ parser.add_option("--xfail", action="store_false",
+ help="include expected falures", default=True, dest="xfail")
+ parser.add_option("--no-xfail", action="store_true",
+ help="exclude expected falures", default=True, dest="xfail")
+ parser.add_option(
+ "--with-tag", type=str,
+ help="include tests with these tags", action="append", dest="with_tags")
+ parser.add_option(
+ "--without-tag", type=str,
+ help="exclude tests with these tags", action="append", dest="without_tags")
+ parser.add_option("-m", "--with", type=str,
+ help="regexp to include (case-sensitive by default)",
+ action="append", dest="with_regexps")
+ parser.add_option("--fixup-expected-failures", type=str,
+ help="File with list of test ids that are expected to fail; on failure "
+ "their result will be changed to xfail; on success they will be "
+ "changed to error.", dest="fixup_expected_failures", action="append")
+ parser.add_option("--without", type=str,
+ help="regexp to exclude (case-sensitive by default)",
+ action="append", dest="without_regexps")
+ parser.add_option("-F", "--only-genuine-failures", action="callback",
+ callback=only_genuine_failures_callback,
+ help="Only pass through failures and exceptions.")
+ return parser
+
+
+def only_genuine_failures_callback(option, opt, value, parser):
+ parser.rargs.insert(0, '--no-passthrough')
+ parser.rargs.insert(0, '--no-xfail')
+ parser.rargs.insert(0, '--no-skip')
+ parser.rargs.insert(0, '--no-success')
+
+
+def _compile_re_from_list(l):
+ return re.compile("|".join(l), re.MULTILINE)
+
+
+def _make_regexp_filter(with_regexps, without_regexps):
+ """Make a callback that checks tests against regexps.
+
+ with_regexps and without_regexps are each either a list of regexp strings,
+ or None.
+ """
+ with_re = with_regexps and _compile_re_from_list(with_regexps)
+ without_re = without_regexps and _compile_re_from_list(without_regexps)
+
+ def check_regexps(test, outcome, err, details, tags):
+ """Check if this test and error match the regexp filters."""
+ test_str = str(test) + outcome + str(err) + str(details)
+ if with_re and not with_re.search(test_str):
+ return False
+ if without_re and without_re.search(test_str):
+ return False
+ return True
+ return check_regexps
+
+
+def _make_result(output, options, predicate):
+ """Make the result that we'll send the test outcomes to."""
+ fixup_expected_failures = set()
+ for path in options.fixup_expected_failures or ():
+ fixup_expected_failures.update(read_test_list(path))
+ return StreamToExtendedDecorator(TestResultFilter(
+ ExtendedToStreamDecorator(
+ StreamResultToBytes(output)),
+ filter_error=options.error,
+ filter_failure=options.failure,
+ filter_success=options.success,
+ filter_skip=options.skip,
+ filter_xfail=options.xfail,
+ filter_predicate=predicate,
+ fixup_expected_failures=fixup_expected_failures))
+
+
+def main():
+ parser = make_options(__doc__)
+ (options, args) = parser.parse_args()
+
+ regexp_filter = _make_regexp_filter(
+ options.with_regexps, options.without_regexps)
+ tag_filter = make_tag_filter(options.with_tags, options.without_tags)
+ filter_predicate = and_predicates([regexp_filter, tag_filter])
+
+ filter_by_result(
+ lambda output_to: _make_result(sys.stdout, options, filter_predicate),
+ output_path=None,
+ passthrough=(not options.no_passthrough),
+ forward=False,
+ protocol_version=2,
+ input_stream=find_stream(sys.stdin, args))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls b/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls
new file mode 100755
index 00000000000..8c6a1e7e8f5
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-ls
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""List tests in a subunit stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import (
+ CopyStreamResult, StreamToExtendedDecorator, StreamResultRouter,
+ StreamSummary)
+
+from subunit import ByteStreamToStreamResult
+from subunit.filters import find_stream, run_tests_from_stream
+from subunit.test_results import (
+ CatFiles,
+ TestIdPrintingResult,
+ )
+
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--times", action="store_true",
+ help="list the time each test took (requires a timestamped stream)",
+ default=False)
+parser.add_option("--exists", action="store_true",
+ help="list tests that are reported as existing (as well as ran)",
+ default=False)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+test = ByteStreamToStreamResult(
+ find_stream(sys.stdin, args), non_subunit_name="stdout")
+result = TestIdPrintingResult(sys.stdout, options.times, options.exists)
+if not options.no_passthrough:
+ result = StreamResultRouter(result)
+ cat = CatFiles(sys.stdout)
+ result.add_rule(cat, 'test_id', test_id=None)
+summary = StreamSummary()
+result = CopyStreamResult([result, summary])
+result.startTestRun()
+test.run(result)
+result.stopTestRun()
+if summary.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify b/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify
new file mode 100755
index 00000000000..bc833da779d
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-notify
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Notify the user of a finished test run."""
+
+import sys
+
+import pygtk
+pygtk.require('2.0')
+import pynotify
+from testtools import StreamToExtendedDecorator
+
+from subunit import TestResultStats
+from subunit.filters import run_filter_script
+
+if not pynotify.init("Subunit-notify"):
+ sys.exit(1)
+
+
+def notify_of_result(result):
+ result = result.decorated
+ if result.failed_tests > 0:
+ summary = "Test run failed"
+ else:
+ summary = "Test run successful"
+ body = "Total tests: %d; Passed: %d; Failed: %d" % (
+ result.total_tests,
+ result.passed_tests,
+ result.failed_tests,
+ )
+ nw = pynotify.Notification(summary, body)
+ nw.show()
+
+
+run_filter_script(
+ lambda output:StreamToExtendedDecorator(TestResultStats(output)),
+ __doc__, notify_of_result, protocol_version=2)
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats b/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats
new file mode 100755
index 00000000000..79733b06226
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-stats
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import TestResultStats
+from subunit.filters import run_filter_script
+
+
+result = TestResultStats(sys.stdout)
+def show_stats(r):
+ r.decorated.formatStats()
+run_filter_script(
+ lambda output:StreamToExtendedDecorator(result),
+ __doc__, show_stats, protocol_version=2, passthrough_subunit=False)
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags b/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags
new file mode 100755
index 00000000000..10224924eac
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit-tags
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter to change tags on a subunit stream.
+
+subunit-tags foo -> adds foo
+subunit-tags foo -bar -> adds foo and removes bar
+"""
+
+import sys
+
+from subunit import tag_stream
+
+sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk b/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk
new file mode 100755
index 00000000000..78b43097ec9
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit2gtk
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+### The GTK progress bar __init__ function is derived from the pygtk tutorial:
+# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
+#
+# The GTK Tutorial is Copyright (C) 1997 Ian Main.
+#
+# Copyright (C) 1998-1999 Tony Gale.
+#
+# Permission is granted to make and distribute verbatim copies of this manual
+# provided the copyright notice and this permission notice are preserved on all
+# copies.
+#
+# Permission is granted to copy and distribute modified versions of this
+# document under the conditions for verbatim copying, provided that this
+# copyright notice is included exactly as in the original, and that the entire
+# resulting derived work is distributed under the terms of a permission notice
+# identical to this one.
+#
+# Permission is granted to copy and distribute translations of this document
+# into another language, under the above conditions for modified versions.
+#
+# If you are intending to incorporate this document into a published work,
+# please contact the maintainer, and we will make an effort to ensure that you
+# have the most up to date information available.
+#
+# There is no guarantee that this document lives up to its intended purpose.
+# This is simply provided as a free resource. As such, the authors and
+# maintainers of the information provided within can not make any guarantee
+# that the information is even accurate.
+
+"""Display a subunit stream in a gtk progress window."""
+
+import sys
+import threading
+import unittest
+
+import pygtk
+pygtk.require('2.0')
+import gtk, gtk.gdk, gobject
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import (
+ PROGRESS_POP,
+ PROGRESS_PUSH,
+ PROGRESS_SET,
+ ByteStreamToStreamResult,
+ )
+from subunit.progress_model import ProgressModel
+
+
+class GTKTestResult(unittest.TestResult):
+
+ def __init__(self):
+ super(GTKTestResult, self).__init__()
+ # Instance variables (in addition to TestResult)
+ self.window = None
+ self.run_label = None
+ self.ok_label = None
+ self.not_ok_label = None
+ self.total_tests = None
+
+ self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+ self.window.set_resizable(True)
+
+ self.window.connect("destroy", gtk.main_quit)
+ self.window.set_title("Tests...")
+ self.window.set_border_width(0)
+
+ vbox = gtk.VBox(False, 5)
+ vbox.set_border_width(10)
+ self.window.add(vbox)
+ vbox.show()
+
+ # Create a centering alignment object
+ align = gtk.Alignment(0.5, 0.5, 0, 0)
+ vbox.pack_start(align, False, False, 5)
+ align.show()
+
+ # Create the ProgressBar
+ self.pbar = gtk.ProgressBar()
+ align.add(self.pbar)
+ self.pbar.set_text("Running")
+ self.pbar.show()
+ self.progress_model = ProgressModel()
+
+ separator = gtk.HSeparator()
+ vbox.pack_start(separator, False, False, 0)
+ separator.show()
+
+ # rows, columns, homogeneous
+ table = gtk.Table(2, 3, False)
+ vbox.pack_start(table, False, True, 0)
+ table.show()
+ # Show summary details about the run. Could use an expander.
+ label = gtk.Label("Run:")
+ table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.run_label = gtk.Label("N/A")
+ table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.run_label.show()
+
+ label = gtk.Label("OK:")
+ table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.ok_label = gtk.Label("N/A")
+ table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.ok_label.show()
+
+ label = gtk.Label("Not OK:")
+ table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.not_ok_label = gtk.Label("N/A")
+ table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.not_ok_label.show()
+
+ self.window.show()
+ # For the demo.
+ self.window.set_keep_above(True)
+ self.window.present()
+
+ def stopTest(self, test):
+ super(GTKTestResult, self).stopTest(test)
+ gobject.idle_add(self._stopTest)
+
+ def _stopTest(self):
+ self.progress_model.advance()
+ if self.progress_model.width() == 0:
+ self.pbar.pulse()
+ else:
+ pos = self.progress_model.pos()
+ width = self.progress_model.width()
+ percentage = (pos / float(width))
+ self.pbar.set_fraction(percentage)
+
+ def stopTestRun(self):
+ try:
+ super(GTKTestResult, self).stopTestRun()
+ except AttributeError:
+ pass
+ gobject.idle_add(self.pbar.set_text, 'Finished')
+
+ def addError(self, test, err):
+ super(GTKTestResult, self).addError(test, err)
+ gobject.idle_add(self.update_counts)
+
+ def addFailure(self, test, err):
+ super(GTKTestResult, self).addFailure(test, err)
+ gobject.idle_add(self.update_counts)
+
+ def addSuccess(self, test):
+ super(GTKTestResult, self).addSuccess(test)
+ gobject.idle_add(self.update_counts)
+
+ def addSkip(self, test, reason):
+ # addSkip is new in Python 2.7/3.1
+ addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
+ if callable(addSkip):
+ addSkip(test, reason)
+ gobject.idle_add(self.update_counts)
+
+ def addExpectedFailure(self, test, err):
+ # addExpectedFailure is new in Python 2.7/3.1
+ addExpectedFailure = getattr(super(GTKTestResult, self),
+ 'addExpectedFailure', None)
+ if callable(addExpectedFailure):
+ addExpectedFailure(test, err)
+ gobject.idle_add(self.update_counts)
+
+ def addUnexpectedSuccess(self, test):
+ # addUnexpectedSuccess is new in Python 2.7/3.1
+ addUnexpectedSuccess = getattr(super(GTKTestResult, self),
+ 'addUnexpectedSuccess', None)
+ if callable(addUnexpectedSuccess):
+ addUnexpectedSuccess(test)
+ gobject.idle_add(self.update_counts)
+
+ def progress(self, offset, whence):
+ if whence == PROGRESS_PUSH:
+ self.progress_model.push()
+ elif whence == PROGRESS_POP:
+ self.progress_model.pop()
+ elif whence == PROGRESS_SET:
+ self.total_tests = offset
+ self.progress_model.set_width(offset)
+ else:
+ self.total_tests += offset
+ self.progress_model.adjust_width(offset)
+
+ def time(self, a_datetime):
+ # We don't try to estimate completion yet.
+ pass
+
+ def update_counts(self):
+ self.run_label.set_text(str(self.testsRun))
+ bad = len(self.failures + self.errors)
+ self.ok_label.set_text(str(self.testsRun - bad))
+ self.not_ok_label.set_text(str(bad))
+
+gobject.threads_init()
+result = StreamToExtendedDecorator(GTKTestResult())
+test = ByteStreamToStreamResult(sys.stdin, non_subunit_name='stdout')
+# Get setup
+while gtk.events_pending():
+ gtk.main_iteration()
+# Start IO
+def run_and_finish():
+ test.run(result)
+ result.stopTestRun()
+t = threading.Thread(target=run_and_finish)
+t.daemon = True
+result.startTestRun()
+t.start()
+gtk.main()
+if result.decorated.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml b/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml
new file mode 100755
index 00000000000..8e827d53740
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit2junitxml
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit.filters import run_filter_script
+
+try:
+ from junitxml import JUnitXmlResult
+except ImportError:
+ sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
+ "http://pypi.python.org/pypi/junitxml) is required for this filter.")
+ raise
+
+
+run_filter_script(
+ lambda output:StreamToExtendedDecorator(JUnitXmlResult(output)), __doc__,
+ protocol_version=2)
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit b/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit
new file mode 100755
index 00000000000..d10ceea6f09
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/subunit2pyunit
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Display a subunit stream through python's unittest test runner."""
+
+from operator import methodcaller
+from optparse import OptionParser
+import sys
+import unittest
+
+from testtools import StreamToExtendedDecorator, DecorateTestCaseResult, StreamResultRouter
+
+from subunit import ByteStreamToStreamResult
+from subunit.filters import find_stream
+from subunit.test_results import CatFiles
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("--progress", action="store_true",
+ help="Use bzrlib's test reporter (requires bzrlib)",
+ default=False)
+(options, args) = parser.parse_args()
+test = ByteStreamToStreamResult(
+ find_stream(sys.stdin, args), non_subunit_name='stdout')
+def wrap_result(result):
+ result = StreamToExtendedDecorator(result)
+ if not options.no_passthrough:
+ result = StreamResultRouter(result)
+ result.add_rule(CatFiles(sys.stdout), 'test_id', test_id=None)
+ return result
+test = DecorateTestCaseResult(test, wrap_result,
+ before_run=methodcaller('startTestRun'),
+ after_run=methodcaller('stopTestRun'))
+if options.progress:
+ from bzrlib.tests import TextTestRunner
+ from bzrlib import ui
+ ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
+ runner = TextTestRunner()
+else:
+ runner = unittest.TextTestRunner(verbosity=2)
+if runner.run(test).wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit b/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit
new file mode 100755
index 00000000000..c571972225d
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/filters/tap2subunit
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter that reads a TAP stream and outputs a subunit stream.
+
+More information on TAP is available at
+http://testanything.org/wiki/index.php/Main_Page.
+"""
+
+import sys
+
+from subunit import TAP2SubUnit
+sys.exit(TAP2SubUnit(sys.stdin, sys.stdout))
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py
new file mode 100644
index 00000000000..8352585fb3a
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/__init__.py
@@ -0,0 +1,1320 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Subunit - a streaming test protocol
+
+Overview
+++++++++
+
+The ``subunit`` Python package provides a number of ``unittest`` extensions
+which can be used to cause tests to output Subunit, to parse Subunit streams
+into test activity, perform seamless test isolation within a regular test
+case and variously sort, filter and report on test runs.
+
+
+Key Classes
+-----------
+
+The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
+extension which will translate a test run into a Subunit stream.
+
+The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
+protocol and the ``unittest.TestCase`` object protocol. It is used to translate
+a stream into a test run, which regular ``unittest.TestResult`` objects can
+process and report/inspect.
+
+Subunit has support for non-blocking usage too, for use with asyncore or
+Twisted. See the ``TestProtocolServer`` parser class for more details.
+
+Subunit includes extensions to the Python ``TestResult`` protocol. These are
+all done in a compatible manner: ``TestResult`` objects that do not implement
+the extension methods will not cause errors to be raised; instead the extension
+will either lose fidelity (for instance, folding expected failures to success
+in Python versions < 2.7 or 3.1), or discard the extended data (for extra
+details, tags, timestamping and progress markers).
+
+The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
+``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
+which can be used instead of the usual python unittest parameter.
+When used the value of details should be a dict from ``string`` to
+``testtools.content.Content`` objects. This is a draft API being worked on with
+the Python Testing In Python mailing list, with the goal of permitting a common
+way to provide additional data beyond a traceback, such as captured data from
+disk, logging messages etc. The reference for this API is in testtools (0.9.0
+and newer).
+
+The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
+remove tags in the test run that is currently executing. If called when no
+test is in progress (that is, if called outside of the ``startTest``,
+``stopTest`` pair), the tags apply to all subsequent tests. If called
+when a test is in progress, then the tags only apply to that test.
+
+The ``time(a_datetime)`` method is called (if present) when a ``time:``
+directive is encountered in a Subunit stream. This is used to tell a TestResult
+about the time that events in the stream occurred at, to allow reconstructing
+test timing from a stream.
+
+The ``progress(offset, whence)`` method controls progress data for a stream.
+The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
+subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
+ignore the offset parameter.
+
+
+Python test support
+-------------------
+
+``subunit.run`` is a convenience wrapper to run a Python test suite via
+the command line, reporting via Subunit::
+
+ $ python -m subunit.run mylib.tests.test_suite
+
+The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
+tests, allowing isolation between the test runner and some tests.
+
+Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
+tests that will fork() before that individual test is run.
+
+``ExecTestCase`` is a convenience wrapper for running an external
+program to get a Subunit stream and then report that back to an arbitrary
+result object::
+
+ class AggregateTests(subunit.ExecTestCase):
+
+ def test_script_one(self):
+ './bin/script_one'
+
+ def test_script_two(self):
+ './bin/script_two'
+
+    # Normally your test loading would take care of this automatically,
+ # It is only spelt out in detail here for clarity.
+ suite = unittest.TestSuite([AggregateTests("test_script_one"),
+ AggregateTests("test_script_two")])
+ # Create any TestResult class you like.
+ result = unittest._TextTestResult(sys.stdout)
+ # And run your suite as normal, Subunit will exec each external script as
+ # needed and report to your result object.
+ suite.run(result)
+
+Utility modules
+---------------
+
+* subunit.chunked contains HTTP chunked encoding/decoding logic.
+* subunit.test_results contains TestResult helper classes.
+"""
+
+import os
+import re
+import subprocess
+import sys
+import unittest
+try:
+ from io import UnsupportedOperation as _UnsupportedOperation
+except ImportError:
+ _UnsupportedOperation = AttributeError
+
+from extras import safe_hasattr
+from testtools import content, content_type, ExtendedToOriginalDecorator
+from testtools.content import TracebackContent
+from testtools.compat import _b, _u, BytesIO, StringIO
+try:
+ from testtools.testresult.real import _StringException
+ RemoteException = _StringException
+except ImportError:
+ raise ImportError ("testtools.testresult.real does not contain "
+ "_StringException, check your version.")
+from testtools import testresult, CopyStreamResult
+
+from subunit import chunked, details, iso8601, test_results
+from subunit.v2 import ByteStreamToStreamResult, StreamResultToBytes
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 0, 16, 'final', 0)
+
+PROGRESS_SET = 0
+PROGRESS_CUR = 1
+PROGRESS_PUSH = 2
+PROGRESS_POP = 3
+
+
+def test_suite():
+ import subunit.tests
+ return subunit.tests.test_suite()
+
+
+def join_dir(base_path, path):
+ """
+ Returns an absolute path to C{path}, calculated relative to the parent
+ of C{base_path}.
+
+ @param base_path: A path to a file or directory.
+ @param path: An absolute path, or a path relative to the containing
+ directory of C{base_path}.
+
+ @return: An absolute path to C{path}.
+ """
+ return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
+
+
+def tags_to_new_gone(tags):
+ """Split a list of tags into a new_set and a gone_set."""
+ new_tags = set()
+ gone_tags = set()
+ for tag in tags:
+ if tag[0] == '-':
+ gone_tags.add(tag[1:])
+ else:
+ new_tags.add(tag)
+ return new_tags, gone_tags
+
+
+class DiscardStream(object):
+ """A filelike object which discards what is written to it."""
+
+ def fileno(self):
+ raise _UnsupportedOperation()
+
+ def write(self, bytes):
+ pass
+
+ def read(self, len=0):
+ return _b('')
+
+
+class _ParserState(object):
+ """State for the subunit parser."""
+
+ def __init__(self, parser):
+ self.parser = parser
+ self._test_sym = (_b('test'), _b('testing'))
+ self._colon_sym = _b(':')
+ self._error_sym = (_b('error'),)
+ self._failure_sym = (_b('failure'),)
+ self._progress_sym = (_b('progress'),)
+ self._skip_sym = _b('skip')
+ self._success_sym = (_b('success'), _b('successful'))
+ self._tags_sym = (_b('tags'),)
+ self._time_sym = (_b('time'),)
+ self._xfail_sym = (_b('xfail'),)
+ self._uxsuccess_sym = (_b('uxsuccess'),)
+ self._start_simple = _u(" [")
+ self._start_multipart = _u(" [ multipart")
+
+ def addError(self, offset, line):
+ """An 'error:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addExpectedFail(self, offset, line):
+ """An 'xfail:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addFailure(self, offset, line):
+ """A 'failure:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addSkip(self, offset, line):
+ """A 'skip:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def addSuccess(self, offset, line):
+ """A 'success:' directive has been read."""
+ self.parser.stdOutLineReceived(line)
+
+ def lineReceived(self, line):
+ """a line has been received."""
+ parts = line.split(None, 1)
+ if len(parts) == 2 and line.startswith(parts[0]):
+ cmd, rest = parts
+ offset = len(cmd) + 1
+ cmd = cmd.rstrip(self._colon_sym)
+ if cmd in self._test_sym:
+ self.startTest(offset, line)
+ elif cmd in self._error_sym:
+ self.addError(offset, line)
+ elif cmd in self._failure_sym:
+ self.addFailure(offset, line)
+ elif cmd in self._progress_sym:
+ self.parser._handleProgress(offset, line)
+ elif cmd in self._skip_sym:
+ self.addSkip(offset, line)
+ elif cmd in self._success_sym:
+ self.addSuccess(offset, line)
+ elif cmd in self._tags_sym:
+ self.parser._handleTags(offset, line)
+ self.parser.subunitLineReceived(line)
+ elif cmd in self._time_sym:
+ self.parser._handleTime(offset, line)
+ self.parser.subunitLineReceived(line)
+ elif cmd in self._xfail_sym:
+ self.addExpectedFail(offset, line)
+ elif cmd in self._uxsuccess_sym:
+ self.addUnexpectedSuccess(offset, line)
+ else:
+ self.parser.stdOutLineReceived(line)
+ else:
+ self.parser.stdOutLineReceived(line)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(_u('unknown state of '))
+
+ def startTest(self, offset, line):
+ """A test start command received."""
+ self.parser.stdOutLineReceived(line)
+
+
+class _InTest(_ParserState):
+ """State for the subunit parser after reading a test: directive."""
+
+ def _outcome(self, offset, line, no_details, details_state):
+ """An outcome directive has been read.
+
+ :param no_details: Callable to call when no details are presented.
+ :param details_state: The state to switch to for details
+ processing of this outcome.
+ """
+ test_name = line[offset:-1].decode('utf8')
+ if self.parser.current_test_description == test_name:
+ self.parser._state = self.parser._outside_test
+ self.parser.current_test_description = None
+ no_details()
+ self.parser.client.stopTest(self.parser._current_test)
+ self.parser._current_test = None
+ self.parser.subunitLineReceived(line)
+ elif self.parser.current_test_description + self._start_simple == \
+ test_name:
+ self.parser._state = details_state
+ details_state.set_simple()
+ self.parser.subunitLineReceived(line)
+ elif self.parser.current_test_description + self._start_multipart == \
+ test_name:
+ self.parser._state = details_state
+ details_state.set_multipart()
+ self.parser.subunitLineReceived(line)
+ else:
+ self.parser.stdOutLineReceived(line)
+
+ def _error(self):
+ self.parser.client.addError(self.parser._current_test,
+ details={})
+
+ def addError(self, offset, line):
+ """An 'error:' directive has been read."""
+ self._outcome(offset, line, self._error,
+ self.parser._reading_error_details)
+
+ def _xfail(self):
+ self.parser.client.addExpectedFailure(self.parser._current_test,
+ details={})
+
+ def addExpectedFail(self, offset, line):
+ """An 'xfail:' directive has been read."""
+ self._outcome(offset, line, self._xfail,
+ self.parser._reading_xfail_details)
+
+ def _uxsuccess(self):
+ self.parser.client.addUnexpectedSuccess(self.parser._current_test)
+
+ def addUnexpectedSuccess(self, offset, line):
+ """A 'uxsuccess:' directive has been read."""
+ self._outcome(offset, line, self._uxsuccess,
+ self.parser._reading_uxsuccess_details)
+
+ def _failure(self):
+ self.parser.client.addFailure(self.parser._current_test, details={})
+
+ def addFailure(self, offset, line):
+ """A 'failure:' directive has been read."""
+ self._outcome(offset, line, self._failure,
+ self.parser._reading_failure_details)
+
+ def _skip(self):
+ self.parser.client.addSkip(self.parser._current_test, details={})
+
+ def addSkip(self, offset, line):
+ """A 'skip:' directive has been read."""
+ self._outcome(offset, line, self._skip,
+ self.parser._reading_skip_details)
+
+ def _succeed(self):
+ self.parser.client.addSuccess(self.parser._current_test, details={})
+
+ def addSuccess(self, offset, line):
+ """A 'success:' directive has been read."""
+ self._outcome(offset, line, self._succeed,
+ self.parser._reading_success_details)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(_u(''))
+
+
+class _OutSideTest(_ParserState):
+ """State for the subunit parser outside of a test context."""
+
+ def lostConnection(self):
+ """Connection lost."""
+
+ def startTest(self, offset, line):
+ """A test start command received."""
+ self.parser._state = self.parser._in_test
+ test_name = line[offset:-1].decode('utf8')
+ self.parser._current_test = RemotedTestCase(test_name)
+ self.parser.current_test_description = test_name
+ self.parser.client.startTest(self.parser._current_test)
+ self.parser.subunitLineReceived(line)
+
+
+class _ReadingDetails(_ParserState):
+ """Common logic for readin state details."""
+
+ def endDetails(self):
+ """The end of a details section has been reached."""
+ self.parser._state = self.parser._outside_test
+ self.parser.current_test_description = None
+ self._report_outcome()
+ self.parser.client.stopTest(self.parser._current_test)
+
+ def lineReceived(self, line):
+ """a line has been received."""
+ self.details_parser.lineReceived(line)
+ self.parser.subunitLineReceived(line)
+
+ def lostConnection(self):
+ """Connection lost."""
+ self.parser._lostConnectionInTest(_u('%s report of ') %
+ self._outcome_label())
+
+ def _outcome_label(self):
+ """The label to describe this outcome."""
+ raise NotImplementedError(self._outcome_label)
+
+ def set_simple(self):
+ """Start a simple details parser."""
+ self.details_parser = details.SimpleDetailsParser(self)
+
+ def set_multipart(self):
+ """Start a multipart details parser."""
+ self.details_parser = details.MultipartDetailsParser(self)
+
+
+class _ReadingFailureDetails(_ReadingDetails):
+ """State for the subunit parser when reading failure details."""
+
+ def _report_outcome(self):
+ self.parser.client.addFailure(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "failure"
+
+
+class _ReadingErrorDetails(_ReadingDetails):
+ """State for the subunit parser when reading error details."""
+
+ def _report_outcome(self):
+ self.parser.client.addError(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "error"
+
+
+class _ReadingExpectedFailureDetails(_ReadingDetails):
+ """State for the subunit parser when reading xfail details."""
+
+ def _report_outcome(self):
+ self.parser.client.addExpectedFailure(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "xfail"
+
+
+class _ReadingUnexpectedSuccessDetails(_ReadingDetails):
+ """State for the subunit parser when reading uxsuccess details."""
+
+ def _report_outcome(self):
+ self.parser.client.addUnexpectedSuccess(self.parser._current_test,
+ details=self.details_parser.get_details())
+
+ def _outcome_label(self):
+ return "uxsuccess"
+
+
+class _ReadingSkipDetails(_ReadingDetails):
+ """State for the subunit parser when reading skip details."""
+
+ def _report_outcome(self):
+ self.parser.client.addSkip(self.parser._current_test,
+ details=self.details_parser.get_details("skip"))
+
+ def _outcome_label(self):
+ return "skip"
+
+
+class _ReadingSuccessDetails(_ReadingDetails):
+ """State for the subunit parser when reading success details."""
+
+ def _report_outcome(self):
+ self.parser.client.addSuccess(self.parser._current_test,
+ details=self.details_parser.get_details("success"))
+
+ def _outcome_label(self):
+ return "success"
+
+
+class TestProtocolServer(object):
+ """A parser for subunit.
+
+ :ivar tags: The current tags associated with the protocol stream.
+ """
+
+ def __init__(self, client, stream=None, forward_stream=None):
+ """Create a TestProtocolServer instance.
+
+ :param client: An object meeting the unittest.TestResult protocol.
+ :param stream: The stream that lines received which are not part of the
+ subunit protocol should be written to. This allows custom handling
+ of mixed protocols. By default, sys.stdout will be used for
+ convenience. It should accept bytes to its write() method.
+ :param forward_stream: A stream to forward subunit lines to. This
+ allows a filter to forward the entire stream while still parsing
+ and acting on it. By default forward_stream is set to
+ DiscardStream() and no forwarding happens.
+ """
+ self.client = ExtendedToOriginalDecorator(client)
+ if stream is None:
+ stream = sys.stdout
+ if sys.version_info > (3, 0):
+ stream = stream.buffer
+ self._stream = stream
+ self._forward_stream = forward_stream or DiscardStream()
+ # state objects we can switch too
+ self._in_test = _InTest(self)
+ self._outside_test = _OutSideTest(self)
+ self._reading_error_details = _ReadingErrorDetails(self)
+ self._reading_failure_details = _ReadingFailureDetails(self)
+ self._reading_skip_details = _ReadingSkipDetails(self)
+ self._reading_success_details = _ReadingSuccessDetails(self)
+ self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
+ self._reading_uxsuccess_details = _ReadingUnexpectedSuccessDetails(self)
+ # start with outside test.
+ self._state = self._outside_test
+ # Avoid casts on every call
+ self._plusminus = _b('+-')
+ self._push_sym = _b('push')
+ self._pop_sym = _b('pop')
+
+ def _handleProgress(self, offset, line):
+ """Process a progress directive."""
+ line = line[offset:].strip()
+ if line[0] in self._plusminus:
+ whence = PROGRESS_CUR
+ delta = int(line)
+ elif line == self._push_sym:
+ whence = PROGRESS_PUSH
+ delta = None
+ elif line == self._pop_sym:
+ whence = PROGRESS_POP
+ delta = None
+ else:
+ whence = PROGRESS_SET
+ delta = int(line)
+ self.client.progress(delta, whence)
+
+ def _handleTags(self, offset, line):
+ """Process a tags command."""
+ tags = line[offset:].decode('utf8').split()
+ new_tags, gone_tags = tags_to_new_gone(tags)
+ self.client.tags(new_tags, gone_tags)
+
+ def _handleTime(self, offset, line):
+ # Accept it, but do not do anything with it yet.
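+        # For example, b"time: 2013-12-20 10:27:28.000000Z\n" (a hypothetical
+        # timestamp, in the format written by TestProtocolClient.time) is
+        # parsed to a datetime and passed to the client.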
+ try:
+ event_time = iso8601.parse_date(line[offset:-1])
+ except TypeError:
+ raise TypeError(_u("Failed to parse %r, got %r")
+                % (line, sys.exc_info()[1]))
+ self.client.time(event_time)
+
+ def lineReceived(self, line):
+ """Call the appropriate local method for the received line."""
+ self._state.lineReceived(line)
+
+ def _lostConnectionInTest(self, state_string):
+ error_string = _u("lost connection during %stest '%s'") % (
+ state_string, self.current_test_description)
+ self.client.addError(self._current_test, RemoteError(error_string))
+ self.client.stopTest(self._current_test)
+
+ def lostConnection(self):
+ """The input connection has finished."""
+ self._state.lostConnection()
+
+ def readFrom(self, pipe):
+ """Blocking convenience API to parse an entire stream.
+
+ :param pipe: A file-like object supporting readlines().
+ :return: None.
+ """
+ for line in pipe.readlines():
+ self.lineReceived(line)
+ self.lostConnection()
+
+ def _startTest(self, offset, line):
+ """Internal call to change state machine. Override startTest()."""
+ self._state.startTest(offset, line)
+
+ def subunitLineReceived(self, line):
+ self._forward_stream.write(line)
+
+ def stdOutLineReceived(self, line):
+ self._stream.write(line)
+
+
+class TestProtocolClient(testresult.TestResult):
+ """A TestResult which generates a subunit stream for a test run.
+
+ # Get a TestSuite or TestCase to run
+ suite = make_suite()
+ # Create a stream (any object with a 'write' method). This should accept
+ # bytes not strings: subunit is a byte orientated protocol.
+ stream = file('tests.log', 'wb')
+ # Create a subunit result object which will output to the stream
+ result = subunit.TestProtocolClient(stream)
+ # Optionally, to get timing data for performance analysis, wrap the
+ # serialiser with a timing decorator
+ result = subunit.test_results.AutoTimingTestResultDecorator(result)
+ # Run the test suite reporting to the subunit result object
+ suite.run(result)
+ # Close the stream.
+ stream.close()
+ """
+
+ def __init__(self, stream):
+ testresult.TestResult.__init__(self)
+ stream = make_stream_binary(stream)
+ self._stream = stream
+ self._progress_fmt = _b("progress: ")
+ self._bytes_eol = _b("\n")
+ self._progress_plus = _b("+")
+ self._progress_push = _b("push")
+ self._progress_pop = _b("pop")
+ self._empty_bytes = _b("")
+ self._start_simple = _b(" [\n")
+ self._end_simple = _b("]\n")
+
+ def addError(self, test, error=None, details=None):
+ """Report an error in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addError(self, test, error)
+ addError(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("error", test, error=error, details=details)
+ if self.failfast:
+ self.stop()
+
+ def addExpectedFailure(self, test, error=None, details=None):
+ """Report an expected failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+            addExpectedFailure(self, test, error)
+            addExpectedFailure(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("xfail", test, error=error, details=details)
+
+ def addFailure(self, test, error=None, details=None):
+ """Report a failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addFailure(self, test, error)
+ addFailure(self, test, details)
+
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("failure", test, error=error, details=details)
+ if self.failfast:
+ self.stop()
+
+ def _addOutcome(self, outcome, test, error=None, details=None,
+ error_permitted=True):
+ """Report a failure in test test.
+
+ Only one of error and details should be provided: conceptually there
+ are two separate methods:
+ addOutcome(self, test, error)
+ addOutcome(self, test, details)
+
+ :param outcome: A string describing the outcome - used as the
+ event name in the subunit stream.
+ :param error: Standard unittest positional argument form - an
+ exc_info tuple.
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ :param error_permitted: If True then one and only one of error or
+ details must be supplied. If False then error must not be supplied
+ and details is still optional. """
+ self._stream.write(_b("%s: " % outcome) + self._test_id(test))
+ if error_permitted:
+ if error is None and details is None:
+ raise ValueError
+ else:
+ if error is not None:
+ raise ValueError
+ if error is not None:
+ self._stream.write(self._start_simple)
+ tb_content = TracebackContent(error, test)
+ for bytes in tb_content.iter_bytes():
+ self._stream.write(bytes)
+ elif details is not None:
+ self._write_details(details)
+ else:
+ self._stream.write(_b("\n"))
+ if details is not None or error is not None:
+ self._stream.write(self._end_simple)
+
+ def addSkip(self, test, reason=None, details=None):
+ """Report a skipped test."""
+ if reason is None:
+ self._addOutcome("skip", test, error=None, details=details)
+ else:
+ self._stream.write(_b("skip: %s [\n" % test.id()))
+ self._stream.write(_b("%s\n" % reason))
+ self._stream.write(self._end_simple)
+
+ def addSuccess(self, test, details=None):
+ """Report a success in a test."""
+ self._addOutcome("successful", test, details=details, error_permitted=False)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ """Report an unexpected success in test test.
+
+ Details can optionally be provided: conceptually there
+ are two separate methods:
+            addUnexpectedSuccess(self, test)
+            addUnexpectedSuccess(self, test, details)
+
+ :param details: New Testing-in-python drafted API; a dict from string
+ to subunit.Content objects.
+ """
+ self._addOutcome("uxsuccess", test, details=details,
+ error_permitted=False)
+ if self.failfast:
+ self.stop()
+
+ def _test_id(self, test):
+ result = test.id()
+ if type(result) is not bytes:
+ result = result.encode('utf8')
+ return result
+
+ def startTest(self, test):
+ """Mark a test as starting its test run."""
+ super(TestProtocolClient, self).startTest(test)
+ self._stream.write(_b("test: ") + self._test_id(test) + _b("\n"))
+ self._stream.flush()
+
+ def stopTest(self, test):
+ super(TestProtocolClient, self).stopTest(test)
+ self._stream.flush()
+
+ def progress(self, offset, whence):
+ """Provide indication about the progress/length of the test run.
+
+ :param offset: Information about the number of tests remaining. If
+ whence is PROGRESS_CUR, then offset increases/decreases the
+ remaining test count. If whence is PROGRESS_SET, then offset
+ specifies exactly the remaining test count.
+ :param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
+ PROGRESS_POP.
+ """
+ if whence == PROGRESS_CUR and offset > -1:
+ prefix = self._progress_plus
+ offset = _b(str(offset))
+ elif whence == PROGRESS_PUSH:
+ prefix = self._empty_bytes
+ offset = self._progress_push
+ elif whence == PROGRESS_POP:
+ prefix = self._empty_bytes
+ offset = self._progress_pop
+ else:
+ prefix = self._empty_bytes
+ offset = _b(str(offset))
+ self._stream.write(self._progress_fmt + prefix + offset +
+ self._bytes_eol)
+
+ def tags(self, new_tags, gone_tags):
+ """Inform the client about tags added/removed from the stream."""
+ if not new_tags and not gone_tags:
+ return
+ tags = set([tag.encode('utf8') for tag in new_tags])
+ tags.update([_b("-") + tag.encode('utf8') for tag in gone_tags])
+ tag_line = _b("tags: ") + _b(" ").join(tags) + _b("\n")
+ self._stream.write(tag_line)
+
+ def time(self, a_datetime):
+ """Inform the client of the time.
+
+ ":param datetime: A datetime.datetime object.
+ """
+ time = a_datetime.astimezone(iso8601.Utc())
+ self._stream.write(_b("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
+ time.year, time.month, time.day, time.hour, time.minute,
+ time.second, time.microsecond)))
+
+ def _write_details(self, details):
+ """Output details to the stream.
+
+ :param details: An extended details dict for a test outcome.
+ """
+ self._stream.write(_b(" [ multipart\n"))
+ for name, content in sorted(details.items()):
+ self._stream.write(_b("Content-Type: %s/%s" %
+ (content.content_type.type, content.content_type.subtype)))
+ parameters = content.content_type.parameters
+ if parameters:
+ self._stream.write(_b(";"))
+ param_strs = []
+ for param, value in parameters.items():
+ param_strs.append("%s=%s" % (param, value))
+ self._stream.write(_b(",".join(param_strs)))
+ self._stream.write(_b("\n%s\n" % name))
+ encoder = chunked.Encoder(self._stream)
+ list(map(encoder.write, content.iter_bytes()))
+ encoder.close()
+
+ def done(self):
+ """Obey the testtools result.done() interface."""
+
+
+def RemoteError(description=_u("")):
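+    # Build an exc_info-style (type, value, traceback) tuple describing a
+    # remote failure; the traceback element is always None.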
+ return (_StringException, _StringException(description), None)
+
+
+class RemotedTestCase(unittest.TestCase):
+ """A class to represent test cases run in child processes.
+
+ Instances of this class are used to provide the Python test API a TestCase
+ that can be printed to the screen, introspected for metadata and so on.
+    However, as they are simply a memoisation of a test that was actually
+ run in the past by a separate process, they cannot perform any interactive
+ actions.
+ """
+
+ def __eq__ (self, other):
+ try:
+ return self.__description == other.__description
+ except AttributeError:
+ return False
+
+ def __init__(self, description):
+ """Create a psuedo test case with description description."""
+ self.__description = description
+
+ def error(self, label):
+ raise NotImplementedError("%s on RemotedTestCases is not permitted." %
+ label)
+
+ def setUp(self):
+ self.error("setUp")
+
+ def tearDown(self):
+ self.error("tearDown")
+
+ def shortDescription(self):
+ return self.__description
+
+ def id(self):
+ return "%s" % (self.__description,)
+
+ def __str__(self):
+ return "%s (%s)" % (self.__description, self._strclass())
+
+ def __repr__(self):
+ return "<%s description='%s'>" % \
+ (self._strclass(), self.__description)
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ result.startTest(self)
+ result.addError(self, RemoteError(_u("Cannot run RemotedTestCases.\n")))
+ result.stopTest(self)
+
+ def _strclass(self):
+ cls = self.__class__
+ return "%s.%s" % (cls.__module__, cls.__name__)
+
+
+class ExecTestCase(unittest.TestCase):
+ """A test case which runs external scripts for test fixtures."""
+
+ def __init__(self, methodName='runTest'):
+ """Create an instance of the class that will use the named test
+ method when executed. Raises a ValueError if the instance does
+ not have a method with the specified name.
+ """
+ unittest.TestCase.__init__(self, methodName)
+ testMethod = getattr(self, methodName)
+ self.script = join_dir(sys.modules[self.__class__.__module__].__file__,
+ testMethod.__doc__)
+
+ def countTestCases(self):
+ return 1
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ self._run(result)
+
+ def debug(self):
+ """Run the test without collecting errors in a TestResult"""
+ self._run(testresult.TestResult())
+
+ def _run(self, result):
+ protocol = TestProtocolServer(result)
+ process = subprocess.Popen(self.script, shell=True,
+ stdout=subprocess.PIPE)
+ make_stream_binary(process.stdout)
+ output = process.communicate()[0]
+ protocol.readFrom(BytesIO(output))
+
+
+class IsolatedTestCase(unittest.TestCase):
+ """A TestCase which executes in a forked process.
+
+ Each test gets its own process, which has a performance overhead but will
+ provide excellent isolation from global state (such as django configs,
+ zope utilities and so on).
+ """
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ run_isolated(unittest.TestCase, self, result)
+
+
+class IsolatedTestSuite(unittest.TestSuite):
+ """A TestSuite which runs its tests in a forked process.
+
+ This decorator that will fork() before running the tests and report the
+ results from the child process using a Subunit stream. This is useful for
+ handling tests that mutate global state, or are testing C extensions that
+ could crash the VM.
+ """
+
+ def run(self, result=None):
+ if result is None: result = testresult.TestResult()
+ run_isolated(unittest.TestSuite, self, result)
+
+
+def run_isolated(klass, self, result):
+ """Run a test suite or case in a subprocess, using the run method on klass.
+ """
+ c2pread, c2pwrite = os.pipe()
+ # fixme - error -> result
+ # now fork
+ pid = os.fork()
+ if pid == 0:
+ # Child
+ # Close parent's pipe ends
+ os.close(c2pread)
+ # Dup fds for child
+ os.dup2(c2pwrite, 1)
+ # Close pipe fds.
+ os.close(c2pwrite)
+
+        # at this point, sys.stdout is redirected; now we want
+ # to filter it to escape ]'s.
+ ### XXX: test and write that bit.
+ stream = os.fdopen(1, 'wb')
+ result = TestProtocolClient(stream)
+ klass.run(self, result)
+ stream.flush()
+ sys.stderr.flush()
+ # exit HARD, exit NOW.
+ os._exit(0)
+ else:
+ # Parent
+ # Close child pipe ends
+ os.close(c2pwrite)
+ # hookup a protocol engine
+ protocol = TestProtocolServer(result)
+ fileobj = os.fdopen(c2pread, 'rb')
+ protocol.readFrom(fileobj)
+ os.waitpid(pid, 0)
+ # TODO return code evaluation.
+ return result
+
+
+def TAP2SubUnit(tap, output_stream):
+ """Filter a TAP pipe into a subunit pipe.
+
+ This should be invoked once per TAP script, as TAP scripts get
+ mapped to a single runnable case with multiple components.
+
+ :param tap: A tap pipe/stream/file object - should emit unicode strings.
+    :param output_stream: A pipe/stream/file object to write subunit results to.
+ :return: The exit code to exit with.
+ """
+ output = StreamResultToBytes(output_stream)
+ UTF8_TEXT = 'text/plain; charset=UTF8'
+ BEFORE_PLAN = 0
+ AFTER_PLAN = 1
+ SKIP_STREAM = 2
+ state = BEFORE_PLAN
+ plan_start = 1
+ plan_stop = 0
+ # Test data for the next test to emit
+ test_name = None
+ log = []
+ result = None
+ def missing_test(plan_start):
+ output.status(test_id='test %d' % plan_start,
+ test_status='fail', runnable=False,
+ mime_type=UTF8_TEXT, eof=True, file_name="tap meta",
+ file_bytes=b"test missing from TAP output")
+ def _emit_test():
+ "write out a test"
+ if test_name is None:
+ return
+ if log:
+ log_bytes = b'\n'.join(log_line.encode('utf8') for log_line in log)
+ mime_type = UTF8_TEXT
+ file_name = 'tap comment'
+ eof = True
+ else:
+ log_bytes = None
+ mime_type = None
+ file_name = None
+ eof = True
+ del log[:]
+ output.status(test_id=test_name, test_status=result,
+ file_bytes=log_bytes, mime_type=mime_type, eof=eof,
+ file_name=file_name, runnable=False)
+ for line in tap:
+ if state == BEFORE_PLAN:
+ match = re.match("(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
+ if match:
+ state = AFTER_PLAN
+ _, plan_stop, comment = match.groups()
+ plan_stop = int(plan_stop)
+ if plan_start > plan_stop and plan_stop == 0:
+ # skipped file
+ state = SKIP_STREAM
+ output.status(test_id='file skip', test_status='skip',
+ file_bytes=comment.encode('utf8'), eof=True,
+ file_name='tap comment')
+ continue
+ # not a plan line, or have seen one before
+ match = re.match("(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
+ if match:
+ # new test, emit current one.
+ _emit_test()
+ status, number, description, directive, directive_comment = match.groups()
+ if status == 'ok':
+ result = 'success'
+ else:
+ result = "fail"
+ if description is None:
+ description = ''
+ else:
+ description = ' ' + description
+ if directive is not None:
+ if directive.upper() == 'TODO':
+ result = 'xfail'
+ elif directive.upper() == 'SKIP':
+ result = 'skip'
+ if directive_comment is not None:
+ log.append(directive_comment)
+ if number is not None:
+ number = int(number)
+ while plan_start < number:
+ missing_test(plan_start)
+ plan_start += 1
+ test_name = "test %d%s" % (plan_start, description)
+ plan_start += 1
+ continue
+ match = re.match("Bail out\!(?:\s*(.*))?\n", line)
+ if match:
+ reason, = match.groups()
+ if reason is None:
+ extra = ''
+ else:
+ extra = ' %s' % reason
+ _emit_test()
+ test_name = "Bail out!%s" % extra
+ result = "fail"
+ state = SKIP_STREAM
+ continue
+ match = re.match("\#.*\n", line)
+ if match:
+ log.append(line[:-1])
+ continue
+ # Should look at buffering status and binding this to the prior result.
+ output.status(file_bytes=line.encode('utf8'), file_name='stdout',
+ mime_type=UTF8_TEXT)
+ _emit_test()
+ while plan_start <= plan_stop:
+ # record missed tests
+ missing_test(plan_start)
+ plan_start += 1
+ return 0
+
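+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# Feeds a tiny, invented TAP transcript through TAP2SubUnit and captures the
+# resulting subunit v2 byte stream in memory.
+def _example_tap2subunit():
+    from io import BytesIO
+    tap_lines = iter([u"1..2\n", u"ok 1 first\n", u"not ok 2 second\n"])
+    out = BytesIO()
+    TAP2SubUnit(tap_lines, out)
+    return out.getvalue()  # subunit v2 events for one passing and one failing test
+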
+
+def tag_stream(original, filtered, tags):
+ """Alter tags on a stream.
+
+ :param original: The input stream.
+ :param filtered: The output stream.
+ :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
+ '-TAG' commands.
+
+ A 'TAG' command will add the tag to the output stream,
+ and override any existing '-TAG' command in that stream.
+ Specifically:
+ * A global 'tags: TAG' will be added to the start of the stream.
+ * Any tags commands with -TAG will have the -TAG removed.
+
+ A '-TAG' command will remove the TAG command from the stream.
+ Specifically:
+ * A 'tags: -TAG' command will be added to the start of the stream.
+ * Any 'tags: TAG' command will have 'TAG' removed from it.
+ Additionally, any redundant tagging commands (adding a tag globally
+ present, or removing a tag globally removed) are stripped as a
+ by-product of the filtering.
+ :return: 0
+ """
+ new_tags, gone_tags = tags_to_new_gone(tags)
+ source = ByteStreamToStreamResult(original, non_subunit_name='stdout')
+ class Tagger(CopyStreamResult):
+ def status(self, **kwargs):
+ tags = kwargs.get('test_tags')
+ if not tags:
+ tags = set()
+ tags.update(new_tags)
+ tags.difference_update(gone_tags)
+ if tags:
+ kwargs['test_tags'] = tags
+ else:
+ kwargs['test_tags'] = None
+ super(Tagger, self).status(**kwargs)
+ output = Tagger([StreamResultToBytes(filtered)])
+ source.run(output)
+ return 0
+
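+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# Re-tags an in-memory subunit v2 stream with tag_stream; the tag names
+# 'quick' and '-slow' are invented for the example.
+def _example_tag_stream():
+    from io import BytesIO
+    source = BytesIO()
+    StreamResultToBytes(source).status(test_id='test_a', test_status='success')
+    source.seek(0)
+    retagged = BytesIO()
+    tag_stream(source, retagged, ['quick', '-slow'])
+    return retagged.getvalue()  # same events, now tagged 'quick'
+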
+
+class ProtocolTestCase(object):
+ """Subunit wire protocol to unittest.TestCase adapter.
+
+ ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
+ calling a ProtocolTestCase or invoking the run() method will make a 'test
+ run' happen. The 'test run' will simply be a replay of the test activity
+ that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
+ and ``countTestCases`` methods are not supported because there isn't a
+ sensible mapping for those methods.
+
+ # Get a stream (any object with a readline() method), in this case the
+ # stream output by the example from ``subunit.TestProtocolClient``.
+ stream = file('tests.log', 'rb')
+ # Create a parser which will read from the stream and emit
+ # activity to a unittest.TestResult when run() is called.
+ suite = subunit.ProtocolTestCase(stream)
+ # Create a result object to accept the contents of that stream.
+ result = unittest._TextTestResult(sys.stdout)
+ # 'run' the tests - process the stream and feed its contents to result.
+ suite.run(result)
+ stream.close()
+
+ :seealso: TestProtocolServer (the subunit wire protocol parser).
+ """
+
+ def __init__(self, stream, passthrough=None, forward=None):
+ """Create a ProtocolTestCase reading from stream.
+
+ :param stream: A filelike object which a subunit stream can be read
+ from.
+        :param passthrough: A stream to pass non-subunit input on to. If not
+            supplied, the TestProtocolServer default is used.
+ :param forward: A stream to pass subunit input on to. If not supplied
+ subunit input is not forwarded.
+ """
+ stream = make_stream_binary(stream)
+ self._stream = stream
+ self._passthrough = passthrough
+ if forward is not None:
+ forward = make_stream_binary(forward)
+ self._forward = forward
+
+ def __call__(self, result=None):
+ return self.run(result)
+
+ def run(self, result=None):
+ if result is None:
+ result = self.defaultTestResult()
+ protocol = TestProtocolServer(result, self._passthrough, self._forward)
+ line = self._stream.readline()
+ while line:
+ protocol.lineReceived(line)
+ line = self._stream.readline()
+ protocol.lostConnection()
+
+
+class TestResultStats(testresult.TestResult):
+ """A pyunit TestResult interface implementation for making statistics.
+
+ :ivar total_tests: The total tests seen.
+ :ivar passed_tests: The tests that passed.
+ :ivar failed_tests: The tests that failed.
+ :ivar seen_tags: The tags seen across all tests.
+ """
+
+ def __init__(self, stream):
+ """Create a TestResultStats which outputs to stream."""
+ testresult.TestResult.__init__(self)
+ self._stream = stream
+ self.failed_tests = 0
+ self.skipped_tests = 0
+ self.seen_tags = set()
+
+ @property
+ def total_tests(self):
+ return self.testsRun
+
+ def addError(self, test, err, details=None):
+ self.failed_tests += 1
+
+ def addFailure(self, test, err, details=None):
+ self.failed_tests += 1
+
+ def addSkip(self, test, reason, details=None):
+ self.skipped_tests += 1
+
+ def formatStats(self):
+ self._stream.write("Total tests: %5d\n" % self.total_tests)
+ self._stream.write("Passed tests: %5d\n" % self.passed_tests)
+ self._stream.write("Failed tests: %5d\n" % self.failed_tests)
+ self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
+ tags = sorted(self.seen_tags)
+ self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
+
+ @property
+ def passed_tests(self):
+ return self.total_tests - self.failed_tests - self.skipped_tests
+
+ def tags(self, new_tags, gone_tags):
+ """Accumulate the seen tags."""
+ self.seen_tags.update(new_tags)
+
+ def wasSuccessful(self):
+ """Tells whether or not this result was a success"""
+ return self.failed_tests == 0
+
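+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# Replays a previously recorded subunit log (the 'tests.log' path is the same
+# invented name used in the ProtocolTestCase docstring above) through
+# TestResultStats and prints the aggregate counts.
+def _example_count_results():
+    stats = TestResultStats(sys.stdout)
+    case = ProtocolTestCase(open('tests.log', 'rb'))
+    case.run(stats)
+    stats.formatStats()
+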
+
+def get_default_formatter():
+ """Obtain the default formatter to write to.
+
+ :return: A file-like object.
+ """
+ formatter = os.getenv("SUBUNIT_FORMATTER")
+ if formatter:
+ return os.popen(formatter, "w")
+ else:
+ stream = sys.stdout
+ if sys.version_info > (3, 0):
+ if safe_hasattr(stream, 'buffer'):
+ stream = stream.buffer
+ return stream
+
+
+def read_test_list(path):
+ """Read a list of test ids from a file on disk.
+
+ :param path: Path to the file
+ :return: Sequence of test ids
+ """
+ f = open(path, 'rb')
+ try:
+ return [l.rstrip("\n") for l in f.readlines()]
+ finally:
+ f.close()
+
+
+def make_stream_binary(stream):
+ """Ensure that a stream will be binary safe. See _make_binary_on_windows.
+
+ :return: A binary version of the same stream (some streams cannot be
+ 'fixed' but can be unwrapped).
+ """
+ try:
+ fileno = stream.fileno()
+ except (_UnsupportedOperation, AttributeError):
+ pass
+ else:
+ _make_binary_on_windows(fileno)
+ return _unwrap_text(stream)
+
+
+def _make_binary_on_windows(fileno):
+ """Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
+ if sys.platform == "win32":
+ import msvcrt
+ msvcrt.setmode(fileno, os.O_BINARY)
+
+
+def _unwrap_text(stream):
+ """Unwrap stream if it is a text stream to get the original buffer."""
+ if sys.version_info > (3, 0):
+ unicode_type = str
+ else:
+ unicode_type = unicode
+ try:
+ # Read streams
+ if type(stream.read(0)) is unicode_type:
+ return stream.buffer
+ except (_UnsupportedOperation, IOError):
+ # Cannot read from the stream: try via writes
+ try:
+ stream.write(_b(''))
+ except TypeError:
+ return stream.buffer
+ return stream
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py
new file mode 100644
index 00000000000..b9921291ea2
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/chunked.py
@@ -0,0 +1,185 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Encoder/decoder for http style chunked encoding."""
+
+from testtools.compat import _b
+
+empty = _b('')
+
+class Decoder(object):
+ """Decode chunked content to a byte stream."""
+
+ def __init__(self, output, strict=True):
+ """Create a decoder decoding to output.
+
+ :param output: A file-like object. Bytes written to the Decoder are
+ decoded to strip off the chunking and written to the output.
+ Up to a full write worth of data or a single control line may be
+ buffered (whichever is larger). The close method should be called
+ when no more data is available, to detect short streams; the
+            write method will return non-None when the end of a stream is
+ detected. The output object must accept bytes objects.
+
+ :param strict: If True (the default), the decoder will not knowingly
+ accept input that is not conformant to the HTTP specification.
+ (This does not imply that it will catch every nonconformance.)
+ If False, it will accept incorrect input that is still
+ unambiguous.
+ """
+ self.output = output
+ self.buffered_bytes = []
+ self.state = self._read_length
+ self.body_length = 0
+ self.strict = strict
+ self._match_chars = _b("0123456789abcdefABCDEF\r\n")
+ self._slash_n = _b('\n')
+ self._slash_r = _b('\r')
+ self._slash_rn = _b('\r\n')
+ self._slash_nr = _b('\n\r')
+
+ def close(self):
+ """Close the decoder.
+
+        :raises ValueError: If the stream is incomplete.
+ """
+ if self.state != self._finished:
+ raise ValueError("incomplete stream")
+
+ def _finished(self):
+ """Finished reading, return any remaining bytes."""
+ if self.buffered_bytes:
+ buffered_bytes = self.buffered_bytes
+ self.buffered_bytes = []
+ return empty.join(buffered_bytes)
+ else:
+ raise ValueError("stream is finished")
+
+ def _read_body(self):
+ """Pass body bytes to the output."""
+ while self.body_length and self.buffered_bytes:
+ if self.body_length >= len(self.buffered_bytes[0]):
+ self.output.write(self.buffered_bytes[0])
+ self.body_length -= len(self.buffered_bytes[0])
+ del self.buffered_bytes[0]
+ # No more data available.
+ if not self.body_length:
+ self.state = self._read_length
+ else:
+ self.output.write(self.buffered_bytes[0][:self.body_length])
+ self.buffered_bytes[0] = \
+ self.buffered_bytes[0][self.body_length:]
+ self.body_length = 0
+ self.state = self._read_length
+ return self.state()
+
+ def _read_length(self):
+ """Try to decode a length from the bytes."""
+ count_chars = []
+ for bytes in self.buffered_bytes:
+ for pos in range(len(bytes)):
+ byte = bytes[pos:pos+1]
+ if byte not in self._match_chars:
+ break
+ count_chars.append(byte)
+ if byte == self._slash_n:
+ break
+ if not count_chars:
+ return
+ if count_chars[-1] != self._slash_n:
+ return
+ count_str = empty.join(count_chars)
+ if self.strict:
+ if count_str[-2:] != self._slash_rn:
+ raise ValueError("chunk header invalid: %r" % count_str)
+ if self._slash_r in count_str[:-2]:
+ raise ValueError("too many CRs in chunk header %r" % count_str)
+ self.body_length = int(count_str.rstrip(self._slash_nr), 16)
+ excess_bytes = len(count_str)
+ while excess_bytes:
+ if excess_bytes >= len(self.buffered_bytes[0]):
+ excess_bytes -= len(self.buffered_bytes[0])
+ del self.buffered_bytes[0]
+ else:
+ self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
+ excess_bytes = 0
+ if not self.body_length:
+ self.state = self._finished
+ if not self.buffered_bytes:
+ # May not call into self._finished with no buffered data.
+ return empty
+ else:
+ self.state = self._read_body
+ return self.state()
+
+ def write(self, bytes):
+ """Decode bytes to the output stream.
+
+ :raises ValueError: If the stream has already seen the end of file
+ marker.
+ :returns: None, or the excess bytes beyond the end of file marker.
+ """
+ if bytes:
+ self.buffered_bytes.append(bytes)
+ return self.state()
+
+
+class Encoder(object):
+ """Encode content to a stream using HTTP Chunked coding."""
+
+ def __init__(self, output):
+ """Create an encoder encoding to output.
+
+ :param output: A file-like object. Bytes written to the Encoder
+ will be encoded using HTTP chunking. Small writes may be buffered
+ and the ``close`` method must be called to finish the stream.
+ """
+ self.output = output
+ self.buffered_bytes = []
+ self.buffer_size = 0
+
+ def flush(self, extra_len=0):
+ """Flush the encoder to the output stream.
+
+ :param extra_len: Increase the size of the chunk by this many bytes
+ to allow for a subsequent write.
+ """
+ if not self.buffer_size and not extra_len:
+ return
+ buffered_bytes = self.buffered_bytes
+ buffer_size = self.buffer_size
+ self.buffered_bytes = []
+ self.buffer_size = 0
+ self.output.write(_b("%X\r\n" % (buffer_size + extra_len)))
+ if buffer_size:
+ self.output.write(empty.join(buffered_bytes))
+ return True
+
+ def write(self, bytes):
+ """Encode bytes to the output stream."""
+ bytes_len = len(bytes)
+ if self.buffer_size + bytes_len >= 65536:
+ self.flush(bytes_len)
+ self.output.write(bytes)
+ else:
+ self.buffered_bytes.append(bytes)
+ self.buffer_size += bytes_len
+
+ def close(self):
+ """Finish the stream. This does not close the output stream."""
+ self.flush()
+ self.output.write(_b("0\r\n"))
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py
new file mode 100644
index 00000000000..9e5e005864c
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/details.py
@@ -0,0 +1,119 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Handlers for outcome details."""
+
+from testtools import content, content_type
+from testtools.compat import _b, BytesIO
+
+from subunit import chunked
+
+end_marker = _b("]\n")
+quoted_marker = _b(" ]")
+empty = _b('')
+
+
+class DetailsParser(object):
+ """Base class/API reference for details parsing."""
+
+
+class SimpleDetailsParser(DetailsParser):
+ """Parser for single-part [] delimited details."""
+
+ def __init__(self, state):
+ self._message = _b("")
+ self._state = state
+
+ def lineReceived(self, line):
+ if line == end_marker:
+ self._state.endDetails()
+ return
+ if line[0:2] == quoted_marker:
+ # quoted ] start
+ self._message += line[1:]
+ else:
+ self._message += line
+
+ def get_details(self, style=None):
+ result = {}
+ if not style:
+ # We know that subunit/testtools serialise [] formatted
+ # tracebacks as utf8, but perhaps we need a ReplacingContent
+ # or something like that.
+ result['traceback'] = content.Content(
+ content_type.ContentType("text", "x-traceback",
+ {"charset": "utf8"}),
+ lambda:[self._message])
+ else:
+ if style == 'skip':
+ name = 'reason'
+ else:
+ name = 'message'
+ result[name] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[self._message])
+ return result
+
+ def get_message(self):
+ return self._message
+
+
+class MultipartDetailsParser(DetailsParser):
+ """Parser for multi-part [] surrounded MIME typed chunked details."""
+
+ def __init__(self, state):
+ self._state = state
+ self._details = {}
+ self._parse_state = self._look_for_content
+
+ def _look_for_content(self, line):
+ if line == end_marker:
+ self._state.endDetails()
+ return
+ # TODO error handling
+ field, value = line[:-1].decode('utf8').split(' ', 1)
+ try:
+ main, sub = value.split('/')
+ except ValueError:
+ raise ValueError("Invalid MIME type %r" % value)
+ self._content_type = content_type.ContentType(main, sub)
+ self._parse_state = self._get_name
+
+ def _get_name(self, line):
+ self._name = line[:-1].decode('utf8')
+ self._body = BytesIO()
+ self._chunk_parser = chunked.Decoder(self._body)
+ self._parse_state = self._feed_chunks
+
+ def _feed_chunks(self, line):
+ residue = self._chunk_parser.write(line)
+ if residue is not None:
+ # Line based use always ends on no residue.
+ assert residue == empty, 'residue: %r' % (residue,)
+ body = self._body
+ self._details[self._name] = content.Content(
+ self._content_type, lambda:[body.getvalue()])
+ self._chunk_parser.close()
+ self._parse_state = self._look_for_content
+
+ def get_details(self, for_skip=False):
+ return self._details
+
+ def get_message(self):
+ return None
+
+ def lineReceived(self, line):
+ self._parse_state(line)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py
new file mode 100644
index 00000000000..0a0a185c3e3
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/filters.py
@@ -0,0 +1,206 @@
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+
+from optparse import OptionParser
+import sys
+
+from extras import safe_hasattr
+from testtools import CopyStreamResult, StreamResult, StreamResultRouter
+
+from subunit import (
+ DiscardStream, ProtocolTestCase, ByteStreamToStreamResult,
+ StreamResultToBytes,
+ )
+from subunit.test_results import CatFiles
+
+
+def make_options(description):
+ parser = OptionParser(description=description)
+ parser.add_option(
+ "--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False,
+ dest="no_passthrough")
+ parser.add_option(
+ "-o", "--output-to",
+ help="Send the output to this path rather than stdout.")
+ parser.add_option(
+ "-f", "--forward", action="store_true", default=False,
+ help="Forward subunit stream on stdout. When set, received "
+ "non-subunit output will be encapsulated in subunit.")
+ return parser
+
+
+def run_tests_from_stream(input_stream, result, passthrough_stream=None,
+ forward_stream=None, protocol_version=1, passthrough_subunit=True):
+ """Run tests from a subunit input stream through 'result'.
+
+ Non-test events - top level file attachments - are expected to be
+ dropped by v2 StreamResults at the present time (as all the analysis code
+    is in ExtendedTestResult APIs), so to implement passthrough_stream they
+ are diverted and copied directly when that is set.
+
+ :param input_stream: A stream containing subunit input.
+ :param result: A TestResult that will receive the test events.
+ NB: This should be an ExtendedTestResult for v1 and a StreamResult for
+ v2.
+ :param passthrough_stream: All non-subunit input received will be
+ sent to this stream. If not provided, uses the ``TestProtocolServer``
+ default, which is ``sys.stdout``.
+ :param forward_stream: All subunit input received will be forwarded
+ to this stream. If not provided, uses the ``TestProtocolServer``
+ default, which is to not forward any input. Do not set this when
+ transforming the stream - items would be double-reported.
+ :param protocol_version: What version of the subunit protocol to expect.
+ :param passthrough_subunit: If True, passthrough should be as subunit
+ otherwise unwrap it. Only has effect when forward_stream is None.
+ (when forwarding as subunit non-subunit input is always turned into
+ subunit)
+ """
+ if 1==protocol_version:
+ test = ProtocolTestCase(
+ input_stream, passthrough=passthrough_stream,
+ forward=forward_stream)
+ elif 2==protocol_version:
+ # In all cases we encapsulate unknown inputs.
+ if forward_stream is not None:
+ # Send events to forward_stream as subunit.
+ forward_result = StreamResultToBytes(forward_stream)
+ # If we're passing non-subunit through, copy:
+ if passthrough_stream is None:
+ # Not passing non-test events - split them off to nothing.
+ router = StreamResultRouter(forward_result)
+ router.add_rule(StreamResult(), 'test_id', test_id=None)
+ result = CopyStreamResult([router, result])
+ else:
+ # otherwise, copy all events to forward_result
+ result = CopyStreamResult([forward_result, result])
+ elif passthrough_stream is not None:
+ if not passthrough_subunit:
+ # Route non-test events to passthrough_stream, unwrapping them for
+ # display.
+ passthrough_result = CatFiles(passthrough_stream)
+ else:
+ passthrough_result = StreamResultToBytes(passthrough_stream)
+ result = StreamResultRouter(result)
+ result.add_rule(passthrough_result, 'test_id', test_id=None)
+ test = ByteStreamToStreamResult(input_stream,
+ non_subunit_name='stdout')
+ else:
+ raise Exception("Unknown protocol version.")
+ result.startTestRun()
+ test.run(result)
+ result.stopTestRun()
+
+
+def filter_by_result(result_factory, output_path, passthrough, forward,
+ input_stream=sys.stdin, protocol_version=1,
+ passthrough_subunit=True):
+ """Filter an input stream using a test result.
+
+ :param result_factory: A callable that when passed an output stream
+ returns a TestResult. It is expected that this result will output
+ to the given stream.
+    :param output_path: A path to send output to. If None, output will go
+        to ``sys.stdout``.
+ :param passthrough: If True, all non-subunit input will be sent to
+ ``sys.stdout``. If False, that input will be discarded.
+ :param forward: If True, all subunit input will be forwarded directly to
+ ``sys.stdout`` as well as to the ``TestResult``.
+ :param input_stream: The source of subunit input. Defaults to
+ ``sys.stdin``.
+ :param protocol_version: The subunit protocol version to expect.
+ :param passthrough_subunit: If True, passthrough should be as subunit.
+ :return: A test result with the results of the run.
+ """
+ if passthrough:
+ passthrough_stream = sys.stdout
+ else:
+ if 1==protocol_version:
+ passthrough_stream = DiscardStream()
+ else:
+ passthrough_stream = None
+
+ if forward:
+ forward_stream = sys.stdout
+ elif 1==protocol_version:
+ forward_stream = DiscardStream()
+ else:
+ forward_stream = None
+
+ if output_path is None:
+ output_to = sys.stdout
+ else:
+ output_to = file(output_path, 'wb')
+
+ try:
+ result = result_factory(output_to)
+ run_tests_from_stream(
+ input_stream, result, passthrough_stream, forward_stream,
+ protocol_version=protocol_version,
+ passthrough_subunit=passthrough_subunit)
+ finally:
+ if output_path:
+ output_to.close()
+ return result
+
+
+def run_filter_script(result_factory, description, post_run_hook=None,
+ protocol_version=1, passthrough_subunit=True):
+ """Main function for simple subunit filter scripts.
+
+ Many subunit filter scripts take a stream of subunit input and use a
+ TestResult to handle the events generated by that stream. This function
+ wraps a lot of the boiler-plate around that by making a script with
+ options for handling passthrough information and stream forwarding, and
+ that will exit with a successful return code (i.e. 0) if the input stream
+ represents a successful test run.
+
+ :param result_factory: A callable that takes an output stream and returns
+ a test result that outputs to that stream.
+ :param description: A description of the filter script.
+ :param protocol_version: What protocol version to consume/emit.
+ :param passthrough_subunit: If True, passthrough should be as subunit.
+ """
+ parser = make_options(description)
+ (options, args) = parser.parse_args()
+ result = filter_by_result(
+ result_factory, options.output_to, not options.no_passthrough,
+ options.forward, protocol_version=protocol_version,
+ passthrough_subunit=passthrough_subunit,
+ input_stream=find_stream(sys.stdin, args))
+ if post_run_hook:
+ post_run_hook(result)
+ if not safe_hasattr(result, 'wasSuccessful'):
+ result = result.decorated
+ if result.wasSuccessful():
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+
+def find_stream(stdin, argv):
+ """Find a stream to use as input for filters.
+
+ :param stdin: Standard in - used if no files are named in argv.
+ :param argv: Command line arguments after option parsing. If one file
+ is named, that is opened in read only binary mode and returned.
+ A missing file will raise an exception, as will multiple file names.
+ """
+ assert len(argv) < 2, "Too many filenames."
+ if argv:
+ return open(argv[0], 'rb')
+ else:
+ return stdin
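+
+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# A minimal filter script built on run_filter_script: it feeds the subunit
+# stream on stdin into TestResultStats (from the subunit package) and prints
+# the totals after the run. The description text is invented.
+def _example_stats_filter():
+    from subunit import TestResultStats
+    run_filter_script(
+        TestResultStats,
+        'Report statistics for a subunit stream read from stdin',
+        post_run_hook=lambda result: result.formatStats())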
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py
new file mode 100644
index 00000000000..07855d0975c
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/iso8601.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2007 Michael Twomey
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""ISO 8601 date time string parsing
+
+Basic usage:
+>>> import iso8601
+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+>>>
+
+"""
+
+from datetime import datetime, timedelta, tzinfo
+import re
+import sys
+
+__all__ = ["parse_date", "ParseError"]
+
+# Adapted from http://delete.me.uk/2005/03/iso8601.html
+ISO8601_REGEX_PATTERN = (r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
+ r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
+ r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
+)
+TIMEZONE_REGEX_PATTERN = "(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})"
+ISO8601_REGEX = re.compile(ISO8601_REGEX_PATTERN.encode('utf8'))
+TIMEZONE_REGEX = re.compile(TIMEZONE_REGEX_PATTERN.encode('utf8'))
+
+zulu = "Z".encode('latin-1')
+minus = "-".encode('latin-1')
+
+if sys.version_info < (3, 0):
+ bytes = str
+
+
+class ParseError(Exception):
+ """Raised when there is a problem parsing a date string"""
+
+# Yoinked from python docs
+ZERO = timedelta(0)
+class Utc(tzinfo):
+ """UTC
+
+ """
+ def utcoffset(self, dt):
+ return ZERO
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return ZERO
+UTC = Utc()
+
+class FixedOffset(tzinfo):
+ """Fixed offset in hours and minutes from UTC
+
+ """
+ def __init__(self, offset_hours, offset_minutes, name):
+ self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return ZERO
+
+ def __repr__(self):
+ return "<FixedOffset %r>" % self.__name
+
+def parse_timezone(tzstring, default_timezone=UTC):
+ """Parses ISO 8601 time zone specs into tzinfo offsets
+
+ """
+ if tzstring == zulu:
+ return default_timezone
+ # This isn't strictly correct, but it's common to encounter dates without
+ # timezones so I'll assume the default (which defaults to UTC).
+ # Addresses issue 4.
+ if tzstring is None:
+ return default_timezone
+ m = TIMEZONE_REGEX.match(tzstring)
+ prefix, hours, minutes = m.groups()
+ hours, minutes = int(hours), int(minutes)
+ if prefix == minus:
+ hours = -hours
+ minutes = -minutes
+ return FixedOffset(hours, minutes, tzstring)
+
+def parse_date(datestring, default_timezone=UTC):
+ """Parses ISO 8601 dates into datetime objects
+
+ The timezone is parsed from the date string. However it is quite common to
+ have dates without a timezone (not strictly correct). In this case the
+ default timezone specified in default_timezone is used. This is UTC by
+ default.
+ """
+ if not isinstance(datestring, bytes):
+ raise ParseError("Expecting bytes %r" % datestring)
+ m = ISO8601_REGEX.match(datestring)
+ if not m:
+ raise ParseError("Unable to parse date string %r" % datestring)
+ groups = m.groupdict()
+ tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
+ if groups["fraction"] is None:
+ groups["fraction"] = 0
+ else:
+ groups["fraction"] = int(float("0.%s" % groups["fraction"].decode()) * 1e6)
+ return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
+ int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
+ int(groups["fraction"]), tz)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py
new file mode 100644
index 00000000000..3a6af89a33b
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/progress_model.py
@@ -0,0 +1,106 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Support for dealing with progress state."""
+
+class ProgressModel(object):
+ """A model of progress indicators as subunit defines it.
+
+ Instances of this class represent a single logical operation that is
+ progressing. The operation may have many steps, and some of those steps may
+ supply their own progress information. ProgressModel uses a nested concept
+ where the overall state can be pushed, creating new starting state, and
+    later popped to return to the prior state. Many user interfaces will want
+ to display an overall summary though, and accordingly the pos() and width()
+ methods return overall summary information rather than information on the
+ current subtask.
+
+ The default state is 0/0 - indicating that the overall progress is unknown.
+ Anytime the denominator of pos/width is 0, rendering of a ProgressModel
+    should take this into consideration.
+
+ :ivar: _tasks. This private attribute stores the subtasks. Each is a tuple:
+ pos, width, overall_numerator, overall_denominator. The overall fields
+ store the calculated overall numerator and denominator for the state
+ that was pushed.
+ """
+
+ def __init__(self):
+ """Create a ProgressModel.
+
+ The new model has no progress data at all - it will claim a summary
+ width of zero and position of 0.
+ """
+ self._tasks = []
+ self.push()
+
+ def adjust_width(self, offset):
+ """Adjust the with of the current subtask."""
+ self._tasks[-1][1] += offset
+
+ def advance(self):
+ """Advance the current subtask."""
+ self._tasks[-1][0] += 1
+
+ def pop(self):
+ """Pop a subtask off the ProgressModel.
+
+ See push for a description of how push and pop work.
+ """
+ self._tasks.pop()
+
+ def pos(self):
+ """Return how far through the operation has progressed."""
+ if not self._tasks:
+ return 0
+ task = self._tasks[-1]
+ if len(self._tasks) > 1:
+ # scale up the overall pos by the current task or preserve it if
+ # no current width is known.
+ offset = task[2] * (task[1] or 1)
+ else:
+ offset = 0
+ return offset + task[0]
+
+ def push(self):
+ """Push a new subtask.
+
+        After pushing a new subtask, the overall progress hasn't changed. Calls
+        to adjust_width, advance and set_width will only alter the progress
+        within the range that calling 'advance' would have covered before the
+        push - the subtask represents progressing one step in the enclosing task.
+
+ Call pop() to restore the progress model to the state before push was
+ called.
+ """
+ self._tasks.append([0, 0, self.pos(), self.width()])
+
+ def set_width(self, width):
+ """Set the width of the current subtask."""
+ self._tasks[-1][1] = width
+
+ def width(self):
+ """Return the total width of the operation."""
+ if not self._tasks:
+ return 0
+ task = self._tasks[-1]
+ if len(self._tasks) > 1:
+ # scale up the overall width by the current task or preserve it if
+ # no current width is known.
+ return task[3] * (task[1] or 1)
+ else:
+ return task[1]
+
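+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# Walks the push/advance/pop cycle: an outer operation of three steps whose
+# second step is itself a four-step subtask.
+def _example_progress():
+    model = ProgressModel()
+    model.set_width(3)   # the outer operation has 3 steps
+    model.advance()      # step 1 done: pos()/width() == 1/3
+    model.push()         # enter a subtask for step 2
+    model.set_width(4)
+    model.advance()      # partway through step 2: 5/12 overall
+    model.pop()          # leave the subtask
+    model.advance()      # step 2 done: 2/3
+    return model.pos(), model.width()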
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py
new file mode 100755
index 00000000000..7e4d783bded
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/run.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# Simple subunit testrunner for python
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Run a unittest testcase reporting results as Subunit.
+
+ $ python -m subunit.run mylib.tests.test_suite
+"""
+
+import io
+import os
+import sys
+
+from testtools import ExtendedToStreamDecorator
+from testtools.testsuite import iterate_tests
+
+from subunit import StreamResultToBytes, get_default_formatter
+from subunit.test_results import AutoTimingTestResultDecorator
+from testtools.run import (
+ BUFFEROUTPUT,
+ CATCHBREAK,
+ FAILFAST,
+ list_test,
+ TestProgram,
+ USAGE_AS_MAIN,
+ )
+
+
+class SubunitTestRunner(object):
+ def __init__(self, verbosity=None, failfast=None, buffer=None, stream=None):
+ """Create a TestToolsTestRunner.
+
+ :param verbosity: Ignored.
+ :param failfast: Stop running tests at the first failure.
+ :param buffer: Ignored.
+ """
+ self.failfast = failfast
+ self.stream = stream or sys.stdout
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result, _ = self._list(test)
+ result = ExtendedToStreamDecorator(result)
+ result = AutoTimingTestResultDecorator(result)
+ if self.failfast is not None:
+ result.failfast = self.failfast
+ result.startTestRun()
+ try:
+ test(result)
+ finally:
+ result.stopTestRun()
+ return result
+
+ def list(self, test):
+ "List the test."
+ result, errors = self._list(test)
+ if errors:
+ failed_descr = '\n'.join(errors).encode('utf8')
+ result.status(file_name="import errors", runnable=False,
+ file_bytes=failed_descr, mime_type="text/plain;charset=utf8")
+ sys.exit(2)
+
+ def _list(self, test):
+ test_ids, errors = list_test(test)
+ try:
+ fileno = self.stream.fileno()
+ except:
+ fileno = None
+ if fileno is not None:
+ stream = os.fdopen(fileno, 'wb', 0)
+ else:
+ stream = self.stream
+ result = StreamResultToBytes(stream)
+ for test_id in test_ids:
+ result.status(test_id=test_id, test_status='exists')
+ return result, errors
+
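+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# Drives SubunitTestRunner by hand: the test case below is invented and the
+# subunit output is captured in an in-memory buffer instead of stdout.
+def _example_run_suite():
+    import unittest
+    from io import BytesIO
+
+    class _Demo(unittest.TestCase):
+        def test_ok(self):
+            pass
+
+    out = BytesIO()
+    runner = SubunitTestRunner(stream=out)
+    runner.run(unittest.TestLoader().loadTestsFromTestCase(_Demo))
+    return out.getvalue()  # subunit v2 bytes describing the run
+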
+
+class SubunitTestProgram(TestProgram):
+
+ USAGE = USAGE_AS_MAIN
+
+ def usageExit(self, msg=None):
+ if msg:
+ print (msg)
+ usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+ 'buffer': ''}
+ if self.failfast != False:
+ usage['failfast'] = FAILFAST
+ if self.catchbreak != False:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer != False:
+ usage['buffer'] = BUFFEROUTPUT
+ usage_text = self.USAGE % usage
+ usage_lines = usage_text.split('\n')
+ usage_lines.insert(2, "Run a test suite with a subunit reporter.")
+ usage_lines.insert(3, "")
+ print('\n'.join(usage_lines))
+ sys.exit(2)
+
+
+def main():
+ # Disable the default buffering, for Python 2.x where pdb doesn't do it
+ # on non-ttys.
+ stream = get_default_formatter()
+ runner = SubunitTestRunner
+ # Patch stdout to be unbuffered, so that pdb works well on 2.6/2.7.
+ binstdout = io.open(sys.stdout.fileno(), 'wb', 0)
+ if sys.version_info[0] > 2:
+ sys.stdout = io.TextIOWrapper(binstdout, encoding=sys.stdout.encoding)
+ else:
+ sys.stdout = binstdout
+ SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
+ stdout=sys.stdout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py
new file mode 100644
index 00000000000..8c89d9b5605
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/test_results.py
@@ -0,0 +1,729 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""TestResult helper classes used to by subunit."""
+
+import csv
+import datetime
+
+import testtools
+from testtools.compat import all
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
+from testtools import StreamResult
+
+from subunit import iso8601
+import subunit
+
+
+# NOT a TestResult, because we are implementing the interface, not inheriting
+# it.
+class TestResultDecorator(object):
+ """General pass-through decorator.
+
+ This provides a base that other TestResults can inherit from to
+ gain basic forwarding functionality. It also takes care of
+ handling the case where the target doesn't support newer methods
+ or features by degrading them.
+ """
+
+ # XXX: Since lp:testtools r250, this is in testtools. Once it's released,
+ # we should gut this and just use that.
+
+ def __init__(self, decorated):
+ """Create a TestResultDecorator forwarding to decorated."""
+ # Make every decorator degrade gracefully.
+ self.decorated = testtools.ExtendedToOriginalDecorator(decorated)
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ return self.decorated.startTestRun()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ return self.decorated.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ return self.decorated.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self.decorated.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self.decorated.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self.decorated.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self.decorated.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self.decorated.addUnexpectedSuccess(test, details=details)
+
+ def _get_failfast(self):
+ return getattr(self.decorated, 'failfast', False)
+
+ def _set_failfast(self, value):
+ self.decorated.failfast = value
+ failfast = property(_get_failfast, _set_failfast)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def stop(self):
+ return self.decorated.stop()
+
+ @property
+ def testsRun(self):
+ return self.decorated.testsRun
+
+ def tags(self, new_tags, gone_tags):
+ return self.decorated.tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self.decorated.time(a_datetime)
+
+
+class HookedTestResultDecorator(TestResultDecorator):
+ """A TestResult which calls a hook on every event."""
+
+ def __init__(self, decorated):
+ self.super = super(HookedTestResultDecorator, self)
+ self.super.__init__(decorated)
+
+ def startTest(self, test):
+ self._before_event()
+ return self.super.startTest(test)
+
+ def startTestRun(self):
+ self._before_event()
+ return self.super.startTestRun()
+
+ def stopTest(self, test):
+ self._before_event()
+ return self.super.stopTest(test)
+
+ def stopTestRun(self):
+ self._before_event()
+ return self.super.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ self._before_event()
+ return self.super.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._before_event()
+ return self.super.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._before_event()
+ return self.super.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._before_event()
+ return self.super.addUnexpectedSuccess(test, details=details)
+
+ def progress(self, offset, whence):
+ self._before_event()
+ return self.super.progress(offset, whence)
+
+ def wasSuccessful(self):
+ self._before_event()
+ return self.super.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ self._before_event()
+ return self.super.shouldStop
+
+ def stop(self):
+ self._before_event()
+ return self.super.stop()
+
+ def time(self, a_datetime):
+ self._before_event()
+ return self.super.time(a_datetime)
+
+
+class AutoTimingTestResultDecorator(HookedTestResultDecorator):
+ """Decorate a TestResult to add time events to a test run.
+
+ By default this will cause a time event before every test event,
+ but if explicit time data is being provided by the test run, then
+ this decorator will turn itself off to prevent causing confusion.
+ """
+
+ def __init__(self, decorated):
+ self._time = None
+ super(AutoTimingTestResultDecorator, self).__init__(decorated)
+
+ def _before_event(self):
+ time = self._time
+ if time is not None:
+ return
+ time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
+ self.decorated.time(time)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def time(self, a_datetime):
+ """Provide a timestamp for the current test activity.
+
+ :param a_datetime: If None, automatically add timestamps before every
+ event (this is the default behaviour if time() is not called at
+ all). If not None, pass the provided time onto the decorated
+ result object and disable automatic timestamps.
+ """
+ self._time = a_datetime
+ return self.decorated.time(a_datetime)
+
+
+class TagsMixin(object):
+
+ def __init__(self):
+ self._clear_tags()
+
+ def _clear_tags(self):
+ self._global_tags = set(), set()
+ self._test_tags = None
+
+ def _get_active_tags(self):
+ global_new, global_gone = self._global_tags
+ if self._test_tags is None:
+ return set(global_new)
+ test_new, test_gone = self._test_tags
+ return global_new.difference(test_gone).union(test_new)
+
+ def _get_current_scope(self):
+ if self._test_tags:
+ return self._test_tags
+ return self._global_tags
+
+ def _flush_current_scope(self, tag_receiver):
+ new_tags, gone_tags = self._get_current_scope()
+ if new_tags or gone_tags:
+ tag_receiver.tags(new_tags, gone_tags)
+ if self._test_tags:
+ self._test_tags = set(), set()
+ else:
+ self._global_tags = set(), set()
+
+ def startTestRun(self):
+ self._clear_tags()
+
+ def startTest(self, test):
+ self._test_tags = set(), set()
+
+ def stopTest(self, test):
+ self._test_tags = None
+
+ def tags(self, new_tags, gone_tags):
+ """Handle tag instructions.
+
+ Adds and removes tags as appropriate. If a test is currently running,
+ tags are not affected for subsequent tests.
+
+        :param new_tags: Tags to add.
+ :param gone_tags: Tags to remove.
+ """
+ current_new_tags, current_gone_tags = self._get_current_scope()
+ current_new_tags.update(new_tags)
+ current_new_tags.difference_update(gone_tags)
+ current_gone_tags.update(gone_tags)
+ current_gone_tags.difference_update(new_tags)
+
+
+class TagCollapsingDecorator(HookedTestResultDecorator, TagsMixin):
+ """Collapses many 'tags' calls into one where possible."""
+
+ def __init__(self, result):
+ super(TagCollapsingDecorator, self).__init__(result)
+ self._clear_tags()
+
+ def _before_event(self):
+ self._flush_current_scope(self.decorated)
+
+ def tags(self, new_tags, gone_tags):
+ TagsMixin.tags(self, new_tags, gone_tags)
+
+
+class TimeCollapsingDecorator(HookedTestResultDecorator):
+ """Only pass on the first and last of a consecutive sequence of times."""
+
+ def __init__(self, decorated):
+ super(TimeCollapsingDecorator, self).__init__(decorated)
+ self._last_received_time = None
+ self._last_sent_time = None
+
+ def _before_event(self):
+ if self._last_received_time is None:
+ return
+ if self._last_received_time != self._last_sent_time:
+ self.decorated.time(self._last_received_time)
+ self._last_sent_time = self._last_received_time
+ self._last_received_time = None
+
+ def time(self, a_time):
+ # Don't upcall, because we don't want to call _before_event, it's only
+ # for non-time events.
+ if self._last_received_time is None:
+ self.decorated.time(a_time)
+ self._last_sent_time = a_time
+ self._last_received_time = a_time
+
+
+def and_predicates(predicates):
+ """Return a predicate that is true iff all predicates are true."""
+ # XXX: Should probably be in testtools to be better used by matchers. jml
+ return lambda *args, **kwargs: all(p(*args, **kwargs) for p in predicates)
+
+
+def make_tag_filter(with_tags, without_tags):
+ """Make a callback that checks tests against tags."""
+
+ with_tags = with_tags and set(with_tags) or None
+ without_tags = without_tags and set(without_tags) or None
+
+ def check_tags(test, outcome, err, details, tags):
+ if with_tags and not with_tags <= tags:
+ return False
+ if without_tags and bool(without_tags & tags):
+ return False
+ return True
+
+ return check_tags
+
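+
+# --- Illustrative sketch (editorial addition, not part of upstream subunit) ---
+# Combines make_tag_filter with TestResultFilter (defined further down in
+# this module): only tests tagged 'smoke' and not tagged 'slow' reach the
+# wrapped result. The tag names are invented for the example.
+def _example_tag_filtered_result(downstream_result):
+    predicate = make_tag_filter(['smoke'], ['slow'])
+    return TestResultFilter(
+        downstream_result, filter_success=False,
+        filter_predicate=predicate)
+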
+
+class _PredicateFilter(TestResultDecorator, TagsMixin):
+
+ def __init__(self, result, predicate):
+ super(_PredicateFilter, self).__init__(result)
+ self._clear_tags()
+ self.decorated = TimeCollapsingDecorator(
+ TagCollapsingDecorator(self.decorated))
+ self._predicate = predicate
+ # The current test (for filtering tags)
+ self._current_test = None
+ # Has the current test been filtered (for outputting test tags)
+ self._current_test_filtered = None
+ # Calls to this result that we don't know whether to forward on yet.
+ self._buffered_calls = []
+
+ def filter_predicate(self, test, outcome, error, details):
+ return self._predicate(
+ test, outcome, error, details, self._get_active_tags())
+
+ def addError(self, test, err=None, details=None):
+ if (self.filter_predicate(test, 'error', err, details)):
+ self._buffered_calls.append(
+ ('addError', [test, err], {'details': details}))
+ else:
+ self._filtered()
+
+ def addFailure(self, test, err=None, details=None):
+ if (self.filter_predicate(test, 'failure', err, details)):
+ self._buffered_calls.append(
+ ('addFailure', [test, err], {'details': details}))
+ else:
+ self._filtered()
+
+ def addSkip(self, test, reason=None, details=None):
+ if (self.filter_predicate(test, 'skip', reason, details)):
+ self._buffered_calls.append(
+ ('addSkip', [test, reason], {'details': details}))
+ else:
+ self._filtered()
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ if self.filter_predicate(test, 'expectedfailure', err, details):
+ self._buffered_calls.append(
+ ('addExpectedFailure', [test, err], {'details': details}))
+ else:
+ self._filtered()
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._buffered_calls.append(
+ ('addUnexpectedSuccess', [test], {'details': details}))
+
+ def addSuccess(self, test, details=None):
+ if (self.filter_predicate(test, 'success', None, details)):
+ self._buffered_calls.append(
+ ('addSuccess', [test], {'details': details}))
+ else:
+ self._filtered()
+
+ def _filtered(self):
+ self._current_test_filtered = True
+
+ def startTest(self, test):
+ """Start a test.
+
+ Not directly passed to the client, but used for handling of tags
+ correctly.
+ """
+ TagsMixin.startTest(self, test)
+ self._current_test = test
+ self._current_test_filtered = False
+ self._buffered_calls.append(('startTest', [test], {}))
+
+ def stopTest(self, test):
+ """Stop a test.
+
+ Not directly passed to the client, but used for handling of tags
+ correctly.
+ """
+ if not self._current_test_filtered:
+ for method, args, kwargs in self._buffered_calls:
+ getattr(self.decorated, method)(*args, **kwargs)
+ self.decorated.stopTest(test)
+ self._current_test = None
+ self._current_test_filtered = None
+ self._buffered_calls = []
+ TagsMixin.stopTest(self, test)
+
+ def tags(self, new_tags, gone_tags):
+ TagsMixin.tags(self, new_tags, gone_tags)
+ if self._current_test is not None:
+ self._buffered_calls.append(('tags', [new_tags, gone_tags], {}))
+ else:
+ return super(_PredicateFilter, self).tags(new_tags, gone_tags)
+
+ def time(self, a_time):
+ return self.decorated.time(a_time)
+
+ def id_to_orig_id(self, id):
+ if id.startswith("subunit.RemotedTestCase."):
+ return id[len("subunit.RemotedTestCase."):]
+ return id
+
+
+class TestResultFilter(TestResultDecorator):
+ """A pyunit TestResult interface implementation which filters tests.
+
+ Tests that pass the filter are handed on to another TestResult instance
+ for further processing/reporting. To obtain the filtered results,
+ the other instance must be interrogated.
+
+ :ivar result: The result that tests are passed to after filtering.
+ :ivar filter_predicate: The callback run to decide whether to pass
+ a result.
+ """
+
+ def __init__(self, result, filter_error=False, filter_failure=False,
+ filter_success=True, filter_skip=False, filter_xfail=False,
+ filter_predicate=None, fixup_expected_failures=None):
+ """Create a FilterResult object filtering to result.
+
+ :param filter_error: Filter out errors.
+ :param filter_failure: Filter out failures.
+ :param filter_success: Filter out successful tests.
+ :param filter_skip: Filter out skipped tests.
+ :param filter_xfail: Filter out expected failure tests.
+ :param filter_predicate: A callable taking (test, outcome, err,
+ details, tags) and returning True if the result should be passed
+            through. err and details may be None if no error or extra
+ metadata is available. outcome is the name of the outcome such
+ as 'success' or 'failure'. tags is new in 0.0.8; 0.0.7 filters
+ are still supported but should be updated to accept the tags
+ parameter for efficiency.
+ :param fixup_expected_failures: Set of test ids to consider known
+ failing.
+ """
+ predicates = []
+ if filter_error:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'error')
+ if filter_failure:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'failure')
+ if filter_success:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'success')
+ if filter_skip:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'skip')
+ if filter_xfail:
+ predicates.append(
+ lambda t, outcome, e, d, tags: outcome != 'expectedfailure')
+ if filter_predicate is not None:
+ def compat(test, outcome, error, details, tags):
+ # 0.0.7 and earlier did not support the 'tags' parameter.
+ try:
+ return filter_predicate(
+ test, outcome, error, details, tags)
+ except TypeError:
+ return filter_predicate(test, outcome, error, details)
+ predicates.append(compat)
+ predicate = and_predicates(predicates)
+ super(TestResultFilter, self).__init__(
+ _PredicateFilter(result, predicate))
+ if fixup_expected_failures is None:
+ self._fixup_expected_failures = frozenset()
+ else:
+ self._fixup_expected_failures = fixup_expected_failures
+
+ def addError(self, test, err=None, details=None):
+ if self._failure_expected(test):
+ self.addExpectedFailure(test, err=err, details=details)
+ else:
+ super(TestResultFilter, self).addError(
+ test, err=err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ if self._failure_expected(test):
+ self.addExpectedFailure(test, err=err, details=details)
+ else:
+ super(TestResultFilter, self).addFailure(
+ test, err=err, details=details)
+
+ def addSuccess(self, test, details=None):
+ if self._failure_expected(test):
+ self.addUnexpectedSuccess(test, details=details)
+ else:
+ super(TestResultFilter, self).addSuccess(test, details=details)
+
+ def _failure_expected(self, test):
+ return (test.id() in self._fixup_expected_failures)
+
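+
+# A minimal usage sketch (the names below are placeholders): wrap a plain
+# unittest result so that only failures and errors are forwarded, using the
+# five-argument predicate form described in the constructor docstring.
+#
+# import unittest
+#
+# def only_failures(test, outcome, err, details, tags):
+#     return outcome in ('failure', 'error')
+#
+# downstream = unittest.TestResult()
+# filtered = TestResultFilter(downstream, filter_success=False,
+#                             filter_predicate=only_failures)
+# # Run tests against `filtered`; interrogate `downstream` afterwards.
+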
+
+class TestIdPrintingResult(testtools.TestResult):
+ """Print test ids to a stream.
+
+ Implements both TestResult and StreamResult, for compatibility.
+ """
+
+ def __init__(self, stream, show_times=False, show_exists=False):
+ """Create a FilterResult object outputting to stream."""
+ super(TestIdPrintingResult, self).__init__()
+ self._stream = stream
+ self.show_exists = show_exists
+ self.show_times = show_times
+
+ def startTestRun(self):
+ self.failed_tests = 0
+ self.__time = None
+ self._test = None
+ self._test_duration = 0
+ self._active_tests = {}
+
+ def addError(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addFailure(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addSuccess(self, test):
+ self._test = test
+
+ def addSkip(self, test, reason=None, details=None):
+ self._test = test
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self.failed_tests += 1
+ self._test = test
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._test = test
+
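+ # A worked example of the conversion below: with show_times enabled, a
+ # duration of timedelta(days=1, seconds=2, microseconds=500000) is
+ # written as "<test_id> 86402.500" (1*24*3600 + 2 + 0.5 seconds).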
+ def reportTest(self, test_id, duration):
+ if self.show_times:
+ seconds = duration.seconds
+ seconds += duration.days * 3600 * 24
+ seconds += duration.microseconds / 1000000.0
+ self._stream.write(test_id + ' %0.3f\n' % seconds)
+ else:
+ self._stream.write(test_id + '\n')
+
+ def startTest(self, test):
+ self._start_time = self._time()
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ if not test_id:
+ return
+ if timestamp is not None:
+ self.time(timestamp)
+ if test_status=='exists':
+ if self.show_exists:
+ self.reportTest(test_id, 0)
+ elif test_status in ('inprogress', None):
+ self._active_tests[test_id] = self._time()
+ else:
+ self._end_test(test_id)
+
+ def _end_test(self, test_id):
+ test_start = self._active_tests.pop(test_id, None)
+ if not test_start:
+ test_duration = 0
+ else:
+ test_duration = self._time() - test_start
+ self.reportTest(test_id, test_duration)
+
+ def stopTest(self, test):
+ test_duration = self._time() - self._start_time
+ self.reportTest(self._test.id(), test_duration)
+
+ def time(self, time):
+ self.__time = time
+
+ def _time(self):
+ return self.__time
+
+ def wasSuccessful(self):
+ "Tells whether or not this result was a success"
+ return self.failed_tests == 0
+
+ def stopTestRun(self):
+ for test_id in list(self._active_tests.keys()):
+ self._end_test(test_id)
+
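+
+# A minimal sketch of driving this result through its StreamResult-style
+# status() API (the test id and timestamps below are arbitrary):
+#
+# import sys
+# from datetime import datetime, timedelta
+#
+# result = TestIdPrintingResult(sys.stdout, show_times=True)
+# result.startTestRun()
+# start = datetime.utcnow()
+# result.status(test_id='demo.test_one', test_status='inprogress',
+#               timestamp=start)
+# result.status(test_id='demo.test_one', test_status='success',
+#               timestamp=start + timedelta(seconds=2))  # writes "demo.test_one 2.000"
+# result.stopTestRun()
+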
+
+class TestByTestResult(testtools.TestResult):
+ """Call something every time a test completes."""
+
+# XXX: In testtools since lp:testtools r249. Once that's released, just
+# import that.
+
+ def __init__(self, on_test):
+ """Construct a ``TestByTestResult``.
+
+ :param on_test: A callable that takes a test case, a status (one of
+ "success", "failure", "error", "skip", or "xfail"), a start time
+ (a ``datetime`` with timezone), a stop time, an iterable of tags,
+ and a details dict. It is called at the end of each test (i.e. on
+ ``stopTest``) with the accumulated values for that test.
+ """
+ super(TestByTestResult, self).__init__()
+ self._on_test = on_test
+
+ def startTest(self, test):
+ super(TestByTestResult, self).startTest(test)
+ self._start_time = self._now()
+ # There's no supported (i.e. tested) behaviour that relies on these
+ # being set, but it makes me more comfortable all the same. -- jml
+ self._status = None
+ self._details = None
+ self._stop_time = None
+
+ def stopTest(self, test):
+ self._stop_time = self._now()
+ super(TestByTestResult, self).stopTest(test)
+ self._on_test(
+ test=test,
+ status=self._status,
+ start_time=self._start_time,
+ stop_time=self._stop_time,
+ # current_tags is new in testtools 0.9.13.
+ tags=getattr(self, 'current_tags', None),
+ details=self._details)
+
+ def _err_to_details(self, test, err, details):
+ if details:
+ return details
+ return {'traceback': TracebackContent(err, test)}
+
+ def addSuccess(self, test, details=None):
+ super(TestByTestResult, self).addSuccess(test)
+ self._status = 'success'
+ self._details = details
+
+ def addFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addFailure(test, err, details)
+ self._status = 'failure'
+ self._details = self._err_to_details(test, err, details)
+
+ def addError(self, test, err=None, details=None):
+ super(TestByTestResult, self).addError(test, err, details)
+ self._status = 'error'
+ self._details = self._err_to_details(test, err, details)
+
+ def addSkip(self, test, reason=None, details=None):
+ super(TestByTestResult, self).addSkip(test, reason, details)
+ self._status = 'skip'
+ if details is None:
+ details = {'reason': text_content(reason)}
+ elif reason:
+ # XXX: What if details already has 'reason' key?
+ details['reason'] = text_content(reason)
+ self._details = details
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addExpectedFailure(test, err, details)
+ self._status = 'xfail'
+ self._details = self._err_to_details(test, err, details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ super(TestByTestResult, self).addUnexpectedSuccess(test, details)
+ self._status = 'success'
+ self._details = details
+
+
+class CsvResult(TestByTestResult):
+
+ def __init__(self, stream):
+ super(CsvResult, self).__init__(self._on_test)
+ self._write_row = csv.writer(stream).writerow
+
+ def _on_test(self, test, status, start_time, stop_time, tags, details):
+ self._write_row([test.id(), status, start_time, stop_time])
+
+ def startTestRun(self):
+ super(CsvResult, self).startTestRun()
+ self._write_row(['test', 'status', 'start_time', 'stop_time'])
+
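+
+# A short usage sketch (the file name and discovery path are placeholders):
+# CsvResult wires TestByTestResult's per-test callback to a csv writer, so
+# running a suite against it yields a header row plus one row per test.
+#
+# import unittest
+#
+# with open('results.csv', 'w') as f:
+#     result = CsvResult(f)
+#     result.startTestRun()
+#     unittest.defaultTestLoader.discover('.').run(result)
+#     result.stopTestRun()
+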
+
+class CatFiles(StreamResult):
+ """Cat file attachments received to a stream."""
+
+ def __init__(self, byte_stream):
+ self.stream = subunit.make_stream_binary(byte_stream)
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ if file_name is not None:
+ self.stream.write(file_bytes)
+ self.stream.flush()
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py
new file mode 100644
index 00000000000..b45d7f94569
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/__init__.py
@@ -0,0 +1,63 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import sys
+from unittest import TestLoader
+
+
+# Defined before the test module imports below to avoid circular imports.
+# For testing: different pythons have different str() implementations.
+if sys.version_info > (3, 0):
+ _remote_exception_repr = "testtools.testresult.real._StringException"
+ _remote_exception_str = "Traceback (most recent call last):\ntesttools.testresult.real._StringException"
+ _remote_exception_str_chunked = "57\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+else:
+ _remote_exception_repr = "_StringException"
+ _remote_exception_str = "Traceback (most recent call last):\n_StringException"
+ _remote_exception_str_chunked = "3D\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+
+
+from subunit.tests import (
+ test_chunked,
+ test_details,
+ test_filters,
+ test_progress_model,
+ test_run,
+ test_subunit_filter,
+ test_subunit_stats,
+ test_subunit_tags,
+ test_tap2subunit,
+ test_test_protocol,
+ test_test_protocol2,
+ test_test_results,
+ )
+
+
+def test_suite():
+ loader = TestLoader()
+ result = loader.loadTestsFromModule(test_chunked)
+ result.addTest(loader.loadTestsFromModule(test_details))
+ result.addTest(loader.loadTestsFromModule(test_filters))
+ result.addTest(loader.loadTestsFromModule(test_progress_model))
+ result.addTest(loader.loadTestsFromModule(test_test_results))
+ result.addTest(loader.loadTestsFromModule(test_test_protocol))
+ result.addTest(loader.loadTestsFromModule(test_test_protocol2))
+ result.addTest(loader.loadTestsFromModule(test_tap2subunit))
+ result.addTest(loader.loadTestsFromModule(test_subunit_filter))
+ result.addTest(loader.loadTestsFromModule(test_subunit_tags))
+ result.addTest(loader.loadTestsFromModule(test_subunit_stats))
+ result.addTest(loader.loadTestsFromModule(test_run))
+ return result
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py
new file mode 100755
index 00000000000..91838f6d6fb
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+import sys
+if sys.platform == "win32":
+ import msvcrt, os
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+if len(sys.argv) == 2:
+ # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args
+ # uses this code path to be sure that the arguments were passed to
+ # sample-script.py
+ print("test fail")
+ print("error fail")
+ sys.exit(0)
+print("test old mcdonald")
+print("success old mcdonald")
+print("test bing crosby")
+print("failure bing crosby [")
+print("foo.c:53:ERROR invalid state")
+print("]")
+print("test an error")
+print("error an error")
+sys.exit(0)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py
new file mode 100755
index 00000000000..fc73dfc409d
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/sample-two-script.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+import sys
+print("test old mcdonald")
+print("success old mcdonald")
+print("test bing crosby")
+print("success bing crosby")
+sys.exit(0)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py
new file mode 100644
index 00000000000..5100b323892
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_chunked.py
@@ -0,0 +1,146 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+from testtools.compat import _b, BytesIO
+
+import subunit.chunked
+
+
+class TestDecode(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output = BytesIO()
+ self.decoder = subunit.chunked.Decoder(self.output)
+
+ def test_close_read_length_short_errors(self):
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_body_short_errors(self):
+ self.assertEqual(None, self.decoder.write(_b('2\r\na')))
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_body_buffered_data_errors(self):
+ self.assertEqual(None, self.decoder.write(_b('2\r')))
+ self.assertRaises(ValueError, self.decoder.close)
+
+ def test_close_after_finished_stream_safe(self):
+ self.assertEqual(None, self.decoder.write(_b('2\r\nab')))
+ self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+ self.decoder.close()
+
+ def test_decode_nothing(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+ self.assertEqual(_b(''), self.output.getvalue())
+
+ def test_decode_serialised_form(self):
+ self.assertEqual(None, self.decoder.write(_b("F\r\n")))
+ self.assertEqual(None, self.decoder.write(_b("serialised\n")))
+ self.assertEqual(_b(''), self.decoder.write(_b("form0\r\n")))
+
+ def test_decode_short(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('3\r\nabc0\r\n')))
+ self.assertEqual(_b('abc'), self.output.getvalue())
+
+ def test_decode_combines_short(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('6\r\nabcdef0\r\n')))
+ self.assertEqual(_b('abcdef'), self.output.getvalue())
+
+ def test_decode_excess_bytes_from_write(self):
+ self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
+ self.assertEqual(_b('abc'), self.output.getvalue())
+
+ def test_decode_write_after_finished_errors(self):
+ self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
+ self.assertRaises(ValueError, self.decoder.write, _b(''))
+
+ def test_decode_hex(self):
+ self.assertEqual(_b(''), self.decoder.write(_b('A\r\n12345678900\r\n')))
+ self.assertEqual(_b('1234567890'), self.output.getvalue())
+
+ def test_decode_long_ranges(self):
+ self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
+ self.assertEqual(None, self.decoder.write(_b('1' * 65536)))
+ self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
+ self.assertEqual(None, self.decoder.write(_b('2' * 65536)))
+ self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+ self.assertEqual(_b('1' * 65536 + '2' * 65536), self.output.getvalue())
+
+ def test_decode_newline_nonstrict(self):
+ """Tolerate chunk markers with no CR character."""
+ # From <http://pad.lv/505078>
+ self.decoder = subunit.chunked.Decoder(self.output, strict=False)
+ self.assertEqual(None, self.decoder.write(_b('a\n')))
+ self.assertEqual(None, self.decoder.write(_b('abcdeabcde')))
+ self.assertEqual(_b(''), self.decoder.write(_b('0\n')))
+ self.assertEqual(_b('abcdeabcde'), self.output.getvalue())
+
+ def test_decode_strict_newline_only(self):
+ """Reject chunk markers with no CR character in strict mode."""
+ # From <http://pad.lv/505078>
+ self.assertRaises(ValueError,
+ self.decoder.write, _b('a\n'))
+
+ def test_decode_strict_multiple_crs(self):
+ self.assertRaises(ValueError,
+ self.decoder.write, _b('a\r\r\n'))
+
+ def test_decode_short_header(self):
+ self.assertRaises(ValueError,
+ self.decoder.write, _b('\n'))
+
+
+class TestEncode(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output = BytesIO()
+ self.encoder = subunit.chunked.Encoder(self.output)
+
+ def test_encode_nothing(self):
+ self.encoder.close()
+ self.assertEqual(_b('0\r\n'), self.output.getvalue())
+
+ def test_encode_empty(self):
+ self.encoder.write(_b(''))
+ self.encoder.close()
+ self.assertEqual(_b('0\r\n'), self.output.getvalue())
+
+ def test_encode_short(self):
+ self.encoder.write(_b('abc'))
+ self.encoder.close()
+ self.assertEqual(_b('3\r\nabc0\r\n'), self.output.getvalue())
+
+ def test_encode_combines_short(self):
+ self.encoder.write(_b('abc'))
+ self.encoder.write(_b('def'))
+ self.encoder.close()
+ self.assertEqual(_b('6\r\nabcdef0\r\n'), self.output.getvalue())
+
+ def test_encode_over_9_is_in_hex(self):
+ self.encoder.write(_b('1234567890'))
+ self.encoder.close()
+ self.assertEqual(_b('A\r\n12345678900\r\n'), self.output.getvalue())
+
+ def test_encode_long_ranges_not_combined(self):
+ self.encoder.write(_b('1' * 65536))
+ self.encoder.write(_b('2' * 65536))
+ self.encoder.close()
+ self.assertEqual(_b('10000\r\n' + '1' * 65536 + '10000\r\n' +
+ '2' * 65536 + '0\r\n'), self.output.getvalue())
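+
+
+# A round-trip sketch of the wire format exercised above (relying only on the
+# Encoder/Decoder behaviour shown in these tests): buffered writes are
+# flushed as a hex byte count plus CRLF followed by the payload, and close()
+# appends the terminating "0\r\n".
+#
+# wire = BytesIO()
+# encoder = subunit.chunked.Encoder(wire)
+# encoder.write(_b('hello'))
+# encoder.close()                  # wire now holds b'5\r\nhello0\r\n'
+#
+# payload = BytesIO()
+# decoder = subunit.chunked.Decoder(payload)
+# decoder.write(wire.getvalue())   # returns b'' once the stream is complete
+# assert payload.getvalue() == _b('hello')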
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py
new file mode 100644
index 00000000000..8605c5ac951
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_details.py
@@ -0,0 +1,106 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+from testtools.compat import _b, StringIO
+
+import subunit.tests
+from subunit import content, content_type, details
+
+
+class TestSimpleDetails(unittest.TestCase):
+
+ def test_lineReceived(self):
+ parser = details.SimpleDetailsParser(None)
+ parser.lineReceived(_b("foo\n"))
+ parser.lineReceived(_b("bar\n"))
+ self.assertEqual(_b("foo\nbar\n"), parser._message)
+
+ def test_lineReceived_escaped_bracket(self):
+ parser = details.SimpleDetailsParser(None)
+ parser.lineReceived(_b("foo\n"))
+ parser.lineReceived(_b(" ]are\n"))
+ parser.lineReceived(_b("bar\n"))
+ self.assertEqual(_b("foo\n]are\nbar\n"), parser._message)
+
+ def test_get_message(self):
+ parser = details.SimpleDetailsParser(None)
+ self.assertEqual(_b(""), parser.get_message())
+
+ def test_get_details(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['traceback'] = content.Content(
+ content_type.ContentType("text", "x-traceback",
+ {'charset': 'utf8'}),
+ lambda:[_b("")])
+ found = parser.get_details()
+ self.assertEqual(expected.keys(), found.keys())
+ self.assertEqual(expected['traceback'].content_type,
+ found['traceback'].content_type)
+ self.assertEqual(_b('').join(expected['traceback'].iter_bytes()),
+ _b('').join(found['traceback'].iter_bytes()))
+
+ def test_get_details_skip(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['reason'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[_b("")])
+ found = parser.get_details("skip")
+ self.assertEqual(expected, found)
+
+ def test_get_details_success(self):
+ parser = details.SimpleDetailsParser(None)
+ traceback = ""
+ expected = {}
+ expected['message'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[_b("")])
+ found = parser.get_details("success")
+ self.assertEqual(expected, found)
+
+
+class TestMultipartDetails(unittest.TestCase):
+
+ def test_get_message_is_None(self):
+ parser = details.MultipartDetailsParser(None)
+ self.assertEqual(None, parser.get_message())
+
+ def test_get_details(self):
+ parser = details.MultipartDetailsParser(None)
+ self.assertEqual({}, parser.get_details())
+
+ def test_parts(self):
+ parser = details.MultipartDetailsParser(None)
+ parser.lineReceived(_b("Content-Type: text/plain\n"))
+ parser.lineReceived(_b("something\n"))
+ parser.lineReceived(_b("F\r\n"))
+ parser.lineReceived(_b("serialised\n"))
+ parser.lineReceived(_b("form0\r\n"))
+ expected = {}
+ expected['something'] = content.Content(
+ content_type.ContentType("text", "plain"),
+ lambda:[_b("serialised\nform")])
+ found = parser.get_details()
+ self.assertEqual(expected.keys(), found.keys())
+ self.assertEqual(expected['something'].content_type,
+ found['something'].content_type)
+ self.assertEqual(_b('').join(expected['something'].iter_bytes()),
+ _b('').join(found['something'].iter_bytes()))
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py
new file mode 100644
index 00000000000..0a5e7c74b71
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_filters.py
@@ -0,0 +1,35 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import sys
+from tempfile import NamedTemporaryFile
+
+from testtools import TestCase
+
+from subunit.filters import find_stream
+
+
+class TestFindStream(TestCase):
+
+ def test_no_argv(self):
+ self.assertEqual('foo', find_stream('foo', []))
+
+ def test_opens_file(self):
+ f = NamedTemporaryFile()
+ f.write(b'foo')
+ f.flush()
+ stream = find_stream('bar', [f.name])
+ self.assertEqual(b'foo', stream.read())
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py
new file mode 100644
index 00000000000..2ca08888285
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_progress_model.py
@@ -0,0 +1,112 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import unittest
+
+import subunit
+from subunit.progress_model import ProgressModel
+
+
+class TestProgressModel(unittest.TestCase):
+
+ def assertProgressSummary(self, pos, total, progress):
+ """Assert that a progress model has reached a particular point."""
+ self.assertEqual(pos, progress.pos())
+ self.assertEqual(total, progress.width())
+
+ def test_new_progress_0_0(self):
+ progress = ProgressModel()
+ self.assertProgressSummary(0, 0, progress)
+
+ def test_advance_0_0(self):
+ progress = ProgressModel()
+ progress.advance()
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_advance_1_0(self):
+ progress = ProgressModel()
+ progress.advance()
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_set_width_absolute(self):
+ progress = ProgressModel()
+ progress.set_width(10)
+ self.assertProgressSummary(0, 10, progress)
+
+ def test_set_width_absolute_preserves_pos(self):
+ progress = ProgressModel()
+ progress.advance()
+ progress.set_width(2)
+ self.assertProgressSummary(1, 2, progress)
+
+ def test_adjust_width(self):
+ progress = ProgressModel()
+ progress.adjust_width(10)
+ self.assertProgressSummary(0, 10, progress)
+ progress.adjust_width(-10)
+ self.assertProgressSummary(0, 0, progress)
+
+ def test_adjust_width_preserves_pos(self):
+ progress = ProgressModel()
+ progress.advance()
+ progress.adjust_width(10)
+ self.assertProgressSummary(1, 10, progress)
+ progress.adjust_width(-10)
+ self.assertProgressSummary(1, 0, progress)
+
+ def test_push_preserves_progress(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ self.assertProgressSummary(1, 3, progress)
+
+ def test_advance_advances_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(1)
+ progress.advance()
+ self.assertProgressSummary(2, 3, progress)
+
+ def test_adjust_width_adjusts_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(2)
+ progress.advance()
+ self.assertProgressSummary(3, 6, progress)
+
+ def test_set_width_adjusts_substack(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.set_width(2)
+ progress.advance()
+ self.assertProgressSummary(3, 6, progress)
+
+ def test_pop_restores_progress(self):
+ progress = ProgressModel()
+ progress.adjust_width(3)
+ progress.advance()
+ progress.push()
+ progress.adjust_width(1)
+ progress.advance()
+ progress.pop()
+ self.assertProgressSummary(1, 3, progress)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py
new file mode 100644
index 00000000000..6ac84e15d63
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_run.py
@@ -0,0 +1,64 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2011 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+from testtools.compat import BytesIO
+import unittest
+
+from testtools import PlaceHolder, TestCase
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+from subunit import run
+from subunit.run import SubunitTestRunner
+
+
+class TestSubunitTestRunner(TestCase):
+
+ def test_includes_timing_output(self):
+ io = BytesIO()
+ runner = SubunitTestRunner(stream=io)
+ test = PlaceHolder('name')
+ runner.run(test)
+ io.seek(0)
+ eventstream = StreamResult()
+ subunit.ByteStreamToStreamResult(io).run(eventstream)
+ timestamps = [event[-1] for event in eventstream._events
+ if event is not None]
+ self.assertNotEqual([], timestamps)
+
+ def test_enumerates_tests_before_run(self):
+ io = BytesIO()
+ runner = SubunitTestRunner(stream=io)
+ test1 = PlaceHolder('name1')
+ test2 = PlaceHolder('name2')
+ case = unittest.TestSuite([test1, test2])
+ runner.run(case)
+ io.seek(0)
+ eventstream = StreamResult()
+ subunit.ByteStreamToStreamResult(io).run(eventstream)
+ self.assertEqual([
+ ('status', 'name1', 'exists'),
+ ('status', 'name2', 'exists'),
+ ], [event[:3] for event in eventstream._events[:2]])
+
+ def test_list_errors_if_errors_from_list_test(self):
+ io = BytesIO()
+ runner = SubunitTestRunner(stream=io)
+ def list_test(test):
+ return [], ['failed import']
+ self.patch(run, 'list_test', list_test)
+ exc = self.assertRaises(SystemExit, runner.list, None)
+ self.assertEqual((2,), exc.args)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py
new file mode 100644
index 00000000000..5f34b3bc75d
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_filter.py
@@ -0,0 +1,346 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.TestResultFilter."""
+
+from datetime import datetime
+import os
+import subprocess
+import sys
+from subunit import iso8601
+import unittest
+
+from testtools import TestCase
+from testtools.compat import _b, BytesIO
+from testtools.testresult.doubles import ExtendedTestResult, StreamResult
+
+import subunit
+from subunit.test_results import make_tag_filter, TestResultFilter
+from subunit import ByteStreamToStreamResult, StreamResultToBytes
+
+
+class TestTestResultFilter(TestCase):
+ """Test for TestResultFilter, a TestResult object which filters tests."""
+
+ # While TestResultFilter works on Python objects, a subunit stream is an
+ # easy, pithy way of getting a series of test objects to call into the
+ # TestResult; and since TestResultFilter is intended for use with subunit,
+ # this also has the benefit of detecting any interface skew issues.
+ example_subunit_stream = _b("""\
+tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error [
+error details
+]
+test skipped
+skip skipped
+test todo
+xfail todo
+""")
+
+ def run_tests(self, result_filter, input_stream=None):
+ """Run tests through the given filter.
+
+ :param result_filter: A filtering TestResult object.
+ :param input_stream: Bytes of subunit stream data. If not provided,
+ uses TestTestResultFilter.example_subunit_stream.
+ """
+ if input_stream is None:
+ input_stream = self.example_subunit_stream
+ test = subunit.ProtocolTestCase(BytesIO(input_stream))
+ test.run(result_filter)
+
+ def test_default(self):
+ """The default is to exclude success and include everything else."""
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result)
+ self.run_tests(result_filter)
+ # skips are seen as success by default python TestResult.
+ self.assertEqual(['error'],
+ [error[0].id() for error in filtered_result.errors])
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(4, filtered_result.testsRun)
+
+ def test_tag_filter(self):
+ tag_filter = make_tag_filter(['global'], ['local'])
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(
+ result, filter_success=False, filter_predicate=tag_filter)
+ self.run_tests(result_filter)
+ tests_included = [
+ event[1] for event in result._events if event[0] == 'startTest']
+ tests_expected = list(map(
+ subunit.RemotedTestCase,
+ ['passed', 'error', 'skipped', 'todo']))
+ self.assertEquals(tests_expected, tests_included)
+
+ def test_tags_tracked_correctly(self):
+ tag_filter = make_tag_filter(['a'], [])
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(
+ result, filter_success=False, filter_predicate=tag_filter)
+ input_stream = _b(
+ "test: foo\n"
+ "tags: a\n"
+ "successful: foo\n"
+ "test: bar\n"
+ "successful: bar\n")
+ self.run_tests(result_filter, input_stream)
+ foo = subunit.RemotedTestCase('foo')
+ self.assertEquals(
+ [('startTest', foo),
+ ('tags', set(['a']), set()),
+ ('addSuccess', foo),
+ ('stopTest', foo),
+ ],
+ result._events)
+
+ def test_exclude_errors(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result, filter_error=True)
+ self.run_tests(result_filter)
+ # skips are seen as errors by default python TestResult.
+ self.assertEqual([], filtered_result.errors)
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(3, filtered_result.testsRun)
+
+ def test_fixup_expected_failures(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result,
+ fixup_expected_failures=set(["failed"]))
+ self.run_tests(result_filter)
+ self.assertEqual(['failed', 'todo'],
+ [failure[0].id() for failure in filtered_result.expectedFailures])
+ self.assertEqual([], filtered_result.failures)
+ self.assertEqual(4, filtered_result.testsRun)
+
+ def test_fixup_expected_errors(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result,
+ fixup_expected_failures=set(["error"]))
+ self.run_tests(result_filter)
+ self.assertEqual(['error', 'todo'],
+ [failure[0].id() for failure in filtered_result.expectedFailures])
+ self.assertEqual([], filtered_result.errors)
+ self.assertEqual(4, filtered_result.testsRun)
+
+ def test_fixup_unexpected_success(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result, filter_success=False,
+ fixup_expected_failures=set(["passed"]))
+ self.run_tests(result_filter)
+ self.assertEqual(['passed'],
+ [passed.id() for passed in filtered_result.unexpectedSuccesses])
+ self.assertEqual(5, filtered_result.testsRun)
+
+ def test_exclude_failure(self):
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result, filter_failure=True)
+ self.run_tests(result_filter)
+ self.assertEqual(['error'],
+ [error[0].id() for error in filtered_result.errors])
+ self.assertEqual([],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(3, filtered_result.testsRun)
+
+ def test_exclude_skips(self):
+ filtered_result = subunit.TestResultStats(None)
+ result_filter = TestResultFilter(filtered_result, filter_skip=True)
+ self.run_tests(result_filter)
+ self.assertEqual(0, filtered_result.skipped_tests)
+ self.assertEqual(2, filtered_result.failed_tests)
+ self.assertEqual(3, filtered_result.testsRun)
+
+ def test_include_success(self):
+ """Successes can be included if requested."""
+ filtered_result = unittest.TestResult()
+ result_filter = TestResultFilter(filtered_result,
+ filter_success=False)
+ self.run_tests(result_filter)
+ self.assertEqual(['error'],
+ [error[0].id() for error in filtered_result.errors])
+ self.assertEqual(['failed'],
+ [failure[0].id() for failure in
+ filtered_result.failures])
+ self.assertEqual(5, filtered_result.testsRun)
+
+ def test_filter_predicate(self):
+ """You can filter by predicate callbacks"""
+ # 0.0.7 and earlier did not support the 'tags' parameter, so we need
+ # to test that we still support behaviour without it.
+ filtered_result = unittest.TestResult()
+ def filter_cb(test, outcome, err, details):
+ return outcome == 'success'
+ result_filter = TestResultFilter(filtered_result,
+ filter_predicate=filter_cb,
+ filter_success=False)
+ self.run_tests(result_filter)
+ # Only success should pass
+ self.assertEqual(1, filtered_result.testsRun)
+
+ def test_filter_predicate_with_tags(self):
+ """You can filter by predicate callbacks that accept tags"""
+ filtered_result = unittest.TestResult()
+ def filter_cb(test, outcome, err, details, tags):
+ return outcome == 'success'
+ result_filter = TestResultFilter(filtered_result,
+ filter_predicate=filter_cb,
+ filter_success=False)
+ self.run_tests(result_filter)
+ # Only success should pass
+ self.assertEqual(1, filtered_result.testsRun)
+
+ def test_time_ordering_preserved(self):
+ # Passing a subunit stream through TestResultFilter preserves the
+ # relative ordering of 'time' directives and any other subunit
+ # directives that are still included.
+ date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
+ date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
+ date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
+ subunit_stream = _b('\n'.join([
+ "time: %s",
+ "test: foo",
+ "time: %s",
+ "error: foo",
+ "time: %s",
+ ""]) % (date_a, date_b, date_c))
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(result)
+ self.run_tests(result_filter, subunit_stream)
+ foo = subunit.RemotedTestCase('foo')
+ self.maxDiff = None
+ self.assertEqual(
+ [('time', date_a),
+ ('time', date_b),
+ ('startTest', foo),
+ ('addError', foo, {}),
+ ('stopTest', foo),
+ ('time', date_c)], result._events)
+
+ def test_time_passes_through_filtered_tests(self):
+ # Passing a subunit stream through TestResultFilter preserves 'time'
+ # directives even if a specific test is filtered out.
+ date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
+ date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
+ date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
+ subunit_stream = _b('\n'.join([
+ "time: %s",
+ "test: foo",
+ "time: %s",
+ "success: foo",
+ "time: %s",
+ ""]) % (date_a, date_b, date_c))
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(result)
+ result_filter.startTestRun()
+ self.run_tests(result_filter, subunit_stream)
+ result_filter.stopTestRun()
+ foo = subunit.RemotedTestCase('foo')
+ self.maxDiff = None
+ self.assertEqual(
+ [('startTestRun',),
+ ('time', date_a),
+ ('time', date_c),
+ ('stopTestRun',),], result._events)
+
+ def test_skip_preserved(self):
+ subunit_stream = _b('\n'.join([
+ "test: foo",
+ "skip: foo",
+ ""]))
+ result = ExtendedTestResult()
+ result_filter = TestResultFilter(result)
+ self.run_tests(result_filter, subunit_stream)
+ foo = subunit.RemotedTestCase('foo')
+ self.assertEquals(
+ [('startTest', foo),
+ ('addSkip', foo, {}),
+ ('stopTest', foo), ], result._events)
+
+ if sys.version_info < (2, 7):
+ # These tests require Python >=2.7.
+ del test_fixup_expected_failures, test_fixup_expected_errors, test_fixup_unexpected_success
+
+
+class TestFilterCommand(TestCase):
+
+ def run_command(self, args, stream):
+ root = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+ script_path = os.path.join(root, 'filters', 'subunit-filter')
+ command = [sys.executable, script_path] + list(args)
+ ps = subprocess.Popen(
+ command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = ps.communicate(stream)
+ if ps.returncode != 0:
+ raise RuntimeError("%s failed: %s" % (command, err))
+ return out
+
+ def test_default(self):
+ byte_stream = BytesIO()
+ stream = StreamResultToBytes(byte_stream)
+ stream.status(test_id="foo", test_status="inprogress")
+ stream.status(test_id="foo", test_status="skip")
+ output = self.run_command([], byte_stream.getvalue())
+ events = StreamResult()
+ ByteStreamToStreamResult(BytesIO(output)).run(events)
+ ids = set(event[1] for event in events._events)
+ self.assertEqual([
+ ('status', 'foo', 'inprogress'),
+ ('status', 'foo', 'skip'),
+ ], [event[:3] for event in events._events])
+
+ def test_tags(self):
+ byte_stream = BytesIO()
+ stream = StreamResultToBytes(byte_stream)
+ stream.status(
+ test_id="foo", test_status="inprogress", test_tags=set(["a"]))
+ stream.status(
+ test_id="foo", test_status="success", test_tags=set(["a"]))
+ stream.status(test_id="bar", test_status="inprogress")
+ stream.status(test_id="bar", test_status="inprogress")
+ stream.status(
+ test_id="baz", test_status="inprogress", test_tags=set(["a"]))
+ stream.status(
+ test_id="baz", test_status="success", test_tags=set(["a"]))
+ output = self.run_command(
+ ['-s', '--with-tag', 'a'], byte_stream.getvalue())
+ events = StreamResult()
+ ByteStreamToStreamResult(BytesIO(output)).run(events)
+ ids = set(event[1] for event in events._events)
+ self.assertEqual(set(['foo', 'baz']), ids)
+
+ def test_no_passthrough(self):
+ output = self.run_command(['--no-passthrough'], b'hi thar')
+ self.assertEqual(b'', output)
+
+ def test_passthrough(self):
+ output = self.run_command([], b'hi thar')
+ byte_stream = BytesIO()
+ stream = StreamResultToBytes(byte_stream)
+ stream.status(file_name="stdout", file_bytes=b'hi thar')
+ self.assertEqual(byte_stream.getvalue(), output)
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py
new file mode 100644
index 00000000000..7c5e42dff82
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_stats.py
@@ -0,0 +1,78 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.TestResultStats."""
+
+import unittest
+
+from testtools.compat import _b, BytesIO, StringIO
+
+import subunit
+
+
+class TestTestResultStats(unittest.TestCase):
+ """Test for TestResultStats, a TestResult object that generates stats."""
+
+ def setUp(self):
+ self.output = StringIO()
+ self.result = subunit.TestResultStats(self.output)
+ self.input_stream = BytesIO()
+ self.test = subunit.ProtocolTestCase(self.input_stream)
+
+ def test_stats_empty(self):
+ self.test.run(self.result)
+ self.assertEqual(0, self.result.total_tests)
+ self.assertEqual(0, self.result.passed_tests)
+ self.assertEqual(0, self.result.failed_tests)
+ self.assertEqual(set(), self.result.seen_tags)
+
+ def setUpUsedStream(self):
+ self.input_stream.write(_b("""tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error
+test skipped
+skip skipped
+test todo
+xfail todo
+"""))
+ self.input_stream.seek(0)
+ self.test.run(self.result)
+
+ def test_stats_smoke_everything(self):
+ # Statistics are calculated usefully.
+ self.setUpUsedStream()
+ self.assertEqual(5, self.result.total_tests)
+ self.assertEqual(2, self.result.passed_tests)
+ self.assertEqual(2, self.result.failed_tests)
+ self.assertEqual(1, self.result.skipped_tests)
+ self.assertEqual(set(["global", "local"]), self.result.seen_tags)
+
+ def test_stat_formatting(self):
+ expected = ("""
+Total tests: 5
+Passed tests: 2
+Failed tests: 2
+Skipped tests: 1
+Seen tags: global, local
+""")[1:]
+ self.setUpUsedStream()
+ self.result.formatStats()
+ self.assertEqual(expected, self.output.getvalue())
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py
new file mode 100644
index 00000000000..a16edc11591
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_subunit_tags.py
@@ -0,0 +1,85 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for subunit.tag_stream."""
+
+from io import BytesIO
+
+import testtools
+from testtools.matchers import Contains
+
+import subunit
+import subunit.test_results
+
+
+class TestSubUnitTags(testtools.TestCase):
+
+ def setUp(self):
+ super(TestSubUnitTags, self).setUp()
+ self.original = BytesIO()
+ self.filtered = BytesIO()
+
+ def test_add_tag(self):
+ # Literal values to avoid set sort-order dependencies. The commented-out
+ # Python code below shows the derivation.
+ # reference = BytesIO()
+ # stream = subunit.StreamResultToBytes(reference)
+ # stream.status(
+ # test_id='test', test_status='inprogress', test_tags=set(['quux', 'foo']))
+ # stream.status(
+ # test_id='test', test_status='success', test_tags=set(['bar', 'quux', 'foo']))
+ reference = [
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x03bar\x04quux\x03fooqn\xab)',
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x04quux\x03foo\x03bar\xaf\xbd\x9d\xd6',
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x04quux\x03bar\x03foo\x03\x04b\r',
+ b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+ b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
+ b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+ b'\x83\x1b\x04test\x03\x03foo\x04quux\x03bar\x08\xc2X\x83',
+ b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+ b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
+ b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+ b'\x83\x1b\x04test\x03\x03foo\x03bar\x04quux:\x05e\x80',
+ ]
+ stream = subunit.StreamResultToBytes(self.original)
+ stream.status(
+ test_id='test', test_status='inprogress', test_tags=set(['foo']))
+ stream.status(
+ test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
+ self.original.seek(0)
+ self.assertEqual(
+ 0, subunit.tag_stream(self.original, self.filtered, ["quux"]))
+ self.assertThat(reference, Contains(self.filtered.getvalue()))
+
+ def test_remove_tag(self):
+ reference = BytesIO()
+ stream = subunit.StreamResultToBytes(reference)
+ stream.status(
+ test_id='test', test_status='inprogress', test_tags=set(['foo']))
+ stream.status(
+ test_id='test', test_status='success', test_tags=set(['foo']))
+ stream = subunit.StreamResultToBytes(self.original)
+ stream.status(
+ test_id='test', test_status='inprogress', test_tags=set(['foo']))
+ stream.status(
+ test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
+ self.original.seek(0)
+ self.assertEqual(
+ 0, subunit.tag_stream(self.original, self.filtered, ["-bar"]))
+ self.assertEqual(reference.getvalue(), self.filtered.getvalue())
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py
new file mode 100644
index 00000000000..5b7c07a2eb3
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_tap2subunit.py
@@ -0,0 +1,387 @@
+#
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Tests for TAP2SubUnit."""
+
+from io import BytesIO, StringIO
+import unittest
+
+from testtools import TestCase
+from testtools.compat import _u
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+
+UTF8_TEXT = 'text/plain; charset=UTF8'
+
+
+class TestTAP2SubUnit(TestCase):
+ """Tests for TAP2SubUnit.
+
+ These tests test TAP string data in, and subunit string data out.
+ This is ok because the subunit protocol is intended to be stable,
+ but it might be easier/pithier to write tests against TAP string in,
+ parsed subunit objects out (by hooking the subunit stream to a subunit
+ protocol server.
+ """
+
+ def setUp(self):
+ super(TestTAP2SubUnit, self).setUp()
+ self.tap = StringIO()
+ self.subunit = BytesIO()
+
+ def test_skip_entire_file(self):
+ # A file
+ # 1..0 # Skipped: comment
+ # results in a single skipped test.
+ self.tap.write(_u("1..0 # Skipped: entire file skipped\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'file skip', 'skip', None, True,
+ 'tap comment', b'Skipped: entire file skipped', True, None, None,
+ None)])
+
+ def test_ok_test_pass(self):
+ # A file
+ # ok
+ # results in a passed test with name 'test 1' (a synthetic name as tap
+ # does not require named fixtures - it is the first test in the tap
+ # stream).
+ self.tap.write(_u("ok\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'success', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_test_number_pass(self):
+ # A file
+ # ok 1
+ # results in a passed test with name 'test 1'
+ self.tap.write(_u("ok 1\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'success', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_test_number_description_pass(self):
+ # A file
+ # ok 1 - There is a description
+ # results in a passed test with name 'test 1 - There is a description'
+ self.tap.write(_u("ok 1 - There is a description\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1 - There is a description',
+ 'success', None, False, None, None, True, None, None, None)])
+
+ def test_ok_test_description_pass(self):
+ # A file
+ # ok There is a description
+ # results in a passed test with name 'test 1 There is a description'
+ self.tap.write(_u("ok There is a description\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1 There is a description',
+ 'success', None, False, None, None, True, None, None, None)])
+
+ def test_ok_SKIP_skip(self):
+ # A file
+ # ok # SKIP
+ # results in a skip test with name 'test 1'
+ self.tap.write(_u("ok # SKIP\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'skip', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_skip_number_comment_lowercase(self):
+ self.tap.write(_u("ok 1 # skip no samba environment available, skipping compilation\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'skip', None, False, 'tap comment',
+ b'no samba environment available, skipping compilation', True,
+ 'text/plain; charset=UTF8', None, None)])
+
+ def test_ok_number_description_SKIP_skip_comment(self):
+ # A file
+ # ok 1 foo # SKIP Not done yet
+ # results in a skip test with name 'test 1 foo' and a log of
+ # Not done yet
+ self.tap.write(_u("ok 1 foo # SKIP Not done yet\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1 foo', 'skip', None, False,
+ 'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_ok_SKIP_skip_comment(self):
+ # A file
+ # ok # SKIP Not done yet
+ # results in a skip test with name 'test 1' and a log of Not done yet
+ self.tap.write(_u("ok # SKIP Not done yet\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'skip', None, False,
+ 'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_ok_TODO_xfail(self):
+ # A file
+ # ok # TODO
+ # results in an xfail test with name 'test 1'
+ self.tap.write(_u("ok # TODO\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'xfail', None, False, None,
+ None, True, None, None, None)])
+
+ def test_ok_TODO_xfail_comment(self):
+ # A file
+ # ok # TODO Not done yet
+ # results in an xfail test with name 'test 1' and a log of Not done yet
+ self.tap.write(_u("ok # TODO Not done yet\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([('status', 'test 1', 'xfail', None, False,
+ 'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_bail_out_errors(self):
+ # A file containing a line
+ # Bail out! COMMENT
+ # is treated as an error
+ self.tap.write(_u("ok 1 foo\n"))
+ self.tap.write(_u("Bail out! Lifejacket engaged\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 foo', 'success', None, False, None, None, True,
+ None, None, None),
+ ('status', 'Bail out! Lifejacket engaged', 'fail', None, False,
+ None, None, True, None, None, None)])
+
+ def test_missing_test_at_end_with_plan_adds_error(self):
+ # A file
+ # 1..3
+ # ok first test
+ # not ok third test
+ # results in three tests, with the third being created
+ self.tap.write(_u('1..3\n'))
+ self.tap.write(_u('ok first test\n'))
+ self.tap.write(_u('not ok second test\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 first test', 'success', None, False, None,
+ None, True, None, None, None),
+ ('status', 'test 2 second test', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3', 'fail', None, False, 'tap meta',
+ b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_missing_test_with_plan_adds_error(self):
+ # A file
+ # 1..3
+ # ok first test
+ # not ok 3 third test
+ # results in three tests, with the second being created
+ self.tap.write(_u('1..3\n'))
+ self.tap.write(_u('ok first test\n'))
+ self.tap.write(_u('not ok 3 third test\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 first test', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 2', 'fail', None, False, 'tap meta',
+ b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+ None, None),
+ ('status', 'test 3 third test', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_missing_test_no_plan_adds_error(self):
+ # A file
+ # ok first test
+ # not ok 3 third test
+ # results in three tests, with the second being created
+ self.tap.write(_u('ok first test\n'))
+ self.tap.write(_u('not ok 3 third test\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 first test', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 2', 'fail', None, False, 'tap meta',
+ b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+ None, None),
+ ('status', 'test 3 third test', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_four_tests_in_a_row_trailing_plan(self):
+ # A file
+ # ok 1 - first test in a script with trailing plan
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # 1..4
+ # results in four tests numbered and named
+ self.tap.write(_u('ok 1 - first test in a script with trailing plan\n'))
+ self.tap.write(_u('not ok 2 - second\n'))
+ self.tap.write(_u('ok 3 - third\n'))
+ self.tap.write(_u('not ok 4 - fourth\n'))
+ self.tap.write(_u('1..4\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 - first test in a script with trailing plan',
+ 'success', None, False, None, None, True, None, None, None),
+ ('status', 'test 2 - second', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3 - third', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_four_tests_in_a_row_with_plan(self):
+ # A file
+ # 1..4
+ # ok 1 - first test in a script with a plan
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # results in four tests numbered and named
+ self.tap.write(_u('1..4\n'))
+ self.tap.write(_u('ok 1 - first test in a script with a plan\n'))
+ self.tap.write(_u('not ok 2 - second\n'))
+ self.tap.write(_u('ok 3 - third\n'))
+ self.tap.write(_u('not ok 4 - fourth\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 - first test in a script with a plan',
+ 'success', None, False, None, None, True, None, None, None),
+ ('status', 'test 2 - second', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3 - third', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_four_tests_in_a_row_no_plan(self):
+ # A file
+ # ok 1 - first test in a script with no plan at all
+ # not ok 2 - second
+ # ok 3 - third
+ # not ok 4 - fourth
+ # results in four tests numbered and named
+ self.tap.write(_u('ok 1 - first test in a script with no plan at all\n'))
+ self.tap.write(_u('not ok 2 - second\n'))
+ self.tap.write(_u('ok 3 - third\n'))
+ self.tap.write(_u('not ok 4 - fourth\n'))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1 - first test in a script with no plan at all',
+ 'success', None, False, None, None, True, None, None, None),
+ ('status', 'test 2 - second', 'fail', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 3 - third', 'success', None, False, None, None,
+ True, None, None, None),
+ ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+ True, None, None, None)])
+
+ def test_todo_and_skip(self):
+ # A file
+ # not ok 1 - a fail but # TODO but is TODO
+ # not ok 2 - another fail # SKIP instead
+ # results in two tests, numbered and commented.
+ self.tap.write(_u("not ok 1 - a fail but # TODO but is TODO\n"))
+ self.tap.write(_u("not ok 2 - another fail # SKIP instead\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.subunit.seek(0)
+ events = StreamResult()
+ subunit.ByteStreamToStreamResult(self.subunit).run(events)
+ self.check_events([
+ ('status', 'test 1 - a fail but', 'xfail', None, False,
+ 'tap comment', b'but is TODO', True, 'text/plain; charset=UTF8',
+ None, None),
+ ('status', 'test 2 - another fail', 'skip', None, False,
+ 'tap comment', b'instead', True, 'text/plain; charset=UTF8',
+ None, None)])
+
+ def test_leading_comments_add_to_next_test_log(self):
+ # A file
+ # # comment
+ # ok
+ # ok
+ # results in two tests, with the comment included
+ # in the log of the first test and not the second.
+ self.tap.write(_u("# comment\n"))
+ self.tap.write(_u("ok\n"))
+ self.tap.write(_u("ok\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1', 'success', None, False, 'tap comment',
+ b'# comment', True, 'text/plain; charset=UTF8', None, None),
+ ('status', 'test 2', 'success', None, False, None, None, True,
+ None, None, None)])
+
+ def test_trailing_comments_are_included_in_last_test_log(self):
+ # A file
+ # ok
+ # ok
+ # # comment
+ # results in two tests, with the second having the comment
+ # attached to its log.
+ self.tap.write(_u("ok\n"))
+ self.tap.write(_u("ok\n"))
+ self.tap.write(_u("# comment\n"))
+ self.tap.seek(0)
+ result = subunit.TAP2SubUnit(self.tap, self.subunit)
+ self.assertEqual(0, result)
+ self.check_events([
+ ('status', 'test 1', 'success', None, False, None, None, True,
+ None, None, None),
+ ('status', 'test 2', 'success', None, False, 'tap comment',
+ b'# comment', True, 'text/plain; charset=UTF8', None, None)])
+
+ def check_events(self, events):
+ self.subunit.seek(0)
+ eventstream = StreamResult()
+ subunit.ByteStreamToStreamResult(self.subunit).run(eventstream)
+ self.assertEqual(events, eventstream._events)
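A minimal sketch, assuming the same in-memory streams and the testtools StreamResult double these tests rely on, of the TAP-to-subunit loop the assertions above exercise: write TAP text, convert it with subunit.TAP2SubUnit, then decode the byte stream back into status events.

    from io import BytesIO, StringIO

    import subunit
    from testtools.testresult.doubles import StreamResult

    # TAP text in, subunit v2 bytes out; TAP2SubUnit returns 0 on success,
    # which is what the tests above assert.
    tap = StringIO("1..2\nok 1 - first\nnot ok 2 - second\n")
    out = BytesIO()
    assert subunit.TAP2SubUnit(tap, out) == 0

    # Decode the bytes back into ('status', ...) tuples like the ones
    # compared by check_events above.
    out.seek(0)
    events = StreamResult()
    subunit.ByteStreamToStreamResult(out).run(events)
    for event in events._events:
        print(event)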
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py
new file mode 100644
index 00000000000..c6008f42eb2
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol.py
@@ -0,0 +1,1362 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import datetime
+import unittest
+import os
+
+from testtools import PlaceHolder, skipIf, TestCase, TestResult
+from testtools.compat import _b, _u, BytesIO
+from testtools.content import Content, TracebackContent, text_content
+from testtools.content_type import ContentType
+try:
+ from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+except ImportError:
+ from testtools.tests.helpers import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+from testtools.matchers import Contains
+
+import subunit
+from subunit.tests import (
+ _remote_exception_repr,
+ _remote_exception_str,
+ _remote_exception_str_chunked,
+ )
+import subunit.iso8601 as iso8601
+
+
+def details_to_str(details):
+ return TestResult()._err_details_to_string(None, details=details)
+
+
+class TestTestImports(unittest.TestCase):
+
+ def test_imports(self):
+ from subunit import DiscardStream
+ from subunit import TestProtocolServer
+ from subunit import RemotedTestCase
+ from subunit import RemoteError
+ from subunit import ExecTestCase
+ from subunit import IsolatedTestCase
+ from subunit import TestProtocolClient
+ from subunit import ProtocolTestCase
+
+
+class TestDiscardStream(unittest.TestCase):
+
+ def test_write(self):
+ subunit.DiscardStream().write("content")
+
+
+class TestProtocolServerForward(unittest.TestCase):
+
+ def test_story(self):
+ client = unittest.TestResult()
+ out = BytesIO()
+ protocol = subunit.TestProtocolServer(client, forward_stream=out)
+ pipe = BytesIO(_b("test old mcdonald\n"
+ "success old mcdonald\n"))
+ protocol.readFrom(pipe)
+ self.assertEqual(client.testsRun, 1)
+ self.assertEqual(pipe.getvalue(), out.getvalue())
+
+ def test_not_command(self):
+ client = unittest.TestResult()
+ out = BytesIO()
+ protocol = subunit.TestProtocolServer(client,
+ stream=subunit.DiscardStream(), forward_stream=out)
+ pipe = BytesIO(_b("success old mcdonald\n"))
+ protocol.readFrom(pipe)
+ self.assertEqual(client.testsRun, 0)
+ self.assertEqual(_b(""), out.getvalue())
+
+
+class TestTestProtocolServerPipe(unittest.TestCase):
+
+ def test_story(self):
+ client = unittest.TestResult()
+ protocol = subunit.TestProtocolServer(client)
+ traceback = "foo.c:53:ERROR invalid state\n"
+ pipe = BytesIO(_b("test old mcdonald\n"
+ "success old mcdonald\n"
+ "test bing crosby\n"
+ "failure bing crosby [\n"
+ + traceback +
+ "]\n"
+ "test an error\n"
+ "error an error\n"))
+ protocol.readFrom(pipe)
+ bing = subunit.RemotedTestCase("bing crosby")
+ an_error = subunit.RemotedTestCase("an error")
+ self.assertEqual(client.errors,
+ [(an_error, _remote_exception_repr + '\n')])
+ self.assertEqual(
+ client.failures,
+ [(bing, _remote_exception_repr + ": "
+ + details_to_str({'traceback': text_content(traceback)}) + "\n")])
+ self.assertEqual(client.testsRun, 3)
+
+ def test_non_test_characters_forwarded_immediately(self):
+ pass
+
+
+class TestTestProtocolServerStartTest(unittest.TestCase):
+
+ def setUp(self):
+ self.client = Python26TestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.client, self.stream)
+
+ def test_start_test(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_start_testing(self):
+ self.protocol.lineReceived(_b("testing old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_start_test_colon(self):
+ self.protocol.lineReceived(_b("test: old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+ def test_indented_test_colon_ignored(self):
+ ignored_line = _b(" test: old mcdonald\n")
+ self.protocol.lineReceived(ignored_line)
+ self.assertEqual([], self.client._events)
+ self.assertEqual(self.stream.getvalue(), ignored_line)
+
+ def test_start_testing_colon(self):
+ self.protocol.lineReceived(_b("testing: old mcdonald\n"))
+ self.assertEqual(self.client._events,
+ [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+
+class TestTestProtocolServerPassThrough(unittest.TestCase):
+
+ def setUp(self):
+ self.stdout = BytesIO()
+ self.test = subunit.RemotedTestCase("old mcdonald")
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client, self.stdout)
+
+ def keywords_before_test(self):
+ self.protocol.lineReceived(_b("failure a\n"))
+ self.protocol.lineReceived(_b("failure: a\n"))
+ self.protocol.lineReceived(_b("error a\n"))
+ self.protocol.lineReceived(_b("error: a\n"))
+ self.protocol.lineReceived(_b("success a\n"))
+ self.protocol.lineReceived(_b("success: a\n"))
+ self.protocol.lineReceived(_b("successful a\n"))
+ self.protocol.lineReceived(_b("successful: a\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertEqual(self.stdout.getvalue(), _b("failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n"))
+
+ def test_keywords_before_test(self):
+ self.keywords_before_test()
+ self.assertEqual(self.client._events, [])
+
+ def test_keywords_after_error(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("error old mcdonald\n"))
+ self.keywords_before_test()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, {}),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_keywords_after_failure(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure old mcdonald\n"))
+ self.keywords_before_test()
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, {}),
+ ('stopTest', self.test),
+ ])
+
+ def test_keywords_after_success(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("success old mcdonald\n"))
+ self.keywords_before_test()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_keywords_after_test(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure a\n"))
+ self.protocol.lineReceived(_b("failure: a\n"))
+ self.protocol.lineReceived(_b("error a\n"))
+ self.protocol.lineReceived(_b("error: a\n"))
+ self.protocol.lineReceived(_b("success a\n"))
+ self.protocol.lineReceived(_b("success: a\n"))
+ self.protocol.lineReceived(_b("successful a\n"))
+ self.protocol.lineReceived(_b("successful: a\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.protocol.lineReceived(_b("failure old mcdonald\n"))
+ self.assertEqual(self.stdout.getvalue(), _b("test old mcdonald\n"
+ "failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n"))
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, {}),
+ ('stopTest', self.test),
+ ])
+
+ def test_keywords_during_failure(self):
+ # A smoke test to make sure that the details parsers have control
+ # appropriately.
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure: old mcdonald [\n"))
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure a\n"))
+ self.protocol.lineReceived(_b("failure: a\n"))
+ self.protocol.lineReceived(_b("error a\n"))
+ self.protocol.lineReceived(_b("error: a\n"))
+ self.protocol.lineReceived(_b("success a\n"))
+ self.protocol.lineReceived(_b("success: a\n"))
+ self.protocol.lineReceived(_b("successful a\n"))
+ self.protocol.lineReceived(_b("successful: a\n"))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertEqual(self.stdout.getvalue(), _b(""))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}),
+ lambda:[_b(
+ "test old mcdonald\n"
+ "failure a\n"
+ "failure: a\n"
+ "error a\n"
+ "error: a\n"
+ "success a\n"
+ "success: a\n"
+ "successful a\n"
+ "successful: a\n"
+ "]\n")])
+ self.assertEqual(self.client._events, [
+ ('startTest', self.test),
+ ('addFailure', self.test, details),
+ ('stopTest', self.test),
+ ])
+
+ def test_stdout_passthrough(self):
+ """Lines received which cannot be interpreted as any protocol action
+ should be passed through to sys.stdout.
+ """
+ bytes = _b("randombytes\n")
+ self.protocol.lineReceived(bytes)
+ self.assertEqual(self.stdout.getvalue(), bytes)
+
+
+class TestTestProtocolServerLostConnection(unittest.TestCase):
+
+ def setUp(self):
+ self.client = Python26TestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.test = subunit.RemotedTestCase("old mcdonald")
+
+ def test_lost_connection_no_input(self):
+ self.protocol.lostConnection()
+ self.assertEqual([], self.client._events)
+
+ def test_lost_connection_after_start(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lostConnection()
+ failure = subunit.RemoteError(
+ _u("lost connection during test 'old mcdonald'"))
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, failure),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connected_after_error(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("error old mcdonald\n"))
+ self.protocol.lostConnection()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, subunit.RemoteError(_u(""))),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def do_connection_lost(self, outcome, opening):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("%s old mcdonald %s" % (outcome, opening)))
+ self.protocol.lostConnection()
+ failure = subunit.RemoteError(
+ _u("lost connection during %s report of test 'old mcdonald'") %
+ outcome)
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, failure),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_error(self):
+ self.do_connection_lost("error", "[\n")
+
+ def test_lost_connection_during_error_details(self):
+ self.do_connection_lost("error", "[ multipart\n")
+
+ def test_lost_connected_after_failure(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("failure old mcdonald\n"))
+ self.protocol.lostConnection()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test, subunit.RemoteError(_u(""))),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_failure(self):
+ self.do_connection_lost("failure", "[\n")
+
+ def test_lost_connection_during_failure_details(self):
+ self.do_connection_lost("failure", "[ multipart\n")
+
+ def test_lost_connection_after_success(self):
+ self.protocol.lineReceived(_b("test old mcdonald\n"))
+ self.protocol.lineReceived(_b("success old mcdonald\n"))
+ self.protocol.lostConnection()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_lost_connection_during_success(self):
+ self.do_connection_lost("success", "[\n")
+
+ def test_lost_connection_during_success_details(self):
+ self.do_connection_lost("success", "[ multipart\n")
+
+ def test_lost_connection_during_skip(self):
+ self.do_connection_lost("skip", "[\n")
+
+ def test_lost_connection_during_skip_details(self):
+ self.do_connection_lost("skip", "[ multipart\n")
+
+ def test_lost_connection_during_xfail(self):
+ self.do_connection_lost("xfail", "[\n")
+
+ def test_lost_connection_during_xfail_details(self):
+ self.do_connection_lost("xfail", "[ multipart\n")
+
+ def test_lost_connection_during_uxsuccess(self):
+ self.do_connection_lost("uxsuccess", "[\n")
+
+ def test_lost_connection_during_uxsuccess_details(self):
+ self.do_connection_lost("uxsuccess", "[ multipart\n")
+
+
+class TestInTestMultipart(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase(_u("mcdonalds farm"))
+
+ def test__outcome_sets_details_parser(self):
+ self.protocol._reading_success_details.details_parser = None
+ self.protocol._state._outcome(0, _b("mcdonalds farm [ multipart\n"),
+ None, self.protocol._reading_success_details)
+ parser = self.protocol._reading_success_details.details_parser
+ self.assertNotEqual(None, parser)
+ self.assertTrue(isinstance(parser,
+ subunit.details.MultipartDetailsParser))
+
+
+class TestTestProtocolServerAddError(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def simple_error_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ details = {}
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_error(self):
+ self.simple_error_keyword("error")
+
+ def test_simple_error_colon(self):
+ self.simple_error_keyword("error:")
+
+ def test_error_empty_message(self):
+ self.protocol.lineReceived(_b("error mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("")])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def error_quoted_bracket(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("]\n")])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addError', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_error_quoted_bracket(self):
+ self.error_quoted_bracket("error")
+
+ def test_error_colon_quoted_bracket(self):
+ self.error_quoted_bracket("error:")
+
+
+class TestTestProtocolServerAddFailure(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def assertFailure(self, details):
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def simple_failure_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ details = {}
+ self.assertFailure(details)
+
+ def test_simple_failure(self):
+ self.simple_failure_keyword("failure")
+
+ def test_simple_failure_colon(self):
+ self.simple_failure_keyword("failure:")
+
+ def test_failure_empty_message(self):
+ self.protocol.lineReceived(_b("failure mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("")])
+ self.assertFailure(details)
+
+ def failure_quoted_bracket(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("]\n")])
+ self.assertFailure(details)
+
+ def test_failure_quoted_bracket(self):
+ self.failure_quoted_bracket("failure")
+
+ def test_failure_colon_quoted_bracket(self):
+ self.failure_quoted_bracket("failure:")
+
+
+class TestTestProtocolServerAddxFail(unittest.TestCase):
+ """Tests for the xfail keyword.
+
+ In Python this can thunk through to Success due to stdlib limitations (see
+ README).
+ """
+
+ def capture_expected_failure(self, test, err):
+ self._events.append((test, err))
+
+ def setup_python26(self):
+ """Set up a test object ready to be xfailed and thunk to success."""
+ self.client = Python26TestResult()
+ self.setup_protocol()
+
+ def setup_python27(self):
+ """Set up a test object ready to be xfailed."""
+ self.client = Python27TestResult()
+ self.setup_protocol()
+
+ def setup_python_ex(self):
+ """Set up a test object ready to be xfailed with details."""
+ self.client = ExtendedTestResult()
+ self.setup_protocol()
+
+ def setup_protocol(self):
+ """Set up the protocol based on self.client."""
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = self.client._events[-1][-1]
+
+ def simple_xfail_keyword(self, keyword, as_success):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.check_success_or_xfail(as_success)
+
+ def check_success_or_xfail(self, as_success, error_message=None):
+ if as_success:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+ else:
+ details = {}
+ if error_message is not None:
+ details['traceback'] = Content(
+ ContentType("text", "x-traceback", {'charset': 'utf8'}),
+ lambda:[_b(error_message)])
+ if isinstance(self.client, ExtendedTestResult):
+ value = details
+ else:
+ if error_message is not None:
+ value = subunit.RemoteError(details_to_str(details))
+ else:
+ value = subunit.RemoteError()
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addExpectedFailure', self.test, value),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_xfail(self):
+ self.setup_python26()
+ self.simple_xfail_keyword("xfail", True)
+ self.setup_python27()
+ self.simple_xfail_keyword("xfail", False)
+ self.setup_python_ex()
+ self.simple_xfail_keyword("xfail", False)
+
+ def test_simple_xfail_colon(self):
+ self.setup_python26()
+ self.simple_xfail_keyword("xfail:", True)
+ self.setup_python27()
+ self.simple_xfail_keyword("xfail:", False)
+ self.setup_python_ex()
+ self.simple_xfail_keyword("xfail:", False)
+
+ def test_xfail_empty_message(self):
+ self.setup_python26()
+ self.empty_message(True)
+ self.setup_python27()
+ self.empty_message(False)
+ self.setup_python_ex()
+ self.empty_message(False, error_message="")
+
+ def empty_message(self, as_success, error_message="\n"):
+ self.protocol.lineReceived(_b("xfail mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_success_or_xfail(as_success, error_message)
+
+ def xfail_quoted_bracket(self, keyword, as_success):
+ # This tests that the quoted detail is accepted; it cannot test that the
+ # detail is used, because there is no way to expose it in Python yet.
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_success_or_xfail(as_success, "]\n")
+
+ def test_xfail_quoted_bracket(self):
+ self.setup_python26()
+ self.xfail_quoted_bracket("xfail", True)
+ self.setup_python27()
+ self.xfail_quoted_bracket("xfail", False)
+ self.setup_python_ex()
+ self.xfail_quoted_bracket("xfail", False)
+
+ def test_xfail_colon_quoted_bracket(self):
+ self.setup_python26()
+ self.xfail_quoted_bracket("xfail:", True)
+ self.setup_python27()
+ self.xfail_quoted_bracket("xfail:", False)
+ self.setup_python_ex()
+ self.xfail_quoted_bracket("xfail:", False)
+
+
+class TestTestProtocolServerAddunexpectedSuccess(TestCase):
+ """Tests for the uxsuccess keyword."""
+
+ def capture_expected_failure(self, test, err):
+ self._events.append((test, err))
+
+ def setup_python26(self):
+ """Set up a test object whose unexpected success will thunk to a failure."""
+ self.client = Python26TestResult()
+ self.setup_protocol()
+
+ def setup_python27(self):
+ """Set up a test object ready to be marked as an unexpected success."""
+ self.client = Python27TestResult()
+ self.setup_protocol()
+
+ def setup_python_ex(self):
+ """Set up a test object ready to be marked as an unexpected success with details."""
+ self.client = ExtendedTestResult()
+ self.setup_protocol()
+
+ def setup_protocol(self):
+ """Set up the protocol based on self.client."""
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = self.client._events[-1][-1]
+
+ def simple_uxsuccess_keyword(self, keyword, as_fail):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.check_fail_or_uxsuccess(as_fail)
+
+ def check_fail_or_uxsuccess(self, as_fail, error_message=None):
+ details = {}
+ if error_message is not None:
+ details['traceback'] = Content(
+ ContentType("text", "x-traceback", {'charset': 'utf8'}),
+ lambda:[_b(error_message)])
+ if isinstance(self.client, ExtendedTestResult):
+ value = details
+ else:
+ value = None
+ if as_fail:
+ self.client._events[1] = self.client._events[1][:2]
+ # The failure value is generated inside the extended-to-original result
+ # decorator; TODO: use a testtools matcher to check it.
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addFailure', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+ elif value:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addUnexpectedSuccess', self.test, value),
+ ('stopTest', self.test),
+ ], self.client._events)
+ else:
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addUnexpectedSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_uxsuccess(self):
+ self.setup_python26()
+ self.simple_uxsuccess_keyword("uxsuccess", True)
+ self.setup_python27()
+ self.simple_uxsuccess_keyword("uxsuccess", False)
+ self.setup_python_ex()
+ self.simple_uxsuccess_keyword("uxsuccess", False)
+
+ def test_simple_uxsuccess_colon(self):
+ self.setup_python26()
+ self.simple_uxsuccess_keyword("uxsuccess:", True)
+ self.setup_python27()
+ self.simple_uxsuccess_keyword("uxsuccess:", False)
+ self.setup_python_ex()
+ self.simple_uxsuccess_keyword("uxsuccess:", False)
+
+ def test_uxsuccess_empty_message(self):
+ self.setup_python26()
+ self.empty_message(True)
+ self.setup_python27()
+ self.empty_message(False)
+ self.setup_python_ex()
+ self.empty_message(False, error_message="")
+
+ def empty_message(self, as_fail, error_message="\n"):
+ self.protocol.lineReceived(_b("uxsuccess mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_fail_or_uxsuccess(as_fail, error_message)
+
+ def uxsuccess_quoted_bracket(self, keyword, as_fail):
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.check_fail_or_uxsuccess(as_fail, "]\n")
+
+ def test_uxsuccess_quoted_bracket(self):
+ self.setup_python26()
+ self.uxsuccess_quoted_bracket("uxsuccess", True)
+ self.setup_python27()
+ self.uxsuccess_quoted_bracket("uxsuccess", False)
+ self.setup_python_ex()
+ self.uxsuccess_quoted_bracket("uxsuccess", False)
+
+ def test_uxsuccess_colon_quoted_bracket(self):
+ self.setup_python26()
+ self.uxsuccess_quoted_bracket("uxsuccess:", True)
+ self.setup_python27()
+ self.uxsuccess_quoted_bracket("uxsuccess:", False)
+ self.setup_python_ex()
+ self.uxsuccess_quoted_bracket("uxsuccess:", False)
+
+
+class TestTestProtocolServerAddSkip(unittest.TestCase):
+ """Tests for the skip keyword.
+
+ In Python this meets the testtools extended TestResult contract.
+ (See https://launchpad.net/testtools).
+ """
+
+ def setUp(self):
+ """Set up a test object ready to be skipped."""
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = self.client._events[-1][-1]
+
+ def assertSkip(self, reason):
+ details = {}
+ if reason is not None:
+ details['reason'] = Content(
+ ContentType("text", "plain"), lambda:[reason])
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSkip', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def simple_skip_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.assertSkip(None)
+
+ def test_simple_skip(self):
+ self.simple_skip_keyword("skip")
+
+ def test_simple_skip_colon(self):
+ self.simple_skip_keyword("skip:")
+
+ def test_skip_empty_message(self):
+ self.protocol.lineReceived(_b("skip mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertSkip(_b(""))
+
+ def skip_quoted_bracket(self, keyword):
+ # This tests that the quoted detail is accepted; it cannot test that the
+ # detail is used, because there is no way to expose it in Python yet.
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ self.assertSkip(_b("]\n"))
+
+ def test_skip_quoted_bracket(self):
+ self.skip_quoted_bracket("skip")
+
+ def test_skip_colon_quoted_bracket(self):
+ self.skip_quoted_bracket("skip:")
+
+
+class TestTestProtocolServerAddSuccess(unittest.TestCase):
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+ def simple_success_keyword(self, keyword):
+ self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_simple_success(self):
+ self.simple_success_keyword("successful")
+
+ def test_simple_success_colon(self):
+ self.simple_success_keyword("successful:")
+
+ def assertSuccess(self, details):
+ self.assertEqual([
+ ('startTest', self.test),
+ ('addSuccess', self.test, details),
+ ('stopTest', self.test),
+ ], self.client._events)
+
+ def test_success_empty_message(self):
+ self.protocol.lineReceived(_b("success mcdonalds farm [\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['message'] = Content(ContentType("text", "plain"),
+ lambda:[_b("")])
+ self.assertSuccess(details)
+
+ def success_quoted_bracket(self, keyword):
+ # This tests that the quoted detail is accepted; it cannot test that the
+ # detail is used, because there is no way to expose it in Python yet.
+ self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+ self.protocol.lineReceived(_b(" ]\n"))
+ self.protocol.lineReceived(_b("]\n"))
+ details = {}
+ details['message'] = Content(ContentType("text", "plain"),
+ lambda:[_b("]\n")])
+ self.assertSuccess(details)
+
+ def test_success_quoted_bracket(self):
+ self.success_quoted_bracket("success")
+
+ def test_success_colon_quoted_bracket(self):
+ self.success_quoted_bracket("success:")
+
+
+class TestTestProtocolServerProgress(unittest.TestCase):
+ """Test receipt of progress: directives."""
+
+ def test_progress_accepted_stdlib(self):
+ self.result = Python26TestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("progress: 23"))
+ self.protocol.lineReceived(_b("progress: -2"))
+ self.protocol.lineReceived(_b("progress: +4"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+
+ def test_progress_accepted_extended(self):
+ # With a progress capable TestResult, progress events are emitted.
+ self.result = ExtendedTestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("progress: 23"))
+ self.protocol.lineReceived(_b("progress: push"))
+ self.protocol.lineReceived(_b("progress: -2"))
+ self.protocol.lineReceived(_b("progress: pop"))
+ self.protocol.lineReceived(_b("progress: +4"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+ self.assertEqual([
+ ('progress', 23, subunit.PROGRESS_SET),
+ ('progress', None, subunit.PROGRESS_PUSH),
+ ('progress', -2, subunit.PROGRESS_CUR),
+ ('progress', None, subunit.PROGRESS_POP),
+ ('progress', 4, subunit.PROGRESS_CUR),
+ ], self.result._events)
+
+
+class TestTestProtocolServerStreamTags(unittest.TestCase):
+ """Test managing tags on the protocol level."""
+
+ def setUp(self):
+ self.client = ExtendedTestResult()
+ self.protocol = subunit.TestProtocolServer(self.client)
+
+ def test_initial_tags(self):
+ self.protocol.lineReceived(_b("tags: foo bar:baz quux\n"))
+ self.assertEqual([
+ ('tags', set(["foo", "bar:baz", "quux"]), set()),
+ ], self.client._events)
+
+ def test_minus_removes_tags(self):
+ self.protocol.lineReceived(_b("tags: -bar quux\n"))
+ self.assertEqual([
+ ('tags', set(["quux"]), set(["bar"])),
+ ], self.client._events)
+
+ def test_tags_do_not_get_set_on_test(self):
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ test = self.client._events[0][-1]
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+ def test_tags_do_not_get_set_on_global_tags(self):
+ self.protocol.lineReceived(_b("tags: foo bar\n"))
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ test = self.client._events[-1][-1]
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+ def test_tags_get_set_on_test_tags(self):
+ self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+ test = self.client._events[-1][-1]
+ self.protocol.lineReceived(_b("tags: foo bar\n"))
+ self.protocol.lineReceived(_b("success mcdonalds farm\n"))
+ self.assertEqual(None, getattr(test, 'tags', None))
+
+
+class TestTestProtocolServerStreamTime(unittest.TestCase):
+ """Test managing time information at the protocol level."""
+
+ def test_time_accepted_stdlib(self):
+ self.result = Python26TestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+
+ def test_time_accepted_extended(self):
+ self.result = ExtendedTestResult()
+ self.stream = BytesIO()
+ self.protocol = subunit.TestProtocolServer(self.result,
+ stream=self.stream)
+ self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
+ self.assertEqual(_b(""), self.stream.getvalue())
+ self.assertEqual([
+ ('time', datetime.datetime(2001, 12, 12, 12, 59, 59, 0,
+ iso8601.Utc()))
+ ], self.result._events)
+
+
+class TestRemotedTestCase(unittest.TestCase):
+
+ def test_simple(self):
+ test = subunit.RemotedTestCase("A test description")
+ self.assertRaises(NotImplementedError, test.setUp)
+ self.assertRaises(NotImplementedError, test.tearDown)
+ self.assertEqual("A test description",
+ test.shortDescription())
+ self.assertEqual("A test description",
+ test.id())
+ self.assertEqual("A test description (subunit.RemotedTestCase)", "%s" % test)
+ self.assertEqual("<subunit.RemotedTestCase description="
+ "'A test description'>", "%r" % test)
+ result = unittest.TestResult()
+ test.run(result)
+ self.assertEqual([(test, _remote_exception_repr + ": "
+ "Cannot run RemotedTestCases.\n\n")],
+ result.errors)
+ self.assertEqual(1, result.testsRun)
+ another_test = subunit.RemotedTestCase("A test description")
+ self.assertEqual(test, another_test)
+ different_test = subunit.RemotedTestCase("ofo")
+ self.assertNotEqual(test, different_test)
+ self.assertNotEqual(another_test, different_test)
+
+
+class TestRemoteError(unittest.TestCase):
+
+ def test_eq(self):
+ error = subunit.RemoteError(_u("Something went wrong"))
+ another_error = subunit.RemoteError(_u("Something went wrong"))
+ different_error = subunit.RemoteError(_u("boo!"))
+ self.assertEqual(error, another_error)
+ self.assertNotEqual(error, different_error)
+ self.assertNotEqual(different_error, another_error)
+
+ def test_empty_constructor(self):
+ self.assertEqual(subunit.RemoteError(), subunit.RemoteError(_u("")))
+
+
+class TestExecTestCase(unittest.TestCase):
+
+ class SampleExecTestCase(subunit.ExecTestCase):
+
+ def test_sample_method(self):
+ """sample-script.py"""
+ # the sample script runs three tests, one each
+ # that fails, errors and succeeds
+
+ def test_sample_method_args(self):
+ """sample-script.py foo"""
+ # sample that will run just one test.
+
+ def test_construct(self):
+ test = self.SampleExecTestCase("test_sample_method")
+ self.assertEqual(test.script,
+ subunit.join_dir(__file__, 'sample-script.py'))
+
+ def test_args(self):
+ result = unittest.TestResult()
+ test = self.SampleExecTestCase("test_sample_method_args")
+ test.run(result)
+ self.assertEqual(1, result.testsRun)
+
+ def test_run(self):
+ result = ExtendedTestResult()
+ test = self.SampleExecTestCase("test_sample_method")
+ test.run(result)
+ mcdonald = subunit.RemotedTestCase("old mcdonald")
+ bing = subunit.RemotedTestCase("bing crosby")
+ bing_details = {}
+ bing_details['traceback'] = Content(ContentType("text", "x-traceback",
+ {'charset': 'utf8'}), lambda:[_b("foo.c:53:ERROR invalid state\n")])
+ an_error = subunit.RemotedTestCase("an error")
+ error_details = {}
+ self.assertEqual([
+ ('startTest', mcdonald),
+ ('addSuccess', mcdonald),
+ ('stopTest', mcdonald),
+ ('startTest', bing),
+ ('addFailure', bing, bing_details),
+ ('stopTest', bing),
+ ('startTest', an_error),
+ ('addError', an_error, error_details),
+ ('stopTest', an_error),
+ ], result._events)
+
+ def test_debug(self):
+ test = self.SampleExecTestCase("test_sample_method")
+ test.debug()
+
+ def test_count_test_cases(self):
+ """TODO run the child process and count responses to determine the count."""
+
+ def test_join_dir(self):
+ sibling = subunit.join_dir(__file__, 'foo')
+ filedir = os.path.abspath(os.path.dirname(__file__))
+ expected = os.path.join(filedir, 'foo')
+ self.assertEqual(sibling, expected)
+
+
+class DoExecTestCase(subunit.ExecTestCase):
+
+ def test_working_script(self):
+ """sample-two-script.py"""
+
+
+class TestIsolatedTestCase(TestCase):
+
+ class SampleIsolatedTestCase(subunit.IsolatedTestCase):
+
+ SETUP = False
+ TEARDOWN = False
+ TEST = False
+
+ def setUp(self):
+ TestIsolatedTestCase.SampleIsolatedTestCase.SETUP = True
+
+ def tearDown(self):
+ TestIsolatedTestCase.SampleIsolatedTestCase.TEARDOWN = True
+
+ def test_sets_global_state(self):
+ TestIsolatedTestCase.SampleIsolatedTestCase.TEST = True
+
+
+ def test_construct(self):
+ self.SampleIsolatedTestCase("test_sets_global_state")
+
+ @skipIf(os.name != "posix", "Need a posix system for forking tests")
+ def test_run(self):
+ result = unittest.TestResult()
+ test = self.SampleIsolatedTestCase("test_sets_global_state")
+ test.run(result)
+ self.assertEqual(result.testsRun, 1)
+ self.assertEqual(self.SampleIsolatedTestCase.SETUP, False)
+ self.assertEqual(self.SampleIsolatedTestCase.TEARDOWN, False)
+ self.assertEqual(self.SampleIsolatedTestCase.TEST, False)
+
+ def test_debug(self):
+ pass
+ #test = self.SampleExecTestCase("test_sample_method")
+ #test.debug()
+
+
+class TestIsolatedTestSuite(TestCase):
+
+ class SampleTestToIsolate(unittest.TestCase):
+
+ SETUP = False
+ TEARDOWN = False
+ TEST = False
+
+ def setUp(self):
+ TestIsolatedTestSuite.SampleTestToIsolate.SETUP = True
+
+ def tearDown(self):
+ TestIsolatedTestSuite.SampleTestToIsolate.TEARDOWN = True
+
+ def test_sets_global_state(self):
+ TestIsolatedTestSuite.SampleTestToIsolate.TEST = True
+
+
+ def test_construct(self):
+ subunit.IsolatedTestSuite()
+
+ @skipIf(os.name != "posix", "Need a posix system for forking tests")
+ def test_run(self):
+ result = unittest.TestResult()
+ suite = subunit.IsolatedTestSuite()
+ sub_suite = unittest.TestSuite()
+ sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+ sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+ suite.addTest(sub_suite)
+ suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+ suite.run(result)
+ self.assertEqual(result.testsRun, 3)
+ self.assertEqual(self.SampleTestToIsolate.SETUP, False)
+ self.assertEqual(self.SampleTestToIsolate.TEARDOWN, False)
+ self.assertEqual(self.SampleTestToIsolate.TEST, False)
+
+
+class TestTestProtocolClient(TestCase):
+
+ def setUp(self):
+ super(TestTestProtocolClient, self).setUp()
+ self.io = BytesIO()
+ self.protocol = subunit.TestProtocolClient(self.io)
+ self.unicode_test = PlaceHolder(_u('\u2603'))
+ self.test = TestTestProtocolClient("test_start_test")
+ self.sample_details = {'something':Content(
+ ContentType('text', 'plain'), lambda:[_b('serialised\nform')])}
+ self.sample_tb_details = dict(self.sample_details)
+ self.sample_tb_details['traceback'] = TracebackContent(
+ subunit.RemoteError(_u("boo qux")), self.test)
+
+ def test_start_test(self):
+ """Test startTest on a TestProtocolClient."""
+ self.protocol.startTest(self.test)
+ self.assertEqual(self.io.getvalue(), _b("test: %s\n" % self.test.id()))
+
+ def test_start_test_unicode_id(self):
+ """Test startTest on a TestProtocolClient."""
+ self.protocol.startTest(self.unicode_test)
+ expected = _b("test: ") + _u('\u2603').encode('utf8') + _b("\n")
+ self.assertEqual(expected, self.io.getvalue())
+
+ def test_stop_test(self):
+ # stopTest doesn't output anything.
+ self.protocol.stopTest(self.test)
+ self.assertEqual(self.io.getvalue(), _b(""))
+
+ def test_add_success(self):
+ """Test addSuccess on a TestProtocolClient."""
+ self.protocol.addSuccess(self.test)
+ self.assertEqual(
+ self.io.getvalue(), _b("successful: %s\n" % self.test.id()))
+
+ def test_add_outcome_unicode_id(self):
+ """Test addSuccess on a TestProtocolClient."""
+ self.protocol.addSuccess(self.unicode_test)
+ expected = _b("successful: ") + _u('\u2603').encode('utf8') + _b("\n")
+ self.assertEqual(expected, self.io.getvalue())
+
+ def test_add_success_details(self):
+ """Test addSuccess on a TestProtocolClient with details."""
+ self.protocol.addSuccess(self.test, details=self.sample_details)
+ self.assertEqual(
+ self.io.getvalue(), _b("successful: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
+
+ def test_add_failure(self):
+ """Test addFailure on a TestProtocolClient."""
+ self.protocol.addFailure(
+ self.test, subunit.RemoteError(_u("boo qux")))
+ self.assertEqual(
+ self.io.getvalue(),
+ _b(('failure: %s [\n' + _remote_exception_str + ': boo qux\n]\n')
+ % self.test.id()))
+
+ def test_add_failure_details(self):
+ """Test addFailure on a TestProtocolClient with details."""
+ self.protocol.addFailure(
+ self.test, details=self.sample_tb_details)
+ self.assertThat([
+ _b(("failure: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ _b(("failure: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ ],
+ Contains(self.io.getvalue()))
+
+ def test_add_error(self):
+ """Test stopTest on a TestProtocolClient."""
+ self.protocol.addError(
+ self.test, subunit.RemoteError(_u("phwoar crikey")))
+ self.assertEqual(
+ self.io.getvalue(),
+ _b(('error: %s [\n' +
+ _remote_exception_str + ": phwoar crikey\n"
+ "]\n") % self.test.id()))
+
+ def test_add_error_details(self):
+ """Test stopTest on a TestProtocolClient with details."""
+ self.protocol.addError(
+ self.test, details=self.sample_tb_details)
+ self.assertThat([
+ _b(("error: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ _b(("error: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ ],
+ Contains(self.io.getvalue()))
+
+ def test_add_expected_failure(self):
+ """Test addExpectedFailure on a TestProtocolClient."""
+ self.protocol.addExpectedFailure(
+ self.test, subunit.RemoteError(_u("phwoar crikey")))
+ self.assertEqual(
+ self.io.getvalue(),
+ _b(('xfail: %s [\n' +
+ _remote_exception_str + ": phwoar crikey\n"
+ "]\n") % self.test.id()))
+
+ def test_add_expected_failure_details(self):
+ """Test addExpectedFailure on a TestProtocolClient with details."""
+ self.protocol.addExpectedFailure(
+ self.test, details=self.sample_tb_details)
+ self.assertThat([
+ _b(("xfail: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ _b(("xfail: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n"
+ "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+ "traceback\n" + _remote_exception_str_chunked +
+ "]\n") % self.test.id()),
+ ],
+ Contains(self.io.getvalue()))
+
+ def test_add_skip(self):
+ """Test addSkip on a TestProtocolClient."""
+ self.protocol.addSkip(
+ self.test, "Has it really?")
+ self.assertEqual(
+ self.io.getvalue(),
+ _b('skip: %s [\nHas it really?\n]\n' % self.test.id()))
+
+ def test_add_skip_details(self):
+ """Test addSkip on a TestProtocolClient with details."""
+ details = {'reason':Content(
+ ContentType('text', 'plain'), lambda:[_b('Has it really?')])}
+ self.protocol.addSkip(self.test, details=details)
+ self.assertEqual(
+ self.io.getvalue(),
+ _b("skip: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "reason\n"
+ "E\r\nHas it really?0\r\n"
+ "]\n" % self.test.id()))
+
+ def test_progress_set(self):
+ self.protocol.progress(23, subunit.PROGRESS_SET)
+ self.assertEqual(self.io.getvalue(), _b('progress: 23\n'))
+
+ def test_progress_neg_cur(self):
+ self.protocol.progress(-23, subunit.PROGRESS_CUR)
+ self.assertEqual(self.io.getvalue(), _b('progress: -23\n'))
+
+ def test_progress_pos_cur(self):
+ self.protocol.progress(23, subunit.PROGRESS_CUR)
+ self.assertEqual(self.io.getvalue(), _b('progress: +23\n'))
+
+ def test_progress_pop(self):
+ self.protocol.progress(1234, subunit.PROGRESS_POP)
+ self.assertEqual(self.io.getvalue(), _b('progress: pop\n'))
+
+ def test_progress_push(self):
+ self.protocol.progress(1234, subunit.PROGRESS_PUSH)
+ self.assertEqual(self.io.getvalue(), _b('progress: push\n'))
+
+ def test_time(self):
+ # Calling time() outputs a time signal immediately.
+ self.protocol.time(
+ datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc()))
+ self.assertEqual(
+ _b("time: 2009-10-11 12:13:14.000015Z\n"),
+ self.io.getvalue())
+
+ def test_add_unexpected_success(self):
+ """Test addUnexpectedSuccess on a TestProtocolClient."""
+ self.protocol.addUnexpectedSuccess(self.test)
+ self.assertEqual(
+ self.io.getvalue(), _b("uxsuccess: %s\n" % self.test.id()))
+
+ def test_add_unexpected_success_details(self):
+ """Test addUnexpectedSuccess on a TestProtocolClient with details."""
+ self.protocol.addUnexpectedSuccess(self.test, details=self.sample_details)
+ self.assertEqual(
+ self.io.getvalue(), _b("uxsuccess: %s [ multipart\n"
+ "Content-Type: text/plain\n"
+ "something\n"
+ "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
+
+ def test_tags_empty(self):
+ self.protocol.tags(set(), set())
+ self.assertEqual(_b(""), self.io.getvalue())
+
+ def test_tags_add(self):
+ self.protocol.tags(set(['foo']), set())
+ self.assertEqual(_b("tags: foo\n"), self.io.getvalue())
+
+ def test_tags_both(self):
+ self.protocol.tags(set(['quux']), set(['bar']))
+ self.assertThat(
+ [b"tags: quux -bar\n", b"tags: -bar quux\n"],
+ Contains(self.io.getvalue()))
+
+ def test_tags_gone(self):
+ self.protocol.tags(set(), set(['bar']))
+ self.assertEqual(_b("tags: -bar\n"), self.io.getvalue())
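Taken together, the TestProtocolClient tests above (which serialise results as "test:" / "successful:" lines) and the TestProtocolServer tests earlier in this file (which replay such lines into a TestResult) describe a round trip of the v1 protocol. A short sketch of that round trip, using only calls exercised in this file, might look like:

    import unittest
    from io import BytesIO

    import subunit

    # Writer side: serialise a run into an in-memory buffer.
    buf = BytesIO()
    client = subunit.TestProtocolClient(buf)
    case = subunit.RemotedTestCase("old mcdonald")  # a test with a stable id()
    client.startTest(case)    # emits "test: old mcdonald"
    client.addSuccess(case)   # emits "successful: old mcdonald"

    # Reader side: replay the buffer into a plain unittest.TestResult.
    buf.seek(0)
    result = unittest.TestResult()
    subunit.TestProtocolServer(result).readFrom(buf)
    assert result.testsRun == 1 and result.wasSuccessful()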
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py
new file mode 100644
index 00000000000..c21392ceb9c
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_protocol2.py
@@ -0,0 +1,436 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+from io import BytesIO
+import datetime
+
+from testtools import TestCase
+from testtools.matchers import Contains, HasLength
+from testtools.tests.test_testresult import TestStreamResultContract
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+import subunit.iso8601 as iso8601
+
+CONSTANT_ENUM = b'\xb3)\x01\x0c\x03foo\x08U_\x1b'
+CONSTANT_INPROGRESS = b'\xb3)\x02\x0c\x03foo\x8e\xc1-\xb5'
+CONSTANT_SUCCESS = b'\xb3)\x03\x0c\x03fooE\x9d\xfe\x10'
+CONSTANT_UXSUCCESS = b'\xb3)\x04\x0c\x03fooX\x98\xce\xa8'
+CONSTANT_SKIP = b'\xb3)\x05\x0c\x03foo\x93\xc4\x1d\r'
+CONSTANT_FAIL = b'\xb3)\x06\x0c\x03foo\x15Po\xa3'
+CONSTANT_XFAIL = b'\xb3)\x07\x0c\x03foo\xde\x0c\xbc\x06'
+CONSTANT_EOF = b'\xb3!\x10\x08S\x15\x88\xdc'
+CONSTANT_FILE_CONTENT = b'\xb3!@\x13\x06barney\x03wooA5\xe3\x8c'
+CONSTANT_MIME = b'\xb3! #\x1aapplication/foo; charset=1x3Q\x15'
+CONSTANT_TIMESTAMP = b'\xb3+\x03\x13<\x17T\xcf\x80\xaf\xc8\x03barI\x96>-'
+CONSTANT_ROUTE_CODE = b'\xb3-\x03\x13\x03bar\x06source\x9cY9\x19'
+CONSTANT_RUNNABLE = b'\xb3(\x03\x0c\x03foo\xe3\xea\xf5\xa4'
+CONSTANT_TAGS = [
+ b'\xb3)\x80\x15\x03bar\x02\x03foo\x03barTHn\xb4',
+ b'\xb3)\x80\x15\x03bar\x02\x03bar\x03foo\xf8\xf1\x91o',
+ ]
+
+
+class TestStreamResultToBytesContract(TestCase, TestStreamResultContract):
+ """Check that StreamResult behaves as testtools expects."""
+
+ def _make_result(self):
+ return subunit.StreamResultToBytes(BytesIO())
+
+
+class TestStreamResultToBytes(TestCase):
+
+ def _make_result(self):
+ output = BytesIO()
+ return subunit.StreamResultToBytes(output), output
+
+ def test_numbers(self):
+ result = subunit.StreamResultToBytes(BytesIO())
+ packet = []
+ self.assertRaises(Exception, result._write_number, -1, packet)
+ self.assertEqual([], packet)
+ result._write_number(0, packet)
+ self.assertEqual([b'\x00'], packet)
+ del packet[:]
+ result._write_number(63, packet)
+ self.assertEqual([b'\x3f'], packet)
+ del packet[:]
+ result._write_number(64, packet)
+ self.assertEqual([b'\x40\x40'], packet)
+ del packet[:]
+ result._write_number(16383, packet)
+ self.assertEqual([b'\x7f\xff'], packet)
+ del packet[:]
+ result._write_number(16384, packet)
+ self.assertEqual([b'\x80\x40', b'\x00'], packet)
+ del packet[:]
+ result._write_number(4194303, packet)
+ self.assertEqual([b'\xbf\xff', b'\xff'], packet)
+ del packet[:]
+ result._write_number(4194304, packet)
+ self.assertEqual([b'\xc0\x40\x00\x00'], packet)
+ del packet[:]
+ result._write_number(1073741823, packet)
+ self.assertEqual([b'\xff\xff\xff\xff'], packet)
+ del packet[:]
+ self.assertRaises(Exception, result._write_number, 1073741824, packet)
+ self.assertEqual([], packet)
+
+ def test_volatile_length(self):
+ # If the length of the packet data, before the length field itself is
+ # counted, sits right on the boundary of the length field's variable-length
+ # encoding, it is easy to get the length wrong by not accounting for the
+ # length field itself.
+ # That is, the encoder has to ensure that
+ # length == length_of_rest + length_of_length.
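+ # Worked example from the assertions below: with an empty file name and no
+ # file content the rest of the packet is 9 bytes, so a one-byte length
+ # field gives a total of 10 (the 0x0a checked first); with 53 content
+ # bytes the rest is 62 and 62 + 1 = 63 still fits in one length byte, but
+ # at 54 content bytes the rest is 63, 63 + 1 = 64 no longer fits, so the
+ # length takes two bytes and the true total becomes 65 (0x40 0x41).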
+ result, output = self._make_result()
+ # 1 byte short:
+ result.status(file_name="", file_bytes=b'\xff'*0)
+ self.assertThat(output.getvalue(), HasLength(10))
+ self.assertEqual(b'\x0a', output.getvalue()[3:4])
+ output.seek(0)
+ output.truncate()
+ # 1 byte long:
+ result.status(file_name="", file_bytes=b'\xff'*53)
+ self.assertThat(output.getvalue(), HasLength(63))
+ self.assertEqual(b'\x3f', output.getvalue()[3:4])
+ output.seek(0)
+ output.truncate()
+ # 2 bytes short
+ result.status(file_name="", file_bytes=b'\xff'*54)
+ self.assertThat(output.getvalue(), HasLength(65))
+ self.assertEqual(b'\x40\x41', output.getvalue()[3:5])
+ output.seek(0)
+ output.truncate()
+ # 2 bytes long
+ result.status(file_name="", file_bytes=b'\xff'*16371)
+ self.assertThat(output.getvalue(), HasLength(16383))
+ self.assertEqual(b'\x7f\xff', output.getvalue()[3:5])
+ output.seek(0)
+ output.truncate()
+ # 3 bytes short
+ result.status(file_name="", file_bytes=b'\xff'*16372)
+ self.assertThat(output.getvalue(), HasLength(16385))
+ self.assertEqual(b'\x80\x40\x01', output.getvalue()[3:6])
+ output.seek(0)
+ output.truncate()
+ # 3 bytes long
+ result.status(file_name="", file_bytes=b'\xff'*4194289)
+ self.assertThat(output.getvalue(), HasLength(4194303))
+ self.assertEqual(b'\xbf\xff\xff', output.getvalue()[3:6])
+ output.seek(0)
+ output.truncate()
+ self.assertRaises(Exception, result.status, file_name="",
+ file_bytes=b'\xff'*4194290)
+
+ def test_trivial_enumeration(self):
+ result, output = self._make_result()
+ result.status("foo", 'exists')
+ self.assertEqual(CONSTANT_ENUM, output.getvalue())
+
+ def test_inprogress(self):
+ result, output = self._make_result()
+ result.status("foo", 'inprogress')
+ self.assertEqual(CONSTANT_INPROGRESS, output.getvalue())
+
+ def test_success(self):
+ result, output = self._make_result()
+ result.status("foo", 'success')
+ self.assertEqual(CONSTANT_SUCCESS, output.getvalue())
+
+ def test_uxsuccess(self):
+ result, output = self._make_result()
+ result.status("foo", 'uxsuccess')
+ self.assertEqual(CONSTANT_UXSUCCESS, output.getvalue())
+
+ def test_skip(self):
+ result, output = self._make_result()
+ result.status("foo", 'skip')
+ self.assertEqual(CONSTANT_SKIP, output.getvalue())
+
+ def test_fail(self):
+ result, output = self._make_result()
+ result.status("foo", 'fail')
+ self.assertEqual(CONSTANT_FAIL, output.getvalue())
+
+ def test_xfail(self):
+ result, output = self._make_result()
+ result.status("foo", 'xfail')
+ self.assertEqual(CONSTANT_XFAIL, output.getvalue())
+
+ def test_unknown_status(self):
+ result, output = self._make_result()
+ self.assertRaises(Exception, result.status, "foo", 'boo')
+ self.assertEqual(b'', output.getvalue())
+
+ def test_eof(self):
+ result, output = self._make_result()
+ result.status(eof=True)
+ self.assertEqual(CONSTANT_EOF, output.getvalue())
+
+ def test_file_content(self):
+ result, output = self._make_result()
+ result.status(file_name="barney", file_bytes=b"woo")
+ self.assertEqual(CONSTANT_FILE_CONTENT, output.getvalue())
+
+ def test_mime(self):
+ result, output = self._make_result()
+ result.status(mime_type="application/foo; charset=1")
+ self.assertEqual(CONSTANT_MIME, output.getvalue())
+
+ def test_route_code(self):
+ result, output = self._make_result()
+ result.status(test_id="bar", test_status='success',
+ route_code="source")
+ self.assertEqual(CONSTANT_ROUTE_CODE, output.getvalue())
+
+ def test_runnable(self):
+ result, output = self._make_result()
+ result.status("foo", 'success', runnable=False)
+ self.assertEqual(CONSTANT_RUNNABLE, output.getvalue())
+
+ def test_tags(self):
+ result, output = self._make_result()
+ result.status(test_id="bar", test_tags=set(['foo', 'bar']))
+ self.assertThat(CONSTANT_TAGS, Contains(output.getvalue()))
+
+ def test_timestamp(self):
+ timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
+ iso8601.Utc())
+ result, output = self._make_result()
+ result.status(test_id="bar", test_status='success', timestamp=timestamp)
+ self.assertEqual(CONSTANT_TIMESTAMP, output.getvalue())
+
+
+class TestByteStreamToStreamResult(TestCase):
+
+ def test_non_subunit_encapsulated(self):
+ source = BytesIO(b"foo\nbar\n")
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual([
+ ('status', None, None, None, True, 'stdout', b'f', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'b', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'a', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'r', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
+ ], result._events)
+ self.assertEqual(b'', source.read())
+
+ def test_signature_middle_utf8_char(self):
+ utf8_bytes = b'\xe3\xb3\x8a'
+ source = BytesIO(utf8_bytes)
+ # Should be treated as one character (it is u'\u3cca') and wrapped
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(
+ result)
+ self.assertEqual([
+ ('status', None, None, None, True, 'stdout', b'\xe3', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\xb3', False, None, None, None),
+ ('status', None, None, None, True, 'stdout', b'\x8a', False, None, None, None),
+ ], result._events)
+
+ def test_non_subunit_disabled_raises(self):
+ source = BytesIO(b"foo\nbar\n")
+ result = StreamResult()
+ case = subunit.ByteStreamToStreamResult(source)
+ e = self.assertRaises(Exception, case.run, result)
+ self.assertEqual(b'f', e.args[1])
+ self.assertEqual(b'oo\nbar\n', source.read())
+ self.assertEqual([], result._events)
+
+ def test_trivial_enumeration(self):
+ source = BytesIO(CONSTANT_ENUM)
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual(b'', source.read())
+ self.assertEqual([
+ ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+ ], result._events)
+
+ def test_multiple_events(self):
+ source = BytesIO(CONSTANT_ENUM + CONSTANT_ENUM)
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual(b'', source.read())
+ self.assertEqual([
+ ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+ ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+ ], result._events)
+
+ def test_inprogress(self):
+ self.check_event(CONSTANT_INPROGRESS, 'inprogress')
+
+ def test_success(self):
+ self.check_event(CONSTANT_SUCCESS, 'success')
+
+ def test_uxsuccess(self):
+ self.check_event(CONSTANT_UXSUCCESS, 'uxsuccess')
+
+ def test_skip(self):
+ self.check_event(CONSTANT_SKIP, 'skip')
+
+ def test_fail(self):
+ self.check_event(CONSTANT_FAIL, 'fail')
+
+ def test_xfail(self):
+ self.check_event(CONSTANT_XFAIL, 'xfail')
+
+ def check_events(self, source_bytes, events):
+ source = BytesIO(source_bytes)
+ result = StreamResult()
+ subunit.ByteStreamToStreamResult(
+ source, non_subunit_name="stdout").run(result)
+ self.assertEqual(b'', source.read())
+ self.assertEqual(events, result._events)
+        # Any file attachments should be byte contents, as users assume that.
+ for event in result._events:
+ if event[5] is not None:
+ self.assertIsInstance(event[6], bytes)
+
+ def check_event(self, source_bytes, test_status=None, test_id="foo",
+ route_code=None, timestamp=None, tags=None, mime_type=None,
+ file_name=None, file_bytes=None, eof=False, runnable=True):
+ event = self._event(test_id=test_id, test_status=test_status,
+ tags=tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+ self.check_events(source_bytes, [event])
+
+ def _event(self, test_status=None, test_id=None, route_code=None,
+ timestamp=None, tags=None, mime_type=None, file_name=None,
+ file_bytes=None, eof=False, runnable=True):
+ return ('status', test_id, test_status, tags, runnable, file_name,
+ file_bytes, eof, mime_type, route_code, timestamp)
+
+ def test_eof(self):
+ self.check_event(CONSTANT_EOF, test_id=None, eof=True)
+
+ def test_file_content(self):
+ self.check_event(CONSTANT_FILE_CONTENT,
+ test_id=None, file_name="barney", file_bytes=b"woo")
+
+ def test_file_content_length_into_checksum(self):
+ # A bad file content length which creeps into the checksum.
+ bad_file_length_content = b'\xb3!@\x13\x06barney\x04woo\xdc\xe2\xdb\x35'
+ self.check_events(bad_file_length_content, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=bad_file_length_content,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b"File content extends past end of packet: claimed 4 bytes, 3 available",
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_packet_length_4_word_varint(self):
+ packet_data = b'\xb3!@\xc0\x00\x11'
+ self.check_events(packet_data, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=packet_data,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b"3 byte maximum given but 4 byte value found.",
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_mime(self):
+ self.check_event(CONSTANT_MIME,
+ test_id=None, mime_type='application/foo; charset=1')
+
+ def test_route_code(self):
+ self.check_event(CONSTANT_ROUTE_CODE,
+ 'success', route_code="source", test_id="bar")
+
+ def test_runnable(self):
+ self.check_event(CONSTANT_RUNNABLE,
+ test_status='success', runnable=False)
+
+ def test_tags(self):
+ self.check_event(CONSTANT_TAGS[0],
+ None, tags=set(['foo', 'bar']), test_id="bar")
+
+ def test_timestamp(self):
+ timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
+ iso8601.Utc())
+ self.check_event(CONSTANT_TIMESTAMP,
+ 'success', test_id='bar', timestamp=timestamp)
+
+ def test_bad_crc_errors_via_status(self):
+ file_bytes = CONSTANT_MIME[:-1] + b'\x00'
+ self.check_events( file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'Bad checksum - calculated (0x78335115), '
+ b'stored (0x78335100)',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_not_utf8_in_string(self):
+ file_bytes = CONSTANT_ROUTE_CODE[:5] + b'\xb4' + CONSTANT_ROUTE_CODE[6:-4] + b'\xce\x56\xc6\x17'
+ self.check_events(file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'UTF8 string at offset 2 is not UTF8',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_NULL_in_string(self):
+ file_bytes = CONSTANT_ROUTE_CODE[:6] + b'\x00' + CONSTANT_ROUTE_CODE[7:-4] + b'\xd7\x41\xac\xfe'
+ self.check_events(file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'UTF8 string at offset 2 contains NUL byte',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_bad_utf8_stringlength(self):
+ file_bytes = CONSTANT_ROUTE_CODE[:4] + b'\x3f' + CONSTANT_ROUTE_CODE[5:-4] + b'\xbe\x29\xe0\xc2'
+ self.check_events(file_bytes, [
+ self._event(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=file_bytes,
+ mime_type="application/octet-stream"),
+ self._event(test_id="subunit.parser", test_status="fail", eof=True,
+ file_name="Parser Error",
+ file_bytes=b'UTF8 string at offset 2 extends past end of '
+ b'packet: claimed 63 bytes, 10 available',
+ mime_type="text/plain;charset=utf8"),
+ ])
+
+ def test_route_code_and_file_content(self):
+ content = BytesIO()
+ subunit.StreamResultToBytes(content).status(
+ route_code='0', mime_type='text/plain', file_name='bar',
+ file_bytes=b'foo')
+ self.check_event(content.getvalue(), test_id=None, file_name='bar',
+ route_code='0', mime_type='text/plain', file_bytes=b'foo')
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py
new file mode 100644
index 00000000000..44f95b34c97
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/tests/test_test_results.py
@@ -0,0 +1,566 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import csv
+import datetime
+import sys
+import unittest
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
+from testtools.testresult.doubles import ExtendedTestResult
+
+import subunit
+import subunit.iso8601 as iso8601
+import subunit.test_results
+
+import testtools
+
+
+class LoggingDecorator(subunit.test_results.HookedTestResultDecorator):
+
+ def __init__(self, decorated):
+ self._calls = 0
+ super(LoggingDecorator, self).__init__(decorated)
+
+ def _before_event(self):
+ self._calls += 1
+
+
+class AssertBeforeTestResult(LoggingDecorator):
+ """A TestResult for checking preconditions."""
+
+ def __init__(self, decorated, test):
+ self.test = test
+ super(AssertBeforeTestResult, self).__init__(decorated)
+
+ def _before_event(self):
+ self.test.assertEqual(1, self.earlier._calls)
+ super(AssertBeforeTestResult, self)._before_event()
+
+
+class TimeCapturingResult(unittest.TestResult):
+
+ def __init__(self):
+ super(TimeCapturingResult, self).__init__()
+ self._calls = []
+ self.failfast = False
+
+ def time(self, a_datetime):
+ self._calls.append(a_datetime)
+
+
+class TestHookedTestResultDecorator(unittest.TestCase):
+
+ def setUp(self):
+ # An end to the chain
+ terminal = unittest.TestResult()
+ # Asserts that the call was made to self.result before asserter was
+ # called.
+ asserter = AssertBeforeTestResult(terminal, self)
+        # The result object we call, which must increase its call count.
+ self.result = LoggingDecorator(asserter)
+ asserter.earlier = self.result
+ self.decorated = asserter
+
+ def tearDown(self):
+ # The hook in self.result must have been called
+ self.assertEqual(1, self.result._calls)
+ # The hook in asserter must have been called too, otherwise the
+ # assertion about ordering won't have completed.
+ self.assertEqual(1, self.decorated._calls)
+
+ def test_startTest(self):
+ self.result.startTest(self)
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+
+ def test_stopTest(self):
+ self.result.stopTest(self)
+
+ def test_stopTestRun(self):
+ self.result.stopTestRun()
+
+ def test_addError(self):
+ self.result.addError(self, subunit.RemoteError())
+
+ def test_addError_details(self):
+ self.result.addError(self, details={})
+
+ def test_addFailure(self):
+ self.result.addFailure(self, subunit.RemoteError())
+
+ def test_addFailure_details(self):
+ self.result.addFailure(self, details={})
+
+ def test_addSuccess(self):
+ self.result.addSuccess(self)
+
+ def test_addSuccess_details(self):
+ self.result.addSuccess(self, details={})
+
+ def test_addSkip(self):
+ self.result.addSkip(self, "foo")
+
+ def test_addSkip_details(self):
+ self.result.addSkip(self, details={})
+
+ def test_addExpectedFailure(self):
+ self.result.addExpectedFailure(self, subunit.RemoteError())
+
+ def test_addExpectedFailure_details(self):
+ self.result.addExpectedFailure(self, details={})
+
+ def test_addUnexpectedSuccess(self):
+ self.result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_details(self):
+ self.result.addUnexpectedSuccess(self, details={})
+
+ def test_progress(self):
+ self.result.progress(1, subunit.PROGRESS_SET)
+
+ def test_wasSuccessful(self):
+ self.result.wasSuccessful()
+
+ def test_shouldStop(self):
+ self.result.shouldStop
+
+ def test_stop(self):
+ self.result.stop()
+
+ def test_time(self):
+ self.result.time(None)
+
+
+class TestAutoTimingTestResultDecorator(unittest.TestCase):
+
+ def setUp(self):
+        # An end to the chain, which captures time events.
+ terminal = TimeCapturingResult()
+ # The result object under test.
+ self.result = subunit.test_results.AutoTimingTestResultDecorator(
+ terminal)
+ self.decorated = terminal
+
+ def test_without_time_calls_time_is_called_and_not_None(self):
+ self.result.startTest(self)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertNotEqual(None, self.decorated._calls[0])
+
+ def test_no_time_from_progress(self):
+ self.result.progress(1, subunit.PROGRESS_CUR)
+ self.assertEqual(0, len(self.decorated._calls))
+
+ def test_no_time_from_shouldStop(self):
+ self.decorated.stop()
+ self.result.shouldStop
+ self.assertEqual(0, len(self.decorated._calls))
+
+ def test_calling_time_inhibits_automatic_time(self):
+ # Calling time() outputs a time signal immediately and prevents
+ # automatically adding one when other methods are called.
+ time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+ self.result.time(time)
+ self.result.startTest(self)
+ self.result.stopTest(self)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertEqual(time, self.decorated._calls[0])
+
+ def test_calling_time_None_enables_automatic_time(self):
+ time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+ self.result.time(time)
+ self.assertEqual(1, len(self.decorated._calls))
+ self.assertEqual(time, self.decorated._calls[0])
+ # Calling None passes the None through, in case other results care.
+ self.result.time(None)
+ self.assertEqual(2, len(self.decorated._calls))
+ self.assertEqual(None, self.decorated._calls[1])
+ # Calling other methods doesn't generate an automatic time event.
+ self.result.startTest(self)
+ self.assertEqual(3, len(self.decorated._calls))
+ self.assertNotEqual(None, self.decorated._calls[2])
+
+ def test_set_failfast_True(self):
+ self.assertFalse(self.decorated.failfast)
+ self.result.failfast = True
+ self.assertTrue(self.decorated.failfast)
+
+
+class TestTagCollapsingDecorator(TestCase):
+
+ def test_tags_collapsed_outside_of_tests(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.tags(set(['b']), set())
+ tag_collapser.startTest(self)
+ self.assertEquals(
+ [('tags', set(['a', 'b']), set([])),
+ ('startTest', self),
+ ], result._events)
+
+ def test_tags_collapsed_outside_of_tests_are_flushed(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ tag_collapser.startTestRun()
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.tags(set(['b']), set())
+ tag_collapser.startTest(self)
+ tag_collapser.addSuccess(self)
+ tag_collapser.stopTest(self)
+ tag_collapser.stopTestRun()
+ self.assertEquals(
+ [('startTestRun',),
+ ('tags', set(['a', 'b']), set([])),
+ ('startTest', self),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ('stopTestRun',),
+ ], result._events)
+
+ def test_tags_forwarded_after_tests(self):
+ test = subunit.RemotedTestCase('foo')
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ tag_collapser.startTestRun()
+ tag_collapser.startTest(test)
+ tag_collapser.addSuccess(test)
+ tag_collapser.stopTest(test)
+ tag_collapser.tags(set(['a']), set(['b']))
+ tag_collapser.stopTestRun()
+ self.assertEqual(
+ [('startTestRun',),
+ ('startTest', test),
+ ('addSuccess', test),
+ ('stopTest', test),
+ ('tags', set(['a']), set(['b'])),
+ ('stopTestRun',),
+ ],
+ result._events)
+
+ def test_tags_collapsed_inside_of_tests(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ test = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(test)
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.tags(set(['b']), set(['a']))
+ tag_collapser.tags(set(['c']), set())
+ tag_collapser.stopTest(test)
+ self.assertEquals(
+ [('startTest', test),
+ ('tags', set(['b', 'c']), set(['a'])),
+ ('stopTest', test)],
+ result._events)
+
+ def test_tags_collapsed_inside_of_tests_different_ordering(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ test = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(test)
+ tag_collapser.tags(set(), set(['a']))
+ tag_collapser.tags(set(['a', 'b']), set())
+ tag_collapser.tags(set(['c']), set())
+ tag_collapser.stopTest(test)
+ self.assertEquals(
+ [('startTest', test),
+ ('tags', set(['a', 'b', 'c']), set()),
+ ('stopTest', test)],
+ result._events)
+
+ def test_tags_sent_before_result(self):
+ # Because addSuccess and friends tend to send subunit output
+ # immediately, and because 'tags:' before a result line means
+ # something different to 'tags:' after a result line, we need to be
+ # sure that tags are emitted before 'addSuccess' (or whatever).
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+ test = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(test)
+ tag_collapser.tags(set(['a']), set())
+ tag_collapser.addSuccess(test)
+ tag_collapser.stopTest(test)
+ self.assertEquals(
+ [('startTest', test),
+ ('tags', set(['a']), set()),
+ ('addSuccess', test),
+ ('stopTest', test)],
+ result._events)
+
+
+class TestTimeCollapsingDecorator(TestCase):
+
+ def make_time(self):
+ # Heh heh.
+ return datetime.datetime(
+ 2000, 1, self.getUniqueInteger(), tzinfo=iso8601.UTC)
+
+ def test_initial_time_forwarded(self):
+ # We always forward the first time event we see.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ tag_collapser.time(a_time)
+ self.assertEquals([('time', a_time)], result._events)
+
+ def test_time_collapsed_to_first_and_last(self):
+ # If there are many consecutive time events, only the first and last
+ # are sent through.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ times = [self.make_time() for i in range(5)]
+ for a_time in times:
+ tag_collapser.time(a_time)
+ tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+ self.assertEquals(
+ [('time', times[0]), ('time', times[-1])], result._events[:-1])
+
+ def test_only_one_time_sent(self):
+ # If we receive a single time event followed by a non-time event, we
+ # send exactly one time event.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ tag_collapser.time(a_time)
+ tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+ self.assertEquals([('time', a_time)], result._events[:-1])
+
+ def test_duplicate_times_not_sent(self):
+ # Many time events with the exact same time are collapsed into one
+ # time event.
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ for i in range(5):
+ tag_collapser.time(a_time)
+ tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+ self.assertEquals([('time', a_time)], result._events[:-1])
+
+ def test_no_times_inserted(self):
+ result = ExtendedTestResult()
+ tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+ a_time = self.make_time()
+ tag_collapser.time(a_time)
+ foo = subunit.RemotedTestCase('foo')
+ tag_collapser.startTest(foo)
+ tag_collapser.addSuccess(foo)
+ tag_collapser.stopTest(foo)
+ self.assertEquals(
+ [('time', a_time),
+ ('startTest', foo),
+ ('addSuccess', foo),
+ ('stopTest', foo)], result._events)
+
+
+class TestByTestResultTests(testtools.TestCase):
+
+ def setUp(self):
+ super(TestByTestResultTests, self).setUp()
+ self.log = []
+ self.result = subunit.test_results.TestByTestResult(self.on_test)
+ if sys.version_info >= (3, 0):
+ self.result._now = iter(range(5)).__next__
+ else:
+ self.result._now = iter(range(5)).next
+
+ def assertCalled(self, **kwargs):
+ defaults = {
+ 'test': self,
+ 'tags': set(),
+ 'details': None,
+ 'start_time': 0,
+ 'stop_time': 1,
+ }
+ defaults.update(kwargs)
+ self.assertEqual([defaults], self.log)
+
+ def on_test(self, **kwargs):
+ self.log.append(kwargs)
+
+ def test_no_tests_nothing_reported(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertEqual([], self.log)
+
+ def test_add_success(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success')
+
+ def test_add_success_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_tags(self):
+ if not getattr(self.result, 'tags', None):
+ self.skipTest("No tags in testtools")
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo']))
+
+ def test_add_error(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addError(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='error',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_error_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addError(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='error', details=details)
+
+ def test_add_failure(self):
+ self.result.startTest(self)
+ try:
+ self.fail("intentional failure")
+ except self.failureException:
+ failure = sys.exc_info()
+ self.result.addFailure(self, failure)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='failure',
+ details={'traceback': TracebackContent(failure, self)})
+
+ def test_add_failure_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='failure', details=details)
+
+ def test_add_xfail(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addExpectedFailure(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='xfail',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_xfail_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addExpectedFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='xfail', details=details)
+
+ def test_add_unexpected_success(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addUnexpectedSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_add_skip_reason(self):
+ self.result.startTest(self)
+ reason = self.getUniqueString()
+ self.result.addSkip(self, reason)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='skip', details={'reason': text_content(reason)})
+
+ def test_add_skip_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSkip(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='skip', details=details)
+
+ def test_twice(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self, details={'foo': 'bar'})
+ self.result.stopTest(self)
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertEqual(
+ [{'test': self,
+ 'status': 'success',
+ 'start_time': 0,
+ 'stop_time': 1,
+ 'tags': set(),
+ 'details': {'foo': 'bar'}},
+ {'test': self,
+ 'status': 'success',
+ 'start_time': 2,
+ 'stop_time': 3,
+ 'tags': set(),
+ 'details': None},
+ ],
+ self.log)
+
+
+class TestCsvResult(testtools.TestCase):
+
+ def parse_stream(self, stream):
+ stream.seek(0)
+ reader = csv.reader(stream)
+ return list(reader)
+
+ def test_csv_output(self):
+ stream = StringIO()
+ result = subunit.test_results.CsvResult(stream)
+ if sys.version_info >= (3, 0):
+ result._now = iter(range(5)).__next__
+ else:
+ result._now = iter(range(5)).next
+ result.startTestRun()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ result.stopTestRun()
+ self.assertEqual(
+ [['test', 'status', 'start_time', 'stop_time'],
+ [self.id(), 'success', '0', '1'],
+ ],
+ self.parse_stream(stream))
+
+ def test_just_header_when_no_tests(self):
+ stream = StringIO()
+ result = subunit.test_results.CsvResult(stream)
+ result.startTestRun()
+ result.stopTestRun()
+ self.assertEqual(
+ [['test', 'status', 'start_time', 'stop_time']],
+ self.parse_stream(stream))
+
+ def test_no_output_before_events(self):
+ stream = StringIO()
+ subunit.test_results.CsvResult(stream)
+ self.assertEqual([], self.parse_stream(stream))
diff --git a/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py b/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py
new file mode 100644
index 00000000000..057f65c3bdd
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python/subunit/v2.py
@@ -0,0 +1,495 @@
+#
+# subunit: extensions to Python unittest to get test results from subprocesses.
+# Copyright (C) 2013 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+import codecs
+utf_8_decode = codecs.utf_8_decode
+import datetime
+from io import UnsupportedOperation
+import os
+import select
+import struct
+import zlib
+
+from extras import safe_hasattr, try_imports
+builtins = try_imports(['__builtin__', 'builtins'])
+
+import subunit
+import subunit.iso8601 as iso8601
+
+__all__ = [
+ 'ByteStreamToStreamResult',
+ 'StreamResultToBytes',
+ ]
+
+SIGNATURE = b'\xb3'
+FMT_8 = '>B'
+FMT_16 = '>H'
+FMT_24 = '>HB'
+FMT_32 = '>I'
+FMT_TIMESTAMP = '>II'
+FLAG_TEST_ID = 0x0800
+FLAG_ROUTE_CODE = 0x0400
+FLAG_TIMESTAMP = 0x0200
+FLAG_RUNNABLE = 0x0100
+FLAG_TAGS = 0x0080
+FLAG_MIME_TYPE = 0x0020
+FLAG_EOF = 0x0010
+FLAG_FILE_CONTENT = 0x0040
+EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=iso8601.Utc())
+NUL_ELEMENT = b'\0'[0]
+# Maps a type to True when 'NUL_ELEMENT in thing' incorrectly returns False for it.
+_nul_test_broken = {}
+
+
+def has_nul(buffer_or_bytes):
+ """Return True if a null byte is present in buffer_or_bytes."""
+ # Simple "if NUL_ELEMENT in utf8_bytes:" fails on Python 3.1 and 3.2 with
+ # memoryviews. See https://bugs.launchpad.net/subunit/+bug/1216246
+ buffer_type = type(buffer_or_bytes)
+ broken = _nul_test_broken.get(buffer_type)
+ if broken is None:
+ reference = buffer_type(b'\0')
+ broken = not NUL_ELEMENT in reference
+ _nul_test_broken[buffer_type] = broken
+ if broken:
+ return b'\0' in buffer_or_bytes
+ else:
+ return NUL_ELEMENT in buffer_or_bytes
+
+
+class ParseError(Exception):
+ """Used to pass error messages within the parser."""
+
+
+class StreamResultToBytes(object):
+ """Convert StreamResult API calls to bytes.
+
+ The StreamResult API is defined by testtools.StreamResult.
+ """
+
+ status_mask = {
+ None: 0,
+ 'exists': 0x1,
+ 'inprogress': 0x2,
+ 'success': 0x3,
+ 'uxsuccess': 0x4,
+ 'skip': 0x5,
+ 'fail': 0x6,
+ 'xfail': 0x7,
+ }
+
+ zero_b = b'\0'[0]
+
+ def __init__(self, output_stream):
+ """Create a StreamResultToBytes with output written to output_stream.
+
+ :param output_stream: A file-like object. Must support write(bytes)
+ and flush() methods. Flush will be called after each write.
+ The stream will be passed through subunit.make_stream_binary,
+ to handle regular cases such as stdout.
+ """
+ self.output_stream = subunit.make_stream_binary(output_stream)
+
+ def startTestRun(self):
+ pass
+
+ def stopTestRun(self):
+ pass
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ self._write_packet(test_id=test_id, test_status=test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+
+ def _write_utf8(self, a_string, packet):
+ utf8 = a_string.encode('utf-8')
+ self._write_number(len(utf8), packet)
+ packet.append(utf8)
+
+ def _write_len16(self, length, packet):
+ assert length < 65536
+ packet.append(struct.pack(FMT_16, length))
+
+ def _write_number(self, value, packet):
+ packet.extend(self._encode_number(value))
+
+ def _encode_number(self, value):
+ assert value >= 0
+ if value < 64:
+ return [struct.pack(FMT_8, value)]
+ elif value < 16384:
+ value = value | 0x4000
+ return [struct.pack(FMT_16, value)]
+ elif value < 4194304:
+ value = value | 0x800000
+ return [struct.pack(FMT_16, value >> 8),
+ struct.pack(FMT_8, value & 0xff)]
+ elif value < 1073741824:
+ value = value | 0xc0000000
+ return [struct.pack(FMT_32, value)]
+ else:
+ raise ValueError('value too large to encode: %r' % (value,))
+
+ def _write_packet(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ packet = [SIGNATURE]
+ packet.append(b'FF') # placeholder for flags
+ # placeholder for length, but see below as length is variable.
+ packet.append(b'')
+ flags = 0x2000 # Version 0x2
+ if timestamp is not None:
+ flags = flags | FLAG_TIMESTAMP
+ since_epoch = timestamp - EPOCH
+ nanoseconds = since_epoch.microseconds * 1000
+ seconds = (since_epoch.seconds + since_epoch.days * 24 * 3600)
+ packet.append(struct.pack(FMT_32, seconds))
+ self._write_number(nanoseconds, packet)
+ if test_id is not None:
+ flags = flags | FLAG_TEST_ID
+ self._write_utf8(test_id, packet)
+ if test_tags:
+ flags = flags | FLAG_TAGS
+ self._write_number(len(test_tags), packet)
+ for tag in test_tags:
+ self._write_utf8(tag, packet)
+ if runnable:
+ flags = flags | FLAG_RUNNABLE
+ if mime_type:
+ flags = flags | FLAG_MIME_TYPE
+ self._write_utf8(mime_type, packet)
+ if file_name is not None:
+ flags = flags | FLAG_FILE_CONTENT
+ self._write_utf8(file_name, packet)
+ self._write_number(len(file_bytes), packet)
+ packet.append(file_bytes)
+ if eof:
+ flags = flags | FLAG_EOF
+ if route_code is not None:
+ flags = flags | FLAG_ROUTE_CODE
+ self._write_utf8(route_code, packet)
+ # 0x0008 - not used in v2.
+ flags = flags | self.status_mask[test_status]
+ packet[1] = struct.pack(FMT_16, flags)
+ base_length = sum(map(len, packet)) + 4
+ if base_length <= 62:
+ # one byte to encode length, 62+1 = 63
+ length_length = 1
+ elif base_length <= 16381:
+ # two bytes to encode length, 16381+2 = 16383
+ length_length = 2
+ elif base_length <= 4194300:
+            # three bytes to encode length, 4194300+3=4194303
+ length_length = 3
+ else:
+ # Longer than policy:
+ # TODO: chunk the packet automatically?
+ # - strip all but file data
+ # - do 4M chunks of that till done
+ # - include original data in final chunk.
+ raise ValueError("Length too long: %r" % base_length)
+ packet[2:3] = self._encode_number(base_length + length_length)
+ # We could either do a partial application of crc32 over each chunk
+ # or a single join to a temp variable then a final join
+ # or two writes (that python might then split).
+ # For now, simplest code: join, crc32, join, output
+ content = b''.join(packet)
+ self.output_stream.write(content + struct.pack(
+ FMT_32, zlib.crc32(content) & 0xffffffff))
+ self.output_stream.flush()
+
+
+class ByteStreamToStreamResult(object):
+ """Parse a subunit byte stream.
+
+    Mixed streams that contain non-subunit content are supported when a
+    non_subunit_name is passed to the constructor. The default is to raise an
+ error containing the non-subunit byte after it has been read from the
+ stream.
+
+ Typical use:
+
+ >>> case = ByteStreamToStreamResult(sys.stdin.buffer)
+ >>> result = StreamResult()
+ >>> result.startTestRun()
+ >>> case.run(result)
+ >>> result.stopTestRun()
+ """
+
+ status_lookup = {
+ 0x0: None,
+ 0x1: 'exists',
+ 0x2: 'inprogress',
+ 0x3: 'success',
+ 0x4: 'uxsuccess',
+ 0x5: 'skip',
+ 0x6: 'fail',
+ 0x7: 'xfail',
+ }
+
+ def __init__(self, source, non_subunit_name=None):
+ """Create a ByteStreamToStreamResult.
+
+ :param source: A file like object to read bytes from. Must support
+ read(<count>) and return bytes. The file is not closed by
+ ByteStreamToStreamResult. subunit.make_stream_binary() is
+ called on the stream to get it into bytes mode.
+ :param non_subunit_name: If set to non-None, non subunit content
+ encountered in the stream will be converted into file packets
+ labelled with this name.
+ """
+ self.non_subunit_name = non_subunit_name
+ self.source = subunit.make_stream_binary(source)
+ self.codec = codecs.lookup('utf8').incrementaldecoder()
+
+ def run(self, result):
+ """Parse source and emit events to result.
+
+ This is a blocking call: it will run until EOF is detected on source.
+ """
+ self.codec.reset()
+ mid_character = False
+ while True:
+ # We're in blocking mode; read one char
+ content = self.source.read(1)
+ if not content:
+ # EOF
+ return
+ if not mid_character and content[0] == SIGNATURE[0]:
+ self._parse_packet(result)
+ continue
+ if self.non_subunit_name is None:
+ raise Exception("Non subunit content", content)
+ try:
+ if self.codec.decode(content):
+ # End of a character
+ mid_character = False
+ else:
+ mid_character = True
+ except UnicodeDecodeError:
+ # Bad unicode, not our concern.
+ mid_character = False
+ # Aggregate all content that is not subunit until either
+ # 1MiB is accumulated or 50ms has passed with no input.
+ # Both are arbitrary amounts intended to give a simple
+ # balance between efficiency (avoiding death by a thousand
+ # one-byte packets), buffering (avoiding overlarge state
+ # being hidden on intermediary nodes) and interactivity
+ # (when driving a debugger, slow response to typing is
+ # annoying).
+ buffered = [content]
+ while len(buffered[-1]):
+ try:
+ self.source.fileno()
+ except:
+ # Won't be able to select, fallback to
+ # one-byte-at-a-time.
+ break
+ # Note: this has a very low timeout because with stdin, the
+ # BufferedIO layer typically has all the content available
+ # from the stream when e.g. pdb is dropped into, leading to
+ # select always timing out when in fact we could have read
+ # (from the buffer layer) - we typically fail to aggregate
+ # any content on 3.x Pythons.
+ readable = select.select([self.source], [], [], 0.000001)[0]
+ if readable:
+ content = self.source.read(1)
+ if not len(content):
+ # EOF - break and emit buffered.
+ break
+ if not mid_character and content[0] == SIGNATURE[0]:
+ # New packet, break, emit buffered, then parse.
+ break
+ buffered.append(content)
+ # Feed into the codec.
+ try:
+ if self.codec.decode(content):
+ # End of a character
+ mid_character = False
+ else:
+ mid_character = True
+ except UnicodeDecodeError:
+ # Bad unicode, not our concern.
+ mid_character = False
+ if not readable or len(buffered) >= 1048576:
+ # timeout or too much data, emit what we have.
+ break
+ result.status(
+ file_name=self.non_subunit_name,
+ file_bytes=b''.join(buffered))
+ if mid_character or not len(content) or content[0] != SIGNATURE[0]:
+ continue
+ # Otherwise, parse a data packet.
+ self._parse_packet(result)
+
+ def _parse_packet(self, result):
+ try:
+ packet = [SIGNATURE]
+ self._parse(packet, result)
+ except ParseError as error:
+ result.status(test_id="subunit.parser", eof=True,
+ file_name="Packet data", file_bytes=b''.join(packet),
+ mime_type="application/octet-stream")
+ result.status(test_id="subunit.parser", test_status='fail',
+ eof=True, file_name="Parser Error",
+ file_bytes=(error.args[0]).encode('utf8'),
+ mime_type="text/plain;charset=utf8")
+
+ def _to_bytes(self, data, pos, length):
+ """Return a slice of data from pos for length as bytes."""
+ # memoryview in 2.7.3 and 3.2 isn't directly usable with struct :(.
+ # see https://bugs.launchpad.net/subunit/+bug/1216163
+ result = data[pos:pos+length]
+ if type(result) is not bytes:
+ return result.tobytes()
+ return result
+
+ def _parse_varint(self, data, pos, max_3_bytes=False):
+ # because the only incremental IO we do is at the start, and the 32 bit
+ # CRC means we can always safely read enough to cover any varint, we
+ # can be sure that there should be enough data - and if not it is an
+ # error not a normal situation.
+ data_0 = struct.unpack(FMT_8, self._to_bytes(data, pos, 1))[0]
+ typeenum = data_0 & 0xc0
+ value_0 = data_0 & 0x3f
+ if typeenum == 0x00:
+ return value_0, 1
+ elif typeenum == 0x40:
+ data_1 = struct.unpack(FMT_8, self._to_bytes(data, pos+1, 1))[0]
+ return (value_0 << 8) | data_1, 2
+ elif typeenum == 0x80:
+ data_1 = struct.unpack(FMT_16, self._to_bytes(data, pos+1, 2))[0]
+ return (value_0 << 16) | data_1, 3
+ else:
+ if max_3_bytes:
+ raise ParseError('3 byte maximum given but 4 byte value found.')
+ data_1, data_2 = struct.unpack(FMT_24, self._to_bytes(data, pos+1, 3))
+ result = (value_0 << 24) | data_1 << 8 | data_2
+ return result, 4
+
+ def _parse(self, packet, result):
+ # 2 bytes flags, at most 3 bytes length.
+ packet.append(self.source.read(5))
+ flags = struct.unpack(FMT_16, packet[-1][:2])[0]
+ length, consumed = self._parse_varint(
+ packet[-1], 2, max_3_bytes=True)
+ remainder = self.source.read(length - 6)
+ if len(remainder) != length - 6:
+ raise ParseError(
+ 'Short read - got %d bytes, wanted %d bytes' % (
+ len(remainder), length - 6))
+ if consumed != 3:
+ # Avoid having to parse torn values
+ packet[-1] += remainder
+ pos = 2 + consumed
+ else:
+ # Avoid copying potentially lots of data.
+ packet.append(remainder)
+ pos = 0
+ crc = zlib.crc32(packet[0])
+ for fragment in packet[1:-1]:
+ crc = zlib.crc32(fragment, crc)
+ crc = zlib.crc32(packet[-1][:-4], crc) & 0xffffffff
+ packet_crc = struct.unpack(FMT_32, packet[-1][-4:])[0]
+ if crc != packet_crc:
+ # Bad CRC, report it and stop parsing the packet.
+ raise ParseError(
+ 'Bad checksum - calculated (0x%x), stored (0x%x)'
+ % (crc, packet_crc))
+ if safe_hasattr(builtins, 'memoryview'):
+ body = memoryview(packet[-1])
+ else:
+ body = packet[-1]
+ # Discard CRC-32
+ body = body[:-4]
+ # One packet could have both file and status data; the Python API
+ # presents these separately (perhaps it shouldn't?)
+ if flags & FLAG_TIMESTAMP:
+ seconds = struct.unpack(FMT_32, self._to_bytes(body, pos, 4))[0]
+ nanoseconds, consumed = self._parse_varint(body, pos+4)
+ pos = pos + 4 + consumed
+ timestamp = EPOCH + datetime.timedelta(
+ seconds=seconds, microseconds=nanoseconds/1000)
+ else:
+ timestamp = None
+ if flags & FLAG_TEST_ID:
+ test_id, pos = self._read_utf8(body, pos)
+ else:
+ test_id = None
+ if flags & FLAG_TAGS:
+ tag_count, consumed = self._parse_varint(body, pos)
+ pos += consumed
+ test_tags = set()
+ for _ in range(tag_count):
+ tag, pos = self._read_utf8(body, pos)
+ test_tags.add(tag)
+ else:
+ test_tags = None
+ if flags & FLAG_MIME_TYPE:
+ mime_type, pos = self._read_utf8(body, pos)
+ else:
+ mime_type = None
+ if flags & FLAG_FILE_CONTENT:
+ file_name, pos = self._read_utf8(body, pos)
+ content_length, consumed = self._parse_varint(body, pos)
+ pos += consumed
+ file_bytes = self._to_bytes(body, pos, content_length)
+ if len(file_bytes) != content_length:
+ raise ParseError('File content extends past end of packet: '
+ 'claimed %d bytes, %d available' % (
+ content_length, len(file_bytes)))
+ pos += content_length
+ else:
+ file_name = None
+ file_bytes = None
+ if flags & FLAG_ROUTE_CODE:
+ route_code, pos = self._read_utf8(body, pos)
+ else:
+ route_code = None
+ runnable = bool(flags & FLAG_RUNNABLE)
+ eof = bool(flags & FLAG_EOF)
+ test_status = self.status_lookup[flags & 0x0007]
+ result.status(test_id=test_id, test_status=test_status,
+ test_tags=test_tags, runnable=runnable, mime_type=mime_type,
+ eof=eof, file_name=file_name, file_bytes=file_bytes,
+ route_code=route_code, timestamp=timestamp)
+ __call__ = run
+
+ def _read_utf8(self, buf, pos):
+ length, consumed = self._parse_varint(buf, pos)
+ pos += consumed
+ utf8_bytes = buf[pos:pos+length]
+ if length != len(utf8_bytes):
+ raise ParseError(
+ 'UTF8 string at offset %d extends past end of packet: '
+ 'claimed %d bytes, %d available' % (pos - 2, length,
+ len(utf8_bytes)))
+ if has_nul(utf8_bytes):
+ raise ParseError('UTF8 string at offset %d contains NUL byte' % (
+ pos-2,))
+ try:
+ utf8, decoded_bytes = utf_8_decode(utf8_bytes)
+ if decoded_bytes != length:
+ raise ParseError("Invalid (partially decodable) string at "
+ "offset %d, %d undecoded bytes" % (
+ pos-2, length - decoded_bytes))
+ return utf8, length+pos
+ except UnicodeDecodeError:
+ raise ParseError('UTF8 string at offset %d is not UTF8' % (pos-2,))
+
diff --git a/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO
new file mode 100644
index 00000000000..de79389b594
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/PKG-INFO
@@ -0,0 +1,483 @@
+Metadata-Version: 1.0
+Name: python-subunit
+Version: 0.0.16
+Summary: Python implementation of subunit test streaming protocol
+Home-page: http://launchpad.net/subunit
+Author: Robert Collins
+Author-email: subunit-dev@lists.launchpad.net
+License: UNKNOWN
+Description:
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2013 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+ Subunit
+ -------
+
+ Subunit is a streaming protocol for test results.
+
+ There are two major revisions of the protocol. Version 1 was trivially human
+ readable but had significant defects as far as highly parallel testing was
+ concerned - it had no room for doing discovery and execution in parallel,
+ required substantial buffering when multiplexing and was fragile - a corrupt
+ byte could cause an entire stream to be misparsed. Version 1.1 added
+ encapsulation of binary streams which mitigated some of the issues but the
+ core remained.
+    core issues remained.
+ Version 2 shares many of the good characteristics of Version 1 - it can be
+ embedded into a regular text stream (e.g. from a build system) and it still
+ models xUnit style test execution. It also fixes many of the issues with
+ Version 1 - Version 2 can be multiplexed without excessive buffering (in
+ time or space), it has a well defined recovery mechanism for dealing with
+ corrupted streams (e.g. where two processes write to the same stream
+ concurrently, or where the stream generator suffers a bug).
+
+    More details on both protocol versions can be found in the 'Protocol' section
+ of this document.
+
+ Subunit comes with command line filters to process a subunit stream and
+ language bindings for python, C, C++ and shell. Bindings are easy to write
+ for other languages.
+
+ A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole, and tests running on multiple machines
+ can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+    other can be run separately and then aggregated, rather than interfering
+    with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+    deserialisation to get test runs on distributed machines to be reported in
+ real time.
+
+ Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
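+
+    These filters compose on the command line; for example, a typical
+    (illustrative) pipeline runs a Python test suite and reports it through
+    pyunit::
+
+        $ python -m subunit.run mypackage.tests.test_suite | subunit2pyunit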
+
+ Integration with other tools
+ ----------------------------
+
+    Subunit's language bindings provide integration with various test runners like
+ 'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+ (typically a few lines) will allow Subunit to be used in more sophisticated
+ ways.
+
+ Python
+ ======
+
+ Subunit has excellent Python support: most of the filters and tools are written
+ in python and there are facilities for using Subunit to increase test isolation
+ seamlessly within a test suite.
+
+ The most common way is to run an existing python test suite and have it output
+ subunit via the ``subunit.run`` module::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+    For more information on the Python support Subunit offers, please see
+ ``pydoc subunit``, or the source in ``python/subunit/``
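+
+    As a rough, self-contained sketch of the version 2 classes described later
+    in this document (``CollectingResult`` below is purely illustrative - any
+    object with a ``status()`` method will do)::
+
+        from io import BytesIO
+
+        import subunit
+
+        class CollectingResult(object):
+            """Illustrative stand-in for a testtools StreamResult."""
+            def __init__(self):
+                self.events = []
+            def status(self, **kwargs):
+                # Each parsed packet arrives as a single status() call.
+                self.events.append(kwargs)
+
+        stream = BytesIO()
+        writer = subunit.StreamResultToBytes(stream)
+        writer.status(test_id='mypackage.tests.test_foo', test_status='inprogress')
+        writer.status(test_id='mypackage.tests.test_foo', test_status='success')
+
+        stream.seek(0)
+        collector = CollectingResult()
+        subunit.ByteStreamToStreamResult(
+            stream, non_subunit_name='stdout').run(collector)
+        # collector.events now holds one dict per packet written above.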
+
+ C
+ =
+
+ Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+ has included subunit support in their project for some years now. See
+ 'c/README' for more details.
+
+ C++
+ ===
+
+ The C library is includable and usable directly from C++. A TestListener for
+ CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+ shell
+ =====
+
+ There are two sets of shell tools. There are filters, which accept a subunit
+ stream on stdin and output processed data (or a transformed stream) on stdout.
+
+    Then there are unittest facilities similar to those for C: shell bindings
+ consisting of simple functions to output protocol elements, and a patch for
+ adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+ details.
+
+ Filter recipes
+ --------------
+
+ To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+ The xUnit test model
+ --------------------
+
+ Subunit implements a slightly modified xUnit test model. The stock standard
+ model is that there are tests, which have an id(), can be run, and when run
+ start, emit an outcome (like success or failure) and then finish.
+
+ Subunit extends this with the idea of test enumeration (find out about tests
+ a runner has without running them), tags (allow users to describe tests in
+ ways the test framework doesn't apply any semantic value to), file attachments
+ (allow arbitrary data to make analysing a failure easy) and timestamps.
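+
+    In the Python bindings shipped with Subunit (see ``python/subunit/v2.py``),
+    each of these concepts corresponds to an optional argument of the
+    ``status()`` call; a sketch, with made-up test and file names::
+
+        from io import BytesIO
+
+        import subunit
+
+        out = subunit.StreamResultToBytes(BytesIO())
+        # Enumeration: announce a test without running it.
+        out.status(test_id='pkg.tests.test_widget', test_status='exists')
+        # Tags, and a helper that is not individually runnable.
+        out.status(test_id='pkg.tests.layer_setup', test_status='success',
+            test_tags=set(['worker-0']), runnable=False)
+        # A file attachment with a MIME type, marking end-of-file.
+        out.status(file_name='traceback', file_bytes=b'...',
+            mime_type='text/plain;charset=utf8', eof=True)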
+
+ The protocol
+ ------------
+
+    Version 2, or v2, is new and still under development, but is intended to
+    supersede version 1 in the very near future. Subunit's bundled tools accept
+ only version 2 and only emit version 2, but the new filters subunit-1to2 and
+ subunit-2to1 can be used to interoperate with older third party libraries.
+
+ Version 2
+ =========
+
+ Version 2 is a binary protocol consisting of independent packets that can be
+ embedded in the output from tools like make - as long as each packet has no
+ other bytes mixed in with it (which 'make -j N>1' has a tendency of doing).
+ Version 2 is currently in draft form, and early adopters should be willing
+ to either discard stored results (if protocol changes are made), or bulk
+ convert them back to v1 and then to a newer edition of v2.
+
+ The protocol synchronises at the start of the stream, after a packet, or
+ after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+ directly after the end of the prior packet.
+
+ Subunit is intended to be transported over a reliable streaming protocol such
+ as TCP. As such it does not concern itself with out of order delivery of
+ packets. However, because of the possibility of corruption due to either
+ bugs in the sender, or due to mixed up data from concurrent writes to the same
+ fd when being embedded, subunit strives to recover reasonably gracefully from
+ damaged data.
+
+ A key design goal for Subunit version 2 is to allow processing and multiplexing
+ without forcing buffering for semantic correctness, as buffering tends to hide
+ hung or otherwise misbehaving tests. That said, limited time based buffering
+    for network efficiency is a good idea - this is ultimately an implementer's
+ choice. Line buffering is also discouraged for subunit streams, as dropping
+ into a debugger or other tool may require interactive traffic even if line
+ buffering would not otherwise be a problem.
+
+ In version two there are two conceptual events - a test status event and a file
+ attachment event. Events may have timestamps, and the path of multiplexers that
+ an event is routed through is recorded to permit sending actions back to the
+ source (such as new tests to run or stdin for driving debuggers and other
+ interactive input). Test status events are used to enumerate tests, to report
+ tests and test helpers as they run. Tests may have tags, used to allow
+ tunnelling extra meanings through subunit without requiring parsing of
+ arbitrary file attachments. Things that are not standalone tests get marked
+ as such by setting the 'Runnable' flag to false. (For instance, individual
+ assertions in TAP are not runnable tests, only the top level TAP test script
+ is runnable).
+
+ File attachments are used to provide rich detail about the nature of a failure.
+ File attachments can also be used to encapsulate stdout and stderr both during
+ and outside tests.
+
+    Most numbers are stored in network byte order - Most Significant Byte first -
+    encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+    byte's top two high-order bits encode the total number of octets in the number.
+    This encoding can encode values from 0 to 2**30-1, enough to hold a
+    nanosecond count within a second. Numbers that are not variable-length
+    encoded are still stored in MSB order.
+
+ prefix octets max max
+ +-------+--------+---------+------------+
+ | 00 | 1 | 2**6-1 | 63 |
+ | 01 | 2 | 2**14-1 | 16383 |
+ | 10 | 3 | 2**22-1 | 4194303 |
+ | 11 | 4 | 2**30-1 | 1073741823 |
+ +-------+--------+---------+------------+
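+
+    A Python sketch of this encoding (mirroring ``_encode_number`` in
+    ``python/subunit/v2.py``; the standalone function name is just for
+    illustration)::
+
+        import struct
+
+        def encode_number(value):
+            if value < 64:               # prefix 00 - one octet
+                return struct.pack('>B', value)
+            elif value < 16384:          # prefix 01 - two octets
+                return struct.pack('>H', value | 0x4000)
+            elif value < 4194304:        # prefix 10 - three octets
+                value |= 0x800000
+                return (struct.pack('>H', value >> 8)
+                    + struct.pack('>B', value & 0xff))
+            elif value < 1073741824:     # prefix 11 - four octets
+                return struct.pack('>I', value | 0xc0000000)
+            raise ValueError('value too large to encode: %r' % (value,))
+
+        encode_number(63)     # b'\x3f'
+        encode_number(64)     # b'\x40\x40'
+        encode_number(16384)  # b'\x80\x40\x00'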
+
+ All variable length elements of the packet are stored with a length prefix
+    number, allowing them to be skipped over by consumers that don't need to
+ interpret them.
+
+    UTF-8 strings are stored with no terminating NUL and should not have any
+    embedded NULs. Implementations SHOULD validate any such strings that they
+    process and take some remedial action (such as discarding the packet as
+    corrupt).
+
+ In short the structure of a packet is:
+ PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+ FILECONTENT? ROUTING_CODE? CRC32
+
+ In more detail...
+
+ Packets are identified by a single byte signature - 0xB3, which is never legal
+ in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+ bit set and the second not, which is the UTF-8 signature for a continuation
+    byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+ the 1 and 0 for a continuation byte.
+
+    If subunit packets are being embedded in a non-UTF-8 text stream, where 0xB3 is
+ a legal character, consider either recoding the text to UTF-8, or using
+ subunit's 'file' packets to embed the text stream in subunit, rather than the
+ other way around.
+
+ Following the signature byte comes a 16-bit flags field, which includes a
+ 4-bit version field - if the version is not 0x2 then the packet cannot be
+ read. It is recommended to signal an error at this point (e.g. by emitting
+ a synthetic error packet and returning to the top level loop to look for
+ new packets, or exiting with an error). If recovery is desired, treat the
+ packet signature as an opaque byte and scan for a new synchronisation point.
+ NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+ as they are an 8-bit safe container format, so recovery from this situation
+ may involve an arbitrary number of false positives until an actual packet
+ is encountered; and even then it may still be false, failing after passing
+ the version check due to coincidence.
+
+ Flags are stored in network byte order too.
+ +-------------------------+------------------------+
+ | High byte | Low byte |
+ | 15 14 13 12 11 10 9 8 | 7 6 5 4 3 2 1 0 |
+ | VERSION |feature bits| |
+ +------------+------------+------------------------+
+
+ Valid version values are:
+ 0x2 - version 2
+
+ Feature bits:
+ Bit 11 - mask 0x0800 - Test id present.
+ Bit 10 - mask 0x0400 - Routing code present.
+ Bit 9 - mask 0x0200 - Timestamp present.
+ Bit 8 - mask 0x0100 - Test is 'runnable'.
+ Bit 7 - mask 0x0080 - Tags are present.
+ Bit 6 - mask 0x0040 - File content is present.
+ Bit 5 - mask 0x0020 - File MIME type is present.
+ Bit 4 - mask 0x0010 - EOF marker.
+ Bit 3 - mask 0x0008 - Must be zero in version 2.
+
+ Test status gets three bits:
+ Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+ 000 - undefined / no test
+ 001 - Enumeration / existence
+ 010 - In progress
+ 011 - Success
+ 100 - Unexpected Success
+ 101 - Skipped
+ 110 - Failed
+ 111 - Expected failure
+
+ After the flags field is a number field giving the length in bytes of the
+ entire packet, including the signature and the checksum. This length must
+ be less than 4MiB (at most 4194303 bytes). The encoding can record a larger
+ number but one of the goals is to avoid requiring large buffers, or causing
+ large latency in the packet forward/processing pipeline. Larger file
+ attachments can be communicated in multiple packets, and the overhead in such a
+ 4MiB packet is approximately 0.2%.
+
+ The rest of the packet is a series of optional features as specified by the set
+ feature bits in the flags field. When absent they are entirely absent.
+
+ Forwarding and multiplexing of packets can be done without interpreting the
+ remainder of the packet until the routing code and checksum (which are both at
+ the end of the packet). Additionally, routers can often avoid copying or moving
+ the bulk of the packet, as long as the routing code size increase doesn't force
+ the length encoding to take up a new byte (which will only happen to packets
+ less than or equal to 16KiB in length) - large packets are very efficient to
+ route.
+
+ The timestamp, when present, is a 32-bit unsigned integer for seconds, followed
+ by a variable length number for nanoseconds, representing UTC time since the
+ Unix Epoch in seconds and nanoseconds.
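+
+ For illustration, reusing the hypothetical ``write_number`` helper sketched
+ earlier, a timestamp field could be serialised as::
+
+     import struct
+
+     def encode_timestamp(seconds, nanoseconds):
+         # 32-bit unsigned big-endian seconds, then a variable-length
+         # number holding the nanoseconds (0 to 999999999).
+         return struct.pack('>I', seconds) + write_number(nanoseconds)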
+
+ Test id when present is a UTF-8 string. The test id should uniquely identify
+ runnable tests such that they can be selected individually. For tests and other
+ actions which cannot be individually run (such as test
+ fixtures/layers/subtests) uniqueness is not required (though being human
+ meaningful is highly recommended).
+
+ Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+ There are no restrictions on tag content (other than the restrictions on UTF-8
+ strings in subunit in general). Tags have no ordering.
+
+ When a MIME type is present, it defines the MIME type for the file across all
+ packets for the same file (routing code + testid + name uniquely identifies a
+ file, reset when EOF is flagged). If a file never has a MIME type set, it
+ should be
+ treated as application/octet-stream.
+
+ File content when present is a UTF-8 string for the name followed by the length
+ in bytes of the content, and then the content octets.
+
+ If present, the routing code is a UTF-8 string. The routing code is used to
+ determine which test backend a test was running on when doing data analysis,
+ and to route stdin to the test process if interaction is required.
+
+ Multiplexers SHOULD add a routing code if none is present, and prefix any
+ existing routing code with a routing code ('/' separated) if one is already
+ present. For example, a multiplexer might label each stream it is multiplexing
+ with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+ code '3' from stream '0' would adjust the route code when forwarding the packet
+ to be '0/3'.
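+
+ A minimal sketch of that prefixing rule (``prefix_route_code`` is a
+ hypothetical helper, not part of any subunit API)::
+
+     def prefix_route_code(stream_ordinal, existing_code):
+         # Multiplexers label each input stream and prepend that label,
+         # '/' separated, to whatever route code the packet already carries.
+         if existing_code is None:
+             return stream_ordinal
+         return stream_ordinal + '/' + existing_code
+
+     prefix_route_code('0', '3')    # -> '0/3'
+     prefix_route_code('1', None)   # -> '1'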
+
+ Following the end of the packet is a CRC-32 checksum of the contents of the
+ packet including the signature.
+
+ Example packets
+ ~~~~~~~~~~~~~~~
+
+ Trivial test "foo" enumeration packet, with test id, runnable set,
+ status=enumeration. Spaces below are to visually break up signature / flags /
+ length / testid / crc32
+
+ b3 2901 0c 03666f6f 08555f1b
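+
+ A rough illustration (not the reference parser) of pulling that packet apart
+ in Python, assuming the standard CRC-32 as provided by ``zlib.crc32``::
+
+     import binascii
+     import zlib
+
+     packet = binascii.unhexlify('b3' '2901' '0c' '03666f6f' '08555f1b')
+
+     assert packet[0] == 0xB3                    # signature byte
+     flags = int.from_bytes(packet[1:3], 'big')  # 0x2901
+     version = flags >> 12                       # 0x2
+     runnable = bool(flags & 0x0100)             # True
+     status = flags & 0x0007                     # 1 == enumeration
+     length = packet[3]                          # 0x0c == 12: the whole packet
+     name_len = packet[4]                        # 0x03, a one-octet number
+     test_id = packet[5:5 + name_len].decode('utf-8')    # 'foo'
+     crc = int.from_bytes(packet[-4:], 'big')
+     assert crc == zlib.crc32(packet[:-4]) & 0xFFFFFFFF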
+
+
+ Version 1 (and 1.1)
+ ===================
+
+ Version 1 (and 1.1) are mostly human readable protocols.
+
+ Sample subunit wire contents
+ ----------------------------
+
+ The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+ When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+ Subunit protocol description
+ ============================
+
+ This description is being ported to an EBNF style. Currently it is only partly
+ in that style, but should be fairly clear all the same. When in doubt, refer to
+ the source (and ideally help fix up the description!). Generally the protocol
+ is line oriented and consists either of directives and their parameters, or,
+ when outside a DETAILS region, of unexpected lines which are not interpreted
+ by the parser - they should be forwarded unaltered. (A small parsing sketch
+ follows the grammar below.)
+
+ test|testing|test:|testing: test LABEL
+ success|success:|successful|successful: test LABEL
+ success|success:|successful|successful: test LABEL DETAILS
+ failure: test LABEL
+ failure: test LABEL DETAILS
+ error: test LABEL
+ error: test LABEL DETAILS
+ skip[:] test LABEL
+ skip[:] test LABEL DETAILS
+ xfail[:] test LABEL
+ xfail[:] test LABEL DETAILS
+ uxsuccess[:] test LABEL
+ uxsuccess[:] test LABEL DETAILS
+ progress: [+|-]X
+ progress: push
+ progress: pop
+ tags: [-]TAG ...
+ time: YYYY-MM-DD HH:MM:SSZ
+
+ LABEL: UTF8*
+ NAME: UTF8*
+ DETAILS ::= BRACKETED | MULTIPART
+ BRACKETED ::= '[' CR UTF8-lines ']' CR
+ MULTIPART ::= '[ multipart' CR PART* ']' CR
+ PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+ PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+ PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+ unexpected output on stdout -> stdout.
+ exit w/0 or last test completing -> error
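+
+ As a rough sketch only (the real parser in the python/subunit package handles
+ DETAILS regions and many more cases), a line-oriented reader might split the
+ directive word from the rest of the line like this; ``parse_line`` is a
+ hypothetical helper::
+
+     import re
+
+     DIRECTIVE = re.compile(
+         r'^(test|testing|success|successful|failure|error|skip|xfail'
+         r'|uxsuccess|progress|tags|time):? (.*)\n?$')
+
+     def parse_line(line):
+         # Return (directive, remainder) or None for an unrecognised line,
+         # which a forwarding filter should pass through unaltered. How the
+         # remainder maps to LABEL / DETAILS follows the grammar above.
+         match = DIRECTIVE.match(line)
+         if match is None:
+             return None
+         return match.group(1), match.group(2)
+
+     parse_line('test: test foo works\n')   # -> ('test', 'test foo works')
+     parse_line('a writeln to stdout\n')    # -> None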
+
+ Tags given outside a test are applied to all following tests.
+ Tags given after a test: line and before the result line for the same test
+ apply only to that test, and inherit the current global tags.
+ A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+ applying to a single test, or to cancel a global tag.
+
+ The progress directive is used to provide progress information about a stream
+ so that stream consumers can provide completion estimates, progress bars and
+ so on. Stream generators that know how many tests will be present in the stream
+ should output "progress: COUNT". Stream filters that add tests should output
+ "progress: +COUNT", and those that remove tests should output
+ "progress: -COUNT". An absolute count should reset the progress indicators in
+ use - it indicates that two separate streams from different generators have
+ been trivially concatenated together, and there is no knowledge of how many
+ more complete streams are incoming. Smart concatenation could scan each stream
+ for their count and sum them, or alternatively translate absolute counts into
+ relative counts inline. It is recommended that outputters avoid absolute counts
+ unless necessary. The push and pop directives are used to provide local regions
+ for progress reporting. This fits with hierarchically operating test
+ environments - such as those that organise tests into suites - the top-most
+ runner can report on the number of suites, and each suite surround its output
+ with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+ the progress of the restored level by one step. Encountering progress
+ directives between the start and end of a test pair indicates that a previous
+ test was interrupted and did not cleanly terminate: it should be implicitly
+ closed with an error (the same as when a stream ends with no closing test
+ directive for the most recently started test).
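+
+ For illustration only (not taken from a real run), a stream from a runner that
+ knows it contains two suites, each of which reports its own test count, might
+ look like::
+
+     progress: 2
+     progress: push
+     progress: 2
+     test: suite1.test_a
+     success: suite1.test_a
+     test: suite1.test_b
+     success: suite1.test_b
+     progress: pop
+     progress: push
+     progress: 1
+     test: suite2.test_a
+     success: suite2.test_a
+     progress: pop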
+
+ The time directive acts as a clock event - it sets the time for all future
+ events. The value should be a valid ISO8601 time.
+
+ The skip, xfail and uxsuccess outcomes are not supported by all testing
+ environments. In Python the testtools (https://launchpad.net/testtools)
+ library is used to translate these automatically if an older Python version
+ that does not support them is in use. See the testtools documentation for the
+ translation policy.
+
+ skip is used to indicate a test was discovered but not executed. xfail is used
+ to indicate a test that errored in some expected fashion (also known as "TODO"
+ tests in some frameworks). uxsuccess is used to indicate an unexpected success
+ where a test thought to be failing actually passes. It is complementary to
+ xfail.
+
+ Hacking on subunit
+ ------------------
+
+ Releases
+ ========
+
+ * Update versions in configure.ac and python/subunit/__init__.py.
+ * Make PyPI and regular tarball releases. Upload the regular one to LP, the
+ PyPI one to PyPI.
+ * Push a tagged commit.
+
+
+Keywords: python test streaming
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Testing
diff --git a/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..59a1de8f12c
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/SOURCES.txt
@@ -0,0 +1,44 @@
+MANIFEST.in
+NEWS
+README
+setup.py
+filters/subunit-1to2
+filters/subunit-2to1
+filters/subunit-filter
+filters/subunit-ls
+filters/subunit-notify
+filters/subunit-stats
+filters/subunit-tags
+filters/subunit2gtk
+filters/subunit2junitxml
+filters/subunit2pyunit
+filters/tap2subunit
+python/subunit/__init__.py
+python/subunit/chunked.py
+python/subunit/details.py
+python/subunit/filters.py
+python/subunit/iso8601.py
+python/subunit/progress_model.py
+python/subunit/run.py
+python/subunit/test_results.py
+python/subunit/v2.py
+python/subunit/tests/__init__.py
+python/subunit/tests/sample-script.py
+python/subunit/tests/sample-two-script.py
+python/subunit/tests/test_chunked.py
+python/subunit/tests/test_details.py
+python/subunit/tests/test_filters.py
+python/subunit/tests/test_progress_model.py
+python/subunit/tests/test_run.py
+python/subunit/tests/test_subunit_filter.py
+python/subunit/tests/test_subunit_stats.py
+python/subunit/tests/test_subunit_tags.py
+python/subunit/tests/test_tap2subunit.py
+python/subunit/tests/test_test_protocol.py
+python/subunit/tests/test_test_protocol2.py
+python/subunit/tests/test_test_results.py
+python_subunit.egg-info/PKG-INFO
+python_subunit.egg-info/SOURCES.txt
+python_subunit.egg-info/dependency_links.txt
+python_subunit.egg-info/requires.txt
+python_subunit.egg-info/top_level.txt \ No newline at end of file
diff --git a/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt
new file mode 100644
index 00000000000..865fcc9f52e
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/requires.txt
@@ -0,0 +1,2 @@
+extras
+testtools>=0.9.34 \ No newline at end of file
diff --git a/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt
new file mode 100644
index 00000000000..d12b7b93e10
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/python_subunit.egg-info/top_level.txt
@@ -0,0 +1 @@
+subunit
diff --git a/test/3rdparty/python-subunit-0.0.16/setup.cfg b/test/3rdparty/python-subunit-0.0.16/setup.cfg
new file mode 100644
index 00000000000..861a9f55426
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/test/3rdparty/python-subunit-0.0.16/setup.py b/test/3rdparty/python-subunit-0.0.16/setup.py
new file mode 100755
index 00000000000..9917977556e
--- /dev/null
+++ b/test/3rdparty/python-subunit-0.0.16/setup.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+try:
+ # If the user has setuptools / distribute installed, use it
+ from setuptools import setup
+except ImportError:
+ # Otherwise, fall back to distutils.
+ from distutils.core import setup
+ extra = {}
+else:
+ extra = {
+ 'install_requires': [
+ 'extras',
+ 'testtools>=0.9.34',
+ ]
+ }
+
+
+def _get_version_from_file(filename, start_of_line, split_marker):
+ """Extract version from file, giving last matching value or None"""
+ try:
+ return [x for x in open(filename)
+ if x.startswith(start_of_line)][-1].split(split_marker)[1].strip()
+ except (IOError, IndexError):
+ return None
+
+
+VERSION = (
+ # Assume we are in a distribution, which has PKG-INFO
+ _get_version_from_file('PKG-INFO', 'Version:', ':')
+ # Must be a development checkout, so use the Makefile
+ or _get_version_from_file('Makefile', 'VERSION', '=')
+ or "0.0")
+
+
+setup(
+ name='python-subunit',
+ version=VERSION,
+ description=('Python implementation of subunit test streaming protocol'),
+ long_description=open('README').read(),
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development :: Testing',
+ ],
+ keywords='python test streaming',
+ author='Robert Collins',
+ author_email='subunit-dev@lists.launchpad.net',
+ url='http://launchpad.net/subunit',
+ packages=['subunit', 'subunit.tests'],
+ package_dir={'subunit': 'python/subunit'},
+ scripts = [
+ 'filters/subunit-1to2',
+ 'filters/subunit-2to1',
+ 'filters/subunit2gtk',
+ 'filters/subunit2junitxml',
+ 'filters/subunit2pyunit',
+ 'filters/subunit-filter',
+ 'filters/subunit-ls',
+ 'filters/subunit-notify',
+ 'filters/subunit-stats',
+ 'filters/subunit-tags',
+ 'filters/tap2subunit',
+ ],
+ **extra
+)
diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/scenarios.py b/test/3rdparty/testscenarios-0.2/lib/testscenarios/scenarios.py
deleted file mode 100644
index e531b2e0da1..00000000000
--- a/test/3rdparty/testscenarios-0.2/lib/testscenarios/scenarios.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# testscenarios: extensions to python unittest to allow declarative
-# dependency injection ('scenarios') by tests.
-#
-# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-
-__all__ = [
- 'apply_scenario',
- 'apply_scenarios',
- 'generate_scenarios',
- ]
-
-import unittest
-
-from testtools.testcase import clone_test_with_new_id
-from testtools import iterate_tests
-
-
-def apply_scenario((name, parameters), test):
- """Apply scenario to test.
-
- :param scenario: A tuple (name, parameters) to apply to the test. The test
- is cloned, its id adjusted to have (name) after it, and the parameters
- dict is used to update the new test.
- :param test: The test to apply the scenario to. This test is unaltered.
- :return: A new test cloned from test, with the scenario applied.
- """
- scenario_suffix = '(' + name + ')'
- newtest = clone_test_with_new_id(test,
- test.id() + scenario_suffix)
- test_desc = test.shortDescription()
- if test_desc is not None:
- newtest_desc = "%(test_desc)s %(scenario_suffix)s" % vars()
- newtest.shortDescription = (lambda: newtest_desc)
- for key, value in parameters.iteritems():
- setattr(newtest, key, value)
- return newtest
-
-
-def apply_scenarios(scenarios, test):
- """Apply many scenarios to a test.
-
- :param scenarios: An iterable of scenarios.
- :param test: A test to apply the scenarios to.
- :return: A generator of tests.
- """
- for scenario in scenarios:
- yield apply_scenario(scenario, test)
-
-
-def generate_scenarios(test_or_suite):
- """Yield the tests in test_or_suite with scenario multiplication done.
-
- TestCase objects with no scenarios specified are yielded unaltered. Tests
- with scenarios are not yielded at all, instead the results of multiplying
- them by the scenarios they specified gets yielded.
-
- :param test_or_suite: A TestCase or TestSuite.
- :return: A generator of tests - objects satisfying the TestCase protocol.
- """
- for test in iterate_tests(test_or_suite):
- scenarios = getattr(test, 'scenarios', None)
- if scenarios:
- for newtest in apply_scenarios(scenarios, test):
- newtest.scenarios = None
- yield newtest
- else:
- yield test
diff --git a/test/3rdparty/testscenarios-0.2/.bzrignore b/test/3rdparty/testscenarios-0.4/.bzrignore
index 336aaca369d..336aaca369d 100644
--- a/test/3rdparty/testscenarios-0.2/.bzrignore
+++ b/test/3rdparty/testscenarios-0.4/.bzrignore
diff --git a/test/3rdparty/testscenarios-0.2/Apache-2.0 b/test/3rdparty/testscenarios-0.4/Apache-2.0
index d6456956733..d6456956733 100644
--- a/test/3rdparty/testscenarios-0.2/Apache-2.0
+++ b/test/3rdparty/testscenarios-0.4/Apache-2.0
diff --git a/test/3rdparty/testscenarios-0.2/BSD b/test/3rdparty/testscenarios-0.4/BSD
index 0e75db647b3..0e75db647b3 100644
--- a/test/3rdparty/testscenarios-0.2/BSD
+++ b/test/3rdparty/testscenarios-0.4/BSD
diff --git a/test/3rdparty/testscenarios-0.2/COPYING b/test/3rdparty/testscenarios-0.4/COPYING
index ee16c4ecaf9..ee16c4ecaf9 100644
--- a/test/3rdparty/testscenarios-0.2/COPYING
+++ b/test/3rdparty/testscenarios-0.4/COPYING
diff --git a/test/3rdparty/testscenarios-0.2/GOALS b/test/3rdparty/testscenarios-0.4/GOALS
index 68be00129b2..68be00129b2 100644
--- a/test/3rdparty/testscenarios-0.2/GOALS
+++ b/test/3rdparty/testscenarios-0.4/GOALS
diff --git a/test/3rdparty/testscenarios-0.2/HACKING b/test/3rdparty/testscenarios-0.4/HACKING
index 0c68ee7da90..0c68ee7da90 100644
--- a/test/3rdparty/testscenarios-0.2/HACKING
+++ b/test/3rdparty/testscenarios-0.4/HACKING
diff --git a/test/3rdparty/testscenarios-0.2/MANIFEST.in b/test/3rdparty/testscenarios-0.4/MANIFEST.in
index 0edefa19588..0edefa19588 100644
--- a/test/3rdparty/testscenarios-0.2/MANIFEST.in
+++ b/test/3rdparty/testscenarios-0.4/MANIFEST.in
diff --git a/test/3rdparty/testscenarios-0.2/Makefile b/test/3rdparty/testscenarios-0.4/Makefile
index c38edf6bfe7..c38edf6bfe7 100644
--- a/test/3rdparty/testscenarios-0.2/Makefile
+++ b/test/3rdparty/testscenarios-0.4/Makefile
diff --git a/test/3rdparty/testscenarios-0.2/NEWS b/test/3rdparty/testscenarios-0.4/NEWS
index 311d57664b2..fc3a10c469a 100644
--- a/test/3rdparty/testscenarios-0.2/NEWS
+++ b/test/3rdparty/testscenarios-0.4/NEWS
@@ -6,17 +6,44 @@ testscenarios release notes
IN DEVELOPMENT
~~~~~~~~~~~~~~
+0.4
+~~~
+
+IMPROVEMENTS
+------------
+
+* Python 3.2 support added. (Robert Collins)
+
+0.3
+~~~
+
+CHANGES
+-------
+
+* New function ``per_module_scenarios`` for tests that should be applied across
+ multiple modules providing the same interface, some of which may not be
+ available at run time. (Martin Pool)
+
+* ``TestWithScenarios`` is now backed by a mixin - WithScenarios - which can be
+ mixed into different unittest implementations more cleanly (e.g. unittest2).
+ (James Polley, Robert Collins)
+
0.2
~~~
-CHANGES:
+CHANGES
+-------
* Adjust the cloned tests ``shortDescription`` if one is present. (Ben Finney)
+* Provide a load_tests implementation for easy use, and multiply_scenarios to
+ create the cross product of scenarios. (Martin Pool)
+
0.1
~~~
-CHANGES:
+CHANGES
+-------
* Created project. The primary interfaces are
``testscenarios.TestWithScenarios`` and
@@ -27,11 +54,3 @@ CHANGES:
Also various presentation and language touchups. (Martin Pool)
(Adjusted to use doctest directly, and to not print the demo runners
output to stderror during make check - Robert Collins)
-
-IMPROVEMENTS:
-
-BUG FIXES:
-
-API CHANGES:
-
-INTERNALS:
diff --git a/test/3rdparty/testscenarios-0.2/PKG-INFO b/test/3rdparty/testscenarios-0.4/PKG-INFO
index 4408c965685..f3ab96a5653 100644
--- a/test/3rdparty/testscenarios-0.2/PKG-INFO
+++ b/test/3rdparty/testscenarios-0.4/PKG-INFO
@@ -1,6 +1,6 @@
-Metadata-Version: 1.0
+Metadata-Version: 1.1
Name: testscenarios
-Version: 0.2
+Version: 0.4
Summary: Testscenarios, a pyunit extension for dependency injection
Home-page: https://launchpad.net/testscenarios
Author: Robert Collins
@@ -10,18 +10,18 @@ Description: *****************************************************************
testscenarios: extensions to python unittest to support scenarios
*****************************************************************
- Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
-
- Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- license at the users choice. A copy of both licenses are available in the
- project source as Apache-2.0 and BSD. You may not use this file except in
- compliance with one of these two licences.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- license you chose for the specific language governing permissions and
- limitations under that license.
+ Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
testscenarios provides clean dependency injection for python unittest style
@@ -77,20 +77,20 @@ Description: *****************************************************************
* Clone the test to a new test with a new id uniquely distinguishing it.
* Apply the scenario to the test by setting each key, value in the scenario
- as attributes on the test object.
+ as attributes on the test object.
There are some complicating factors around making this happen seamlessly. These
factors are in two areas:
* Choosing what scenarios to use. (See Setting Scenarios For A Test).
- * Getting the multiplication to happen.
+ * Getting the multiplication to happen.
Subclasssing
++++++++++++
If you can subclass TestWithScenarios, then the ``run()`` method in
TestWithScenarios will take care of test multiplication. It will at test
- execution act as a generator causing multiple tests to execute. For this to
+ execution act as a generator causing multiple tests to execute. For this to
work reliably TestWithScenarios must be first in the MRO and you cannot
override run() or __call__. This is the most robust method, in the sense
that any test runner or test loader that obeys the python unittest protocol
@@ -101,25 +101,28 @@ Description: *****************************************************************
If you cannot subclass TestWithScenarios (e.g. because you are using
TwistedTestCase, or TestCaseWithResources, or any one of a number of other
- useful test base classes, or need to override run() or __call__ yourself) then
+ useful test base classes, or need to override run() or __call__ yourself) then
you can cause scenario application to happen later by calling
``testscenarios.generate_scenarios()``. For instance::
- >>> import unittest
- >>> import StringIO
- >>> from testscenarios.scenarios import generate_scenarios
+ >>> import unittest
+ >>> try:
+ ... from StringIO import StringIO
+ ... except ImportError:
+ ... from io import StringIO
+ >>> from testscenarios.scenarios import generate_scenarios
This can work with loaders and runners from the standard library, or possibly other
implementations::
- >>> loader = unittest.TestLoader()
- >>> test_suite = unittest.TestSuite()
- >>> runner = unittest.TextTestRunner(stream=StringIO.StringIO())
+ >>> loader = unittest.TestLoader()
+ >>> test_suite = unittest.TestSuite()
+ >>> runner = unittest.TextTestRunner(stream=StringIO())
- >>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
- >>> test_suite.addTests(generate_scenarios(mytests))
- >>> runner.run(test_suite)
- <unittest._TextTestResult run=1 errors=0 failures=0>
+ >>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
+ >>> test_suite.addTests(generate_scenarios(mytests))
+ >>> runner.run(test_suite)
+ <unittest...TextTestResult run=1 errors=0 failures=0>
Testloaders
+++++++++++
@@ -131,19 +134,28 @@ Description: *****************************************************************
course, if you are using the subclassing approach this is already a surety).
With ``load_tests``::
- >>> def load_tests(standard_tests, module, loader):
- ... result = loader.suiteClass()
- ... result.addTests(generate_scenarios(standard_tests))
- ... return result
+ >>> def load_tests(standard_tests, module, loader):
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(standard_tests))
+ ... return result
+
+ as a convenience, this is available in ``load_tests_apply_scenarios``, so a
+ module using scenario tests need only say ::
+
+ >>> from testscenarios import load_tests_apply_scenarios as load_tests
+
+ Python 2.7 and greater support a different calling convention for ``load_tests``
+ <https://bugs.launchpad.net/bzr/+bug/607412>. ``load_tests_apply_scenarios``
+ copes with both.
With ``test_suite``::
- >>> def test_suite():
- ... loader = TestLoader()
- ... tests = loader.loadTestsFromName(__name__)
- ... result = loader.suiteClass()
- ... result.addTests(generate_scenarios(tests))
- ... return result
+ >>> def test_suite():
+ ... loader = TestLoader()
+ ... tests = loader.loadTestsFromName(__name__)
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(tests))
+ ... return result
Setting Scenarios for a test
@@ -158,11 +170,11 @@ Description: *****************************************************************
You can set a scenarios attribute on the test case::
- >>> class MyTest(unittest.TestCase):
- ...
- ... scenarios = [
- ... ('scenario1', dict(param=1)),
- ... ('scenario2', dict(param=2)),]
+ >>> class MyTest(unittest.TestCase):
+ ...
+ ... scenarios = [
+ ... ('scenario1', dict(param=1)),
+ ... ('scenario2', dict(param=2)),]
This provides the main interface by which scenarios are found for a given test.
Subclasses will inherit the scenarios (unless they override the attribute).
@@ -175,43 +187,43 @@ Description: *****************************************************************
single scenarios attribute) the scenarios attribute. For instance in this
example some third party tests are extended to run with a custom scenario. ::
- >>> import testtools
- >>> class TestTransport:
- ... """Hypothetical test case for bzrlib transport tests"""
- ... pass
- ...
- >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames(
- ... ['doc.test_sample'])
- ...
- >>> for test in testtools.iterate_tests(stock_library_tests):
- ... if isinstance(test, TestTransport):
- ... test.scenarios = test.scenarios + [my_vfs_scenario]
- ...
- >>> suite = unittest.TestSuite()
- >>> suite.addTests(generate_scenarios(stock_library_tests))
+ >>> import testtools
+ >>> class TestTransport:
+ ... """Hypothetical test case for bzrlib transport tests"""
+ ... pass
+ ...
+ >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames(
+ ... ['doc.test_sample'])
+ ...
+ >>> for test in testtools.iterate_tests(stock_library_tests):
+ ... if isinstance(test, TestTransport):
+ ... test.scenarios = test.scenarios + [my_vfs_scenario]
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(stock_library_tests))
Generated tests don't have a ``scenarios`` list, because they don't normally
require any more expansion. However, you can add a ``scenarios`` list back on
to them, and then run them through ``generate_scenarios`` again to generate the
cross product of tests. ::
- >>> class CrossProductDemo(unittest.TestCase):
- ... scenarios = [('scenario_0_0', {}),
- ... ('scenario_0_1', {})]
- ... def test_foo(self):
- ... return
- ...
- >>> suite = unittest.TestSuite()
- >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo")))
- >>> for test in testtools.iterate_tests(suite):
- ... test.scenarios = [
- ... ('scenario_1_0', {}),
- ... ('scenario_1_1', {})]
- ...
- >>> suite2 = unittest.TestSuite()
- >>> suite2.addTests(generate_scenarios(suite))
- >>> print suite2.countTestCases()
- 4
+ >>> class CrossProductDemo(unittest.TestCase):
+ ... scenarios = [('scenario_0_0', {}),
+ ... ('scenario_0_1', {})]
+ ... def test_foo(self):
+ ... return
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo")))
+ >>> for test in testtools.iterate_tests(suite):
+ ... test.scenarios = [
+ ... ('scenario_1_0', {}),
+ ... ('scenario_1_1', {})]
+ ...
+ >>> suite2 = unittest.TestSuite()
+ >>> suite2.addTests(generate_scenarios(suite))
+ >>> print(suite2.countTestCases())
+ 4
Dynamic Scenarios
+++++++++++++++++
@@ -222,27 +234,27 @@ Description: *****************************************************************
be customised, or dynamically populate your scenarios from a registry etc.
For instance::
- >>> hash_scenarios = []
- >>> try:
- ... from hashlib import md5
- ... except ImportError:
- ... pass
- ... else:
- ... hash_scenarios.append(("md5", dict(hash=md5)))
- >>> try:
- ... from hashlib import sha1
- ... except ImportError:
- ... pass
- ... else:
- ... hash_scenarios.append(("sha1", dict(hash=sha1)))
- ...
- >>> class TestHashContract(unittest.TestCase):
- ...
- ... scenarios = hash_scenarios
- ...
- >>> class TestHashPerformance(unittest.TestCase):
- ...
- ... scenarios = hash_scenarios
+ >>> hash_scenarios = []
+ >>> try:
+ ... from hashlib import md5
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("md5", dict(hash=md5)))
+ >>> try:
+ ... from hashlib import sha1
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("sha1", dict(hash=sha1)))
+ ...
+ >>> class TestHashContract(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+ ...
+ >>> class TestHashPerformance(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
Forcing Scenarios
@@ -257,12 +269,60 @@ Description: *****************************************************************
selection.
+ Generating Scenarios
+ ====================
+
+ Some functions (currently one :-) are available to ease generation of scenario
+ lists for common situations.
+
+ Testing Per Implementation Module
+ +++++++++++++++++++++++++++++++++
+
+ It is reasonably common to have multiple Python modules that provide the same
+ capabilities and interface, and to want to apply the same tests to all of them.
+
+ In some cases, not all of the statically defined implementations will be able
+ to be used in a particular testing environment. For example, there may be both
+ a C and a pure-Python implementation of a module. You want to test the C
+ module if it can be loaded, but also to have the tests pass if the C module has
+ not been compiled.
+
+ The ``per_module_scenarios`` function generates a scenario for each named
+ module. The module object of the imported module is set in the supplied
+ attribute name of the resulting scenario.
+ Modules which raise ``ImportError`` during import will have the
+ ``sys.exc_info()`` of the exception set instead of the module object. Tests
+ can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+ Note that for the test to be valid, all access to the module under test must go
+ through the relevant attribute of the test object. If one of the
+ implementations is also directly imported by the test module or any other,
+ testscenarios will not magically stop it being used.
+
+
Advice on Writing Scenarios
===========================
If a parameterised test is because of a bug run without being parameterized,
it should fail rather than running with defaults, because this can hide bugs.
+
+ Producing Scenarios
+ ===================
+
+ The `multiply_scenarios` function produces the cross-product of the scenarios
+ passed in::
+
+ >>> from testscenarios.scenarios import multiply_scenarios
+ >>>
+ >>> scenarios = multiply_scenarios(
+ ... [('scenario1', dict(param1=1)), ('scenario2', dict(param1=2))],
+ ... [('scenario2', dict(param2=1))],
+ ... )
+ >>> scenarios == [('scenario1,scenario2', {'param2': 1, 'param1': 1}),
+ ... ('scenario2,scenario2', {'param2': 1, 'param1': 2})]
+ True
+
Platform: UNKNOWN
Classifier: Development Status :: 6 - Mature
Classifier: Intended Audience :: Developers
@@ -270,5 +330,6 @@ Classifier: License :: OSI Approved :: BSD License
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Software Development :: Quality Assurance
Classifier: Topic :: Software Development :: Testing
diff --git a/test/3rdparty/testscenarios-0.2/README b/test/3rdparty/testscenarios-0.4/README
index b827cb67a82..e7e7eb717e0 100644
--- a/test/3rdparty/testscenarios-0.2/README
+++ b/test/3rdparty/testscenarios-0.4/README
@@ -98,7 +98,10 @@ you can cause scenario application to happen later by calling
``testscenarios.generate_scenarios()``. For instance::
>>> import unittest
- >>> import StringIO
+ >>> try:
+ ... from StringIO import StringIO
+ ... except ImportError:
+ ... from io import StringIO
>>> from testscenarios.scenarios import generate_scenarios
This can work with loaders and runners from the standard library, or possibly other
@@ -106,12 +109,12 @@ implementations::
>>> loader = unittest.TestLoader()
>>> test_suite = unittest.TestSuite()
- >>> runner = unittest.TextTestRunner(stream=StringIO.StringIO())
+ >>> runner = unittest.TextTestRunner(stream=StringIO())
>>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
>>> test_suite.addTests(generate_scenarios(mytests))
>>> runner.run(test_suite)
- <unittest._TextTestResult run=1 errors=0 failures=0>
+ <unittest...TextTestResult run=1 errors=0 failures=0>
Testloaders
+++++++++++
@@ -128,6 +131,15 @@ With ``load_tests``::
... result.addTests(generate_scenarios(standard_tests))
... return result
+as a convenience, this is available in ``load_tests_apply_scenarios``, so a
+module using scenario tests need only say ::
+
+ >>> from testscenarios import load_tests_apply_scenarios as load_tests
+
+Python 2.7 and greater support a different calling convention for ``load_tests``
+<https://bugs.launchpad.net/bzr/+bug/607412>. ``load_tests_apply_scenarios``
+copes with both.
+
With ``test_suite``::
>>> def test_suite():
@@ -202,7 +214,7 @@ cross product of tests. ::
...
>>> suite2 = unittest.TestSuite()
>>> suite2.addTests(generate_scenarios(suite))
- >>> print suite2.countTestCases()
+ >>> print(suite2.countTestCases())
4
Dynamic Scenarios
@@ -249,8 +261,56 @@ allowing it to be used to layer scenarios without affecting existing scenario
selection.
+Generating Scenarios
+====================
+
+Some functions (currently one :-) are available to ease generation of scenario
+lists for common situations.
+
+Testing Per Implementation Module
++++++++++++++++++++++++++++++++++
+
+It is reasonably common to have multiple Python modules that provide the same
+capabilities and interface, and to want to apply the same tests to all of them.
+
+In some cases, not all of the statically defined implementations will be able
+to be used in a particular testing environment. For example, there may be both
+a C and a pure-Python implementation of a module. You want to test the C
+module if it can be loaded, but also to have the tests pass if the C module has
+not been compiled.
+
+The ``per_module_scenarios`` function generates a scenario for each named
+module. The module object of the imported module is set in the supplied
+attribute name of the resulting scenario.
+Modules which raise ``ImportError`` during import will have the
+``sys.exc_info()`` of the exception set instead of the module object. Tests
+can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+Note that for the test to be valid, all access to the module under test must go
+through the relevant attribute of the test object. If one of the
+implementations is also directly imported by the test module or any other,
+testscenarios will not magically stop it being used.
+
+
Advice on Writing Scenarios
===========================
If a parameterised test is because of a bug run without being parameterized,
it should fail rather than running with defaults, because this can hide bugs.
+
+
+Producing Scenarios
+===================
+
+The `multiply_scenarios` function produces the cross-product of the scenarios
+passed in::
+
+ >>> from testscenarios.scenarios import multiply_scenarios
+ >>>
+ >>> scenarios = multiply_scenarios(
+ ... [('scenario1', dict(param1=1)), ('scenario2', dict(param1=2))],
+ ... [('scenario2', dict(param2=1))],
+ ... )
+ >>> scenarios == [('scenario1,scenario2', {'param2': 1, 'param1': 1}),
+ ... ('scenario2,scenario2', {'param2': 1, 'param1': 2})]
+ True
diff --git a/test/3rdparty/testscenarios-0.2/doc/__init__.py b/test/3rdparty/testscenarios-0.4/doc/__init__.py
index 4dbad55dcbb..4dbad55dcbb 100644
--- a/test/3rdparty/testscenarios-0.2/doc/__init__.py
+++ b/test/3rdparty/testscenarios-0.4/doc/__init__.py
diff --git a/test/3rdparty/testscenarios-0.2/doc/example.py b/test/3rdparty/testscenarios-0.4/doc/example.py
index a8d195fade2..a8d195fade2 100644
--- a/test/3rdparty/testscenarios-0.2/doc/example.py
+++ b/test/3rdparty/testscenarios-0.4/doc/example.py
diff --git a/test/3rdparty/testscenarios-0.2/doc/test_sample.py b/test/3rdparty/testscenarios-0.4/doc/test_sample.py
index a0b00a5ef54..a0b00a5ef54 100644
--- a/test/3rdparty/testscenarios-0.2/doc/test_sample.py
+++ b/test/3rdparty/testscenarios-0.4/doc/test_sample.py
diff --git a/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO
new file mode 100644
index 00000000000..f3ab96a5653
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/PKG-INFO
@@ -0,0 +1,335 @@
+Metadata-Version: 1.1
+Name: testscenarios
+Version: 0.4
+Summary: Testscenarios, a pyunit extension for dependency injection
+Home-page: https://launchpad.net/testscenarios
+Author: Robert Collins
+Author-email: robertc@robertcollins.net
+License: UNKNOWN
+Description: *****************************************************************
+ testscenarios: extensions to python unittest to support scenarios
+ *****************************************************************
+
+ Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+
+ testscenarios provides clean dependency injection for python unittest style
+ tests. This can be used for interface testing (testing many implementations via
+ a single test suite) or for classic dependency injection (provide tests with
+ dependencies externally to the test code itself, allowing easy testing in
+ different situations).
+
+ Dependencies
+ ============
+
+ * Python 2.4+
+ * testtools <https://launchpad.net/testtools>
+
+
+ Why TestScenarios
+ =================
+
+ Standard Python unittest.py provides one obvious method for running a single
+ test_foo method with two (or more) scenarios: by creating a mix-in that
+ provides the functions, objects or settings that make up the scenario. This is
+ however limited and unsatisfying. Firstly, when two projects are cooperating
+ on a test suite (for instance, a plugin to a larger project may want to run
+ the standard tests for a given interface on its implementation), then it is
+ easy for them to get out of sync with each other: when the list of TestCase
+ classes to mix-in with changes, the plugin will either fail to run some tests
+ or error trying to run deleted tests. Secondly, it's not as easy to work with
+ runtime-created subclasses (a way of dealing with the aforementioned skew)
+ because they require more indirection to locate the source of the test, and will
+ often be ignored by e.g. pyflakes, pylint etc.
+
+ It is the intent of testscenarios to make dynamically running a single test
+ in multiple scenarios clear, easy to debug and work with even when the list
+ of scenarios is dynamically generated.
+
+
+ Defining Scenarios
+ ==================
+
+ A **scenario** is a tuple of a string name for the scenario, and a dict of
+ parameters describing the scenario. The name is appended to the test name, and
+ the parameters are made available to the test instance when it's run.
+
+ Scenarios are presented in **scenario lists** which are typically Python lists
+ but may be any iterable.
+
+
+ Getting Scenarios applied
+ =========================
+
+ At its heart the concept is simple. For a given test object with a list of
+ scenarios we prepare a new test object for each scenario. This involves:
+
+ * Clone the test to a new test with a new id uniquely distinguishing it.
+ * Apply the scenario to the test by setting each key, value in the scenario
+ as attributes on the test object.
+
+ There are some complicating factors around making this happen seamlessly. These
+ factors are in two areas:
+
+ * Choosing what scenarios to use. (See Setting Scenarios For A Test).
+ * Getting the multiplication to happen.
+
+ Subclassing
+ ++++++++++++
+
+ If you can subclass TestWithScenarios, then the ``run()`` method in
+ TestWithScenarios will take care of test multiplication. It will at test
+ execution act as a generator causing multiple tests to execute. For this to
+ work reliably TestWithScenarios must be first in the MRO and you cannot
+ override run() or __call__. This is the most robust method, in the sense
+ that any test runner or test loader that obeys the python unittest protocol
+ will run all your scenarios.
+
+ Manual generation
+ +++++++++++++++++
+
+ If you cannot subclass TestWithScenarios (e.g. because you are using
+ TwistedTestCase, or TestCaseWithResources, or any one of a number of other
+ useful test base classes, or need to override run() or __call__ yourself) then
+ you can cause scenario application to happen later by calling
+ ``testscenarios.generate_scenarios()``. For instance::
+
+ >>> import unittest
+ >>> try:
+ ... from StringIO import StringIO
+ ... except ImportError:
+ ... from io import StringIO
+ >>> from testscenarios.scenarios import generate_scenarios
+
+ This can work with loaders and runners from the standard library, or possibly other
+ implementations::
+
+ >>> loader = unittest.TestLoader()
+ >>> test_suite = unittest.TestSuite()
+ >>> runner = unittest.TextTestRunner(stream=StringIO())
+
+ >>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
+ >>> test_suite.addTests(generate_scenarios(mytests))
+ >>> runner.run(test_suite)
+ <unittest...TextTestResult run=1 errors=0 failures=0>
+
+ Testloaders
+ +++++++++++
+
+ Some test loaders support hooks like ``load_tests`` and ``test_suite``.
+ Ensuring your tests have had scenario application done through these hooks can
+ be a good idea - it means that external test runners (which support these hooks
+ like ``nose``, ``trial``, ``tribunal``) will still run your scenarios. (Of
+ course, if you are using the subclassing approach this is already a surety).
+ With ``load_tests``::
+
+ >>> def load_tests(standard_tests, module, loader):
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(standard_tests))
+ ... return result
+
+ as a convenience, this is available in ``load_tests_apply_scenarios``, so a
+ module using scenario tests need only say ::
+
+ >>> from testscenarios import load_tests_apply_scenarios as load_tests
+
+ Python 2.7 and greater support a different calling convention for ``load_tests``
+ <https://bugs.launchpad.net/bzr/+bug/607412>. ``load_tests_apply_scenarios``
+ copes with both.
+
+ With ``test_suite``::
+
+ >>> def test_suite():
+ ... loader = TestLoader()
+ ... tests = loader.loadTestsFromName(__name__)
+ ... result = loader.suiteClass()
+ ... result.addTests(generate_scenarios(tests))
+ ... return result
+
+
+ Setting Scenarios for a test
+ ============================
+
+ A sample test using scenarios can be found in the doc/ folder.
+
+ See `pydoc testscenarios` for details.
+
+ On the TestCase
+ +++++++++++++++
+
+ You can set a scenarios attribute on the test case::
+
+ >>> class MyTest(unittest.TestCase):
+ ...
+ ... scenarios = [
+ ... ('scenario1', dict(param=1)),
+ ... ('scenario2', dict(param=2)),]
+
+ This provides the main interface by which scenarios are found for a given test.
+ Subclasses will inherit the scenarios (unless they override the attribute).
+
+ After loading
+ +++++++++++++
+
+ Test scenarios can also be generated arbitrarily later, as long as the test has
+ not yet run. Simply replace (or alter, but be aware that many tests may share a
+ single scenarios attribute) the scenarios attribute. For instance in this
+ example some third party tests are extended to run with a custom scenario. ::
+
+ >>> import testtools
+ >>> class TestTransport:
+ ... """Hypothetical test case for bzrlib transport tests"""
+ ... pass
+ ...
+ >>> stock_library_tests = unittest.TestLoader().loadTestsFromNames(
+ ... ['doc.test_sample'])
+ ...
+ >>> for test in testtools.iterate_tests(stock_library_tests):
+ ... if isinstance(test, TestTransport):
+ ... test.scenarios = test.scenarios + [my_vfs_scenario]
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(stock_library_tests))
+
+ Generated tests don't have a ``scenarios`` list, because they don't normally
+ require any more expansion. However, you can add a ``scenarios`` list back on
+ to them, and then run them through ``generate_scenarios`` again to generate the
+ cross product of tests. ::
+
+ >>> class CrossProductDemo(unittest.TestCase):
+ ... scenarios = [('scenario_0_0', {}),
+ ... ('scenario_0_1', {})]
+ ... def test_foo(self):
+ ... return
+ ...
+ >>> suite = unittest.TestSuite()
+ >>> suite.addTests(generate_scenarios(CrossProductDemo("test_foo")))
+ >>> for test in testtools.iterate_tests(suite):
+ ... test.scenarios = [
+ ... ('scenario_1_0', {}),
+ ... ('scenario_1_1', {})]
+ ...
+ >>> suite2 = unittest.TestSuite()
+ >>> suite2.addTests(generate_scenarios(suite))
+ >>> print(suite2.countTestCases())
+ 4
+
+ Dynamic Scenarios
+ +++++++++++++++++
+
+ A common use case is to have the list of scenarios be dynamic based on plugins
+ and available libraries. An easy way to do this is to provide a global scope
+ scenarios somewhere relevant to the tests that will use it, and then that can
+ be customised, or dynamically populate your scenarios from a registry etc.
+ For instance::
+
+ >>> hash_scenarios = []
+ >>> try:
+ ... from hashlib import md5
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("md5", dict(hash=md5)))
+ >>> try:
+ ... from hashlib import sha1
+ ... except ImportError:
+ ... pass
+ ... else:
+ ... hash_scenarios.append(("sha1", dict(hash=sha1)))
+ ...
+ >>> class TestHashContract(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+ ...
+ >>> class TestHashPerformance(unittest.TestCase):
+ ...
+ ... scenarios = hash_scenarios
+
+
+ Forcing Scenarios
+ +++++++++++++++++
+
+ The ``apply_scenarios`` function can be useful to apply scenarios to a test
+ that has none applied. ``apply_scenarios`` is the workhorse for
+ ``generate_scenarios``, except it takes the scenarios passed in rather than
+ introspecting the test object to determine the scenarios. The
+ ``apply_scenarios`` function does not reset the test scenarios attribute,
+ allowing it to be used to layer scenarios without affecting existing scenario
+ selection.
+
+
+ Generating Scenarios
+ ====================
+
+ Some functions (currently one :-) are available to ease generation of scenario
+ lists for common situations.
+
+ Testing Per Implementation Module
+ +++++++++++++++++++++++++++++++++
+
+ It is reasonably common to have multiple Python modules that provide the same
+ capabilities and interface, and to want to apply the same tests to all of them.
+
+ In some cases, not all of the statically defined implementations will be able
+ to be used in a particular testing environment. For example, there may be both
+ a C and a pure-Python implementation of a module. You want to test the C
+ module if it can be loaded, but also to have the tests pass if the C module has
+ not been compiled.
+
+ The ``per_module_scenarios`` function generates a scenario for each named
+ module. The module object of the imported module is set in the supplied
+ attribute name of the resulting scenario.
+ Modules which raise ``ImportError`` during import will have the
+ ``sys.exc_info()`` of the exception set instead of the module object. Tests
+ can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+ Note that for the test to be valid, all access to the module under test must go
+ through the relevant attribute of the test object. If one of the
+ implementations is also directly imported by the test module or any other,
+ testscenarios will not magically stop it being used.
+
+
+ Advice on Writing Scenarios
+ ===========================
+
+ If, because of a bug, a parameterised test is run without being parameterised,
+ it should fail rather than run with defaults, because this can hide bugs.
+
+
+ Producing Scenarios
+ ===================
+
+ The `multiply_scenarios` function produces the cross-product of the scenarios
+ passed in::
+
+ >>> from testscenarios.scenarios import multiply_scenarios
+ >>>
+ >>> scenarios = multiply_scenarios(
+ ... [('scenario1', dict(param1=1)), ('scenario2', dict(param1=2))],
+ ... [('scenario2', dict(param2=1))],
+ ... )
+ >>> scenarios == [('scenario1,scenario2', {'param2': 1, 'param1': 1}),
+ ... ('scenario2,scenario2', {'param2': 1, 'param1': 2})]
+ True
+
+Platform: UNKNOWN
+Classifier: Development Status :: 6 - Mature
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
diff --git a/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..32492e1b819
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/SOURCES.txt
@@ -0,0 +1,25 @@
+.bzrignore
+Apache-2.0
+BSD
+COPYING
+GOALS
+HACKING
+MANIFEST.in
+Makefile
+NEWS
+README
+setup.py
+doc/__init__.py
+doc/example.py
+doc/test_sample.py
+lib/testscenarios/__init__.py
+lib/testscenarios/scenarios.py
+lib/testscenarios/testcase.py
+lib/testscenarios.egg-info/PKG-INFO
+lib/testscenarios.egg-info/SOURCES.txt
+lib/testscenarios.egg-info/dependency_links.txt
+lib/testscenarios.egg-info/requires.txt
+lib/testscenarios.egg-info/top_level.txt
+lib/testscenarios/tests/__init__.py
+lib/testscenarios/tests/test_scenarios.py
+lib/testscenarios/tests/test_testcase.py \ No newline at end of file
diff --git a/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt
new file mode 100644
index 00000000000..ccdb4f2ad56
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/requires.txt
@@ -0,0 +1 @@
+testtools \ No newline at end of file
diff --git a/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt
new file mode 100644
index 00000000000..b0ec88e9d5a
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios.egg-info/top_level.txt
@@ -0,0 +1 @@
+testscenarios
diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/__init__.py b/test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py
index d608f13e842..ceacf37ddca 100644
--- a/test/3rdparty/testscenarios-0.2/lib/testscenarios/__init__.py
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios/__init__.py
@@ -38,20 +38,30 @@ methods for details.
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
-__version__ = (0, 2, 0, 'final', 0)
+__version__ = (0, 4, 0, 'final', 0)
__all__ = [
'TestWithScenarios',
+ 'WithScenarios',
'apply_scenario',
'apply_scenarios',
'generate_scenarios',
+ 'load_tests_apply_scenarios',
+ 'multiply_scenarios',
+ 'per_module_scenarios',
]
import unittest
-from testscenarios.scenarios import apply_scenario, generate_scenarios
-from testscenarios.testcase import TestWithScenarios
+from testscenarios.scenarios import (
+ apply_scenario,
+ generate_scenarios,
+ load_tests_apply_scenarios,
+ multiply_scenarios,
+ per_module_scenarios,
+ )
+from testscenarios.testcase import TestWithScenarios, WithScenarios
def test_suite():
diff --git a/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py b/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py
new file mode 100644
index 00000000000..eeb72ebb8a4
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios/scenarios.py
@@ -0,0 +1,167 @@
+# testscenarios: extensions to python unittest to allow declarative
+# dependency injection ('scenarios') by tests.
+#
+# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+# Copyright (c) 2010, 2011 Martin Pool <mbp@sourcefrog.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+__all__ = [
+ 'apply_scenario',
+ 'apply_scenarios',
+ 'generate_scenarios',
+ 'load_tests_apply_scenarios',
+ 'multiply_scenarios',
+ ]
+
+from itertools import (
+ chain,
+ product,
+ )
+import sys
+import unittest
+
+from testtools.testcase import clone_test_with_new_id
+from testtools import iterate_tests
+
+
+def apply_scenario(scenario, test):
+ """Apply scenario to test.
+
+ :param scenario: A tuple (name, parameters) to apply to the test. The test
+ is cloned, its id adjusted to have (name) after it, and the parameters
+ dict is used to update the new test.
+ :param test: The test to apply the scenario to. This test is unaltered.
+ :return: A new test cloned from test, with the scenario applied.
+ """
+ name, parameters = scenario
+ scenario_suffix = '(' + name + ')'
+ newtest = clone_test_with_new_id(test,
+ test.id() + scenario_suffix)
+ test_desc = test.shortDescription()
+ if test_desc is not None:
+ newtest_desc = "%(test_desc)s %(scenario_suffix)s" % vars()
+ newtest.shortDescription = (lambda: newtest_desc)
+ for key, value in parameters.items():
+ setattr(newtest, key, value)
+ return newtest
+
+
+def apply_scenarios(scenarios, test):
+ """Apply many scenarios to a test.
+
+ :param scenarios: An iterable of scenarios.
+ :param test: A test to apply the scenarios to.
+ :return: A generator of tests.
+ """
+ for scenario in scenarios:
+ yield apply_scenario(scenario, test)
+
+
+def generate_scenarios(test_or_suite):
+ """Yield the tests in test_or_suite with scenario multiplication done.
+
+ TestCase objects with no scenarios specified are yielded unaltered. Tests
+    with scenarios are not yielded at all; instead, the results of multiplying
+    them by the scenarios they specified get yielded.
+
+ :param test_or_suite: A TestCase or TestSuite.
+ :return: A generator of tests - objects satisfying the TestCase protocol.
+ """
+ for test in iterate_tests(test_or_suite):
+ scenarios = getattr(test, 'scenarios', None)
+ if scenarios:
+ for newtest in apply_scenarios(scenarios, test):
+ newtest.scenarios = None
+ yield newtest
+ else:
+ yield test
+
+
+def load_tests_apply_scenarios(*params):
+    """Adapt test runner load hooks to call generate_scenarios.
+
+ If this is referenced by the `load_tests` attribute of a module, then
+ testloaders that implement this protocol will automatically arrange for
+ the scenarios to be expanded. This can be used instead of using
+ TestWithScenarios.
+
+ Two different calling conventions for load_tests have been used, and this
+ function should support both. Python 2.7 passes (loader, standard_tests,
+ pattern), and bzr used (standard_tests, module, loader).
+
+ :param loader: A TestLoader.
+    :param standard_tests: The test objects found in this module before
+ multiplication.
+ """
+ if getattr(params[0], 'suiteClass', None) is not None:
+ loader, standard_tests, pattern = params
+ else:
+ standard_tests, module, loader = params
+ result = loader.suiteClass()
+ result.addTests(generate_scenarios(standard_tests))
+ return result
+
+
+def multiply_scenarios(*scenarios):
+ """Multiply two or more iterables of scenarios.
+
+ It is safe to pass scenario generators or iterators.
+
+ :returns: A list of compound scenarios: the cross-product of all
+ scenarios, with the names concatenated and the parameters
+ merged together.
+ """
+ result = []
+ scenario_lists = map(list, scenarios)
+ for combination in product(*scenario_lists):
+ names, parameters = zip(*combination)
+ scenario_name = ','.join(names)
+ scenario_parameters = {}
+ for parameter in parameters:
+ scenario_parameters.update(parameter)
+ result.append((scenario_name, scenario_parameters))
+ return result
+
+
+def per_module_scenarios(attribute_name, modules):
+ """Generate scenarios for available implementation modules.
+
+ This is typically used when there is a subsystem implemented, for
+ example, in both Python and C, and we want to apply the same tests to
+ both, but the C module may sometimes not be available.
+
+ Note: if the module can't be loaded, the sys.exc_info() tuple for the
+ exception raised during import of the module is used instead of the module
+ object. A common idiom is to check in setUp for that and raise a skip or
+ error for that case. No special helpers are supplied in testscenarios as
+ yet.
+
+ :param attribute_name: A name to be set in the scenario parameter
+ dictionary (and thence onto the test instance) pointing to the
+ implementation module (or import exception) for this scenario.
+
+ :param modules: An iterable of (short_name, module_name), where
+ the short name is something like 'python' to put in the
+ scenario name, and the long name is a fully-qualified Python module
+ name.
+ """
+ scenarios = []
+ for short_name, module_name in modules:
+ try:
+ mod = __import__(module_name, {}, {}, [''])
+ except:
+ mod = sys.exc_info()
+ scenarios.append((
+ short_name,
+ {attribute_name: mod}))
+ return scenarios
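For orientation, the helpers added above combine roughly as follows. A minimal
sketch, not part of the patch, assuming testscenarios and testtools are
importable::

    import unittest

    from testscenarios.scenarios import generate_scenarios

    class TestAddition(unittest.TestCase):
        # Each scenario is a (name, parameters) tuple: apply_scenario() clones
        # the test, appends "(name)" to its id, and sets each parameter as an
        # attribute on the clone.
        scenarios = [
            ('ints', {'a': 1, 'b': 2, 'total': 3}),
            ('floats', {'a': 1.5, 'b': 2.5, 'total': 4.0}),
        ]

        def test_add(self):
            self.assertEqual(self.total, self.a + self.b)

    # generate_scenarios() multiplies test_add into one test per scenario;
    # the expanded tests run under any unittest-compatible runner.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        generate_scenarios(loader.loadTestsFromTestCase(TestAddition)))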
diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/testcase.py b/test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py
index 5ec3a94a1d3..2ab50c78848 100644
--- a/test/3rdparty/testscenarios-0.2/lib/testscenarios/testcase.py
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios/testcase.py
@@ -16,6 +16,7 @@
__all__ = [
'TestWithScenarios',
+ 'WithScenarios',
]
import unittest
@@ -24,16 +25,18 @@ from testtools.testcase import clone_test_with_new_id
from testscenarios.scenarios import generate_scenarios
-class TestWithScenarios(unittest.TestCase):
- """A TestCase with support for scenarios via a scenarios attribute.
-
- When a test object which is an instance of TestWithScenarios is run,
- and there is a non-empty scenarios attribute on the object, the test is
- multiplied by the run method into one test per scenario. For this to work
- reliably the TestWithScenarios.run method must not be overriden in a
- subclass (or overridden compatibly with TestWithScenarios).
+_doc = """
+ When a test object which inherits from WithScenarios is run, and there is a
+ non-empty scenarios attribute on the object, the test is multiplied by the
+ run method into one test per scenario. For this to work reliably the
+    WithScenarios.run method must not be overridden in a subclass (or overridden
+ compatibly with WithScenarios).
"""
+class WithScenarios(object):
+ __doc__ = """A mixin for TestCase with support for declarative scenarios.
+ """ + _doc
+
def _get_scenarios(self):
return getattr(self, 'scenarios', None)
@@ -50,7 +53,7 @@ class TestWithScenarios(unittest.TestCase):
for test in generate_scenarios(self):
test.debug()
else:
- return super(TestWithScenarios, self).debug()
+ return super(WithScenarios, self).debug()
def run(self, result=None):
scenarios = self._get_scenarios()
@@ -59,4 +62,9 @@ class TestWithScenarios(unittest.TestCase):
test.run(result)
return
else:
- return super(TestWithScenarios, self).run(result)
+ return super(WithScenarios, self).run(result)
+
+
+class TestWithScenarios(WithScenarios, unittest.TestCase):
+ __doc__ = """Unittest TestCase with support for declarative scenarios.
+ """ + _doc
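Splitting the behaviour into a ``WithScenarios`` mixin plus the pre-mixed
``TestWithScenarios`` means scenarios can be combined with any TestCase
implementation. A minimal sketch, not part of the patch, assuming testtools is
installed::

    import testtools

    from testscenarios import WithScenarios

    class TestValues(WithScenarios, testtools.TestCase):
        # The mixin's run()/debug() expand these scenarios into one test each.
        scenarios = [('one', {'value': 1}), ('two', {'value': 2})]

        def test_positive(self):
            self.assertTrue(self.value > 0)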
diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/__init__.py b/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py
index e5e2bbeaa84..8e243b6e5ab 100644
--- a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/__init__.py
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/__init__.py
@@ -38,5 +38,6 @@ def load_tests(standard_tests, module, loader):
test_mod_names = [prefix + test_module for test_module in test_modules]
standard_tests.addTests(loader.loadTestsFromNames(test_mod_names))
doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
- standard_tests.addTest(doctest.DocFileSuite("../../../README"))
- return standard_tests
+ standard_tests.addTest(
+ doctest.DocFileSuite("../../../README", optionflags=doctest.ELLIPSIS))
+ return loader.suiteClass(testscenarios.generate_scenarios(standard_tests))
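Passing ``optionflags=doctest.ELLIPSIS`` lets the README's doctests elide
run-dependent output. Illustrative only::

    >>> object()  # doctest: +ELLIPSIS
    <object object at 0x...>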
diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_scenarios.py b/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py
index 4c801503d2c..97aa17f86cf 100644
--- a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_scenarios.py
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_scenarios.py
@@ -2,6 +2,7 @@
# dependency injection ('scenarios') by tests.
#
# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
+# Copyright (c) 2010, 2011 Martin Pool <mbp@sourcefrog.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
@@ -21,6 +22,8 @@ from testscenarios.scenarios import (
apply_scenario,
apply_scenarios,
generate_scenarios,
+ load_tests_apply_scenarios,
+ multiply_scenarios,
)
import testtools
from testtools.tests.helpers import LoggingResult
@@ -171,3 +174,88 @@ class TestApplyScenarios(testtools.TestCase):
tests = list(apply_scenarios(ReferenceTest.scenarios, test))
self.assertEqual([('demo', {})], ReferenceTest.scenarios)
self.assertEqual(ReferenceTest.scenarios, tests[0].scenarios)
+
+
+class TestLoadTests(testtools.TestCase):
+
+ class SampleTest(unittest.TestCase):
+ def test_nothing(self):
+ pass
+ scenarios = [
+ ('a', {}),
+ ('b', {}),
+ ]
+
+ def test_load_tests_apply_scenarios(self):
+ suite = load_tests_apply_scenarios(
+ unittest.TestLoader(),
+ [self.SampleTest('test_nothing')],
+ None)
+ result_tests = list(testtools.iterate_tests(suite))
+ self.assertEquals(
+ 2,
+ len(result_tests),
+ result_tests)
+
+ def test_load_tests_apply_scenarios_old_style(self):
+ """Call load_tests in the way used by bzr."""
+ suite = load_tests_apply_scenarios(
+ [self.SampleTest('test_nothing')],
+ self.__class__.__module__,
+ unittest.TestLoader(),
+ )
+ result_tests = list(testtools.iterate_tests(suite))
+ self.assertEquals(
+ 2,
+ len(result_tests),
+ result_tests)
+
+
+class TestMultiplyScenarios(testtools.TestCase):
+
+ def test_multiply_scenarios(self):
+ def factory(name):
+ for i in 'ab':
+ yield i, {name: i}
+ scenarios = multiply_scenarios(factory('p'), factory('q'))
+ self.assertEqual([
+ ('a,a', dict(p='a', q='a')),
+ ('a,b', dict(p='a', q='b')),
+ ('b,a', dict(p='b', q='a')),
+ ('b,b', dict(p='b', q='b')),
+ ],
+ scenarios)
+
+ def test_multiply_many_scenarios(self):
+ def factory(name):
+ for i in 'abc':
+ yield i, {name: i}
+ scenarios = multiply_scenarios(factory('p'), factory('q'),
+ factory('r'), factory('t'))
+ self.assertEqual(
+ 3**4,
+ len(scenarios),
+ scenarios)
+ self.assertEqual(
+ 'a,a,a,a',
+ scenarios[0][0])
+
+
+class TestPerModuleScenarios(testtools.TestCase):
+
+ def test_per_module_scenarios(self):
+ """Generate scenarios for available modules"""
+ s = testscenarios.scenarios.per_module_scenarios(
+ 'the_module', [
+ ('Python', 'testscenarios'),
+ ('unittest', 'unittest'),
+ ('nonexistent', 'nonexistent'),
+ ])
+ self.assertEqual('nonexistent', s[-1][0])
+ self.assertIsInstance(s[-1][1]['the_module'], tuple)
+ s[-1][1]['the_module'] = None
+ self.assertEqual(s, [
+ ('Python', {'the_module': testscenarios}),
+ ('unittest', {'the_module': unittest}),
+ ('nonexistent', {'the_module': None}),
+ ])
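The ``load_tests_apply_scenarios`` hook exercised above is wired into a test
module with a single assignment; a sketch, not part of the patch::

    # In a module containing scenario-bearing TestCases, this lets any
    # load_tests-aware loader expand the scenarios without requiring the
    # tests to inherit from TestWithScenarios.
    from testscenarios import load_tests_apply_scenarios

    load_tests = load_tests_apply_scenarios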
diff --git a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_testcase.py b/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py
index 6a9bbf997e2..74d2fe1c504 100644
--- a/test/3rdparty/testscenarios-0.2/lib/testscenarios/tests/test_testcase.py
+++ b/test/3rdparty/testscenarios-0.4/lib/testscenarios/tests/test_testcase.py
@@ -17,13 +17,25 @@
import unittest
import testscenarios
+import testtools
from testtools.tests.helpers import LoggingResult
-class TestTestWithScenarios(unittest.TestCase):
+class TestTestWithScenarios(testtools.TestCase):
+
+ scenarios = testscenarios.scenarios.per_module_scenarios(
+ 'impl', (('unittest', 'unittest'), ('unittest2', 'unittest2')))
+
+ @property
+ def Implementation(self):
+ if isinstance(self.impl, tuple):
+ self.skipTest('import failed - module not installed?')
+ class Implementation(testscenarios.WithScenarios, self.impl.TestCase):
+ pass
+ return Implementation
def test_no_scenarios_no_error(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
def test_pass(self):
pass
test = ReferenceTest("test_pass")
@@ -33,7 +45,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(1, result.testsRun)
def test_with_one_scenario_one_run(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [('demo', {})]
def test_pass(self):
pass
@@ -48,7 +60,7 @@ class TestTestWithScenarios(unittest.TestCase):
log[0][1].id())
def test_with_two_scenarios_two_run(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [('1', {}), ('2', {})]
def test_pass(self):
pass
@@ -66,7 +78,7 @@ class TestTestWithScenarios(unittest.TestCase):
log[4][1].id())
def test_attributes_set(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -80,7 +92,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(2, result.testsRun)
def test_scenarios_attribute_cleared(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -97,14 +109,14 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(None, log[4][1].scenarios)
def test_countTestCases_no_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
def test_check_foo(self):
pass
test = ReferenceTest("test_check_foo")
self.assertEqual(1, test.countTestCases())
def test_countTestCases_empty_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = []
def test_check_foo(self):
pass
@@ -112,7 +124,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(1, test.countTestCases())
def test_countTestCases_1_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [('1', {'foo': 1, 'bar': 2})]
def test_check_foo(self):
pass
@@ -120,7 +132,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(1, test.countTestCases())
def test_countTestCases_2_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -131,7 +143,7 @@ class TestTestWithScenarios(unittest.TestCase):
def test_debug_2_scenarios(self):
log = []
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
diff --git a/test/3rdparty/testscenarios-0.4/setup.cfg b/test/3rdparty/testscenarios-0.4/setup.cfg
new file mode 100644
index 00000000000..861a9f55426
--- /dev/null
+++ b/test/3rdparty/testscenarios-0.4/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/test/3rdparty/testscenarios-0.2/setup.py b/test/3rdparty/testscenarios-0.4/setup.py
index ace9f08c882..6b0d596a2a4 100755
--- a/test/3rdparty/testscenarios-0.2/setup.py
+++ b/test/3rdparty/testscenarios-0.4/setup.py
@@ -1,12 +1,12 @@
#!/usr/bin/env python
-from distutils.core import setup
+from setuptools import setup
import os.path
-description = file(os.path.join(os.path.dirname(__file__), 'README'), 'rb').read()
+description = open(os.path.join(os.path.dirname(__file__), 'README'), 'rt').read()
setup(name="testscenarios",
- version="0.2",
+ version="0.4",
description="Testscenarios, a pyunit extension for dependency injection",
long_description=description,
maintainer="Robert Collins",
@@ -21,7 +21,11 @@ setup(name="testscenarios",
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
+ install_requires = [
+ 'testtools',
+ ]
)
diff --git a/test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst b/test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst
deleted file mode 100644
index a4b20f64cac..00000000000
--- a/test/3rdparty/testtools-0.9.12/doc/for-framework-folk.rst
+++ /dev/null
@@ -1,219 +0,0 @@
-============================
-testtools for framework folk
-============================
-
-Introduction
-============
-
-In addition to having many features :doc:`for test authors
-<for-test-authors>`, testtools also has many bits and pieces that are useful
-for folk who write testing frameworks.
-
-If you are the author of a test runner, are working on a very large
-unit-tested project, are trying to get one testing framework to play nicely
-with another or are hacking away at getting your test suite to run in parallel
-over a heterogenous cluster of machines, this guide is for you.
-
-This manual is a summary. You can get details by consulting the `testtools
-API docs`_.
-
-
-Extensions to TestCase
-======================
-
-Custom exception handling
--------------------------
-
-testtools provides a way to control how test exceptions are handled. To do
-this, add a new exception to ``self.exception_handlers`` on a
-``testtools.TestCase``. For example::
-
- >>> self.exception_handlers.insert(-1, (ExceptionClass, handler)).
-
-Having done this, if any of ``setUp``, ``tearDown``, or the test method raise
-``ExceptionClass``, ``handler`` will be called with the test case, test result
-and the raised exception.
-
-Use this if you want to add a new kind of test result, that is, if you think
-that ``addError``, ``addFailure`` and so forth are not enough for your needs.
-
-
-Controlling test execution
---------------------------
-
-If you want to control more than just how exceptions are raised, you can
-provide a custom ``RunTest`` to a ``TestCase``. The ``RunTest`` object can
-change everything about how the test executes.
-
-To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
-takes a test and an optional list of exception handlers. Instances returned
-by the factory must have a ``run()`` method that takes an optional ``TestResult``
-object.
-
-The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
-method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla
-way that Python's standard unittest_ does.
-
-To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something
-like this::
-
- class SomeTests(TestCase):
- run_tests_with = CustomRunTestFactory
-
-To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do::
-
- class SomeTests(TestCase):
- @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
- def test_something(self):
- pass
-
-In addition, either of these can be overridden by passing a factory in to the
-``TestCase`` constructor with the optional ``runTest`` argument.
-
-
-Test renaming
--------------
-
-``testtools.clone_test_with_new_id`` is a function to copy a test case
-instance to one with a new name. This is helpful for implementing test
-parameterization.
-
-
-Test placeholders
-=================
-
-Sometimes, it's useful to be able to add things to a test suite that are not
-actually tests. For example, you might wish to represent import failures
-that occur during test discovery as tests, so that your test result object
-doesn't have to do special work to handle them nicely.
-
-testtools provides two such objects, called "placeholders": ``PlaceHolder``
-and ``ErrorHolder``. ``PlaceHolder`` takes a test id and an optional
-description. When it's run, it succeeds. ``ErrorHolder`` takes a test id,
-and error and an optional short description. When it's run, it reports that
-error.
-
-These placeholders are best used to log events that occur outside the test
-suite proper, but are still very relevant to its results.
-
-e.g.::
-
- >>> suite = TestSuite()
- >>> suite.add(PlaceHolder('I record an event'))
- >>> suite.run(TextTestResult(verbose=True))
- I record an event [OK]
-
-
-Extensions to TestResult
-========================
-
-TestResult.addSkip
-------------------
-
-This method is called on result objects when a test skips. The
-``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
-dict. These can be reported on in much the same way as successful tests.
-
-
-TestResult.time
----------------
-
-This method controls the time used by a ``TestResult``, permitting accurate
-timing of test results gathered on different machines or in different threads.
-See pydoc testtools.TestResult.time for more details.
-
-
-ThreadsafeForwardingResult
---------------------------
-
-A ``TestResult`` which forwards activity to another test result, but synchronises
-on a semaphore to ensure that all the activity for a single test arrives in a
-batch. This allows simple TestResults which do not expect concurrent test
-reporting to be fed the activity from multiple test threads, or processes.
-
-Note that when you provide multiple errors for a single test, the target sees
-each error as a distinct complete test.
-
-
-MultiTestResult
----------------
-
-A test result that dispatches its events to many test results. Use this
-to combine multiple different test result objects into one test result object
-that can be passed to ``TestCase.run()`` or similar. For example::
-
- a = TestResult()
- b = TestResult()
- combined = MultiTestResult(a, b)
- combined.startTestRun() # Calls a.startTestRun() and b.startTestRun()
-
-Each of the methods on ``MultiTestResult`` will return a tuple of whatever the
-component test results return.
-
-
-TextTestResult
---------------
-
-A ``TestResult`` that provides a text UI very similar to the Python standard
-library UI. Key differences are that it supports the extended outcomes and
-details API, and is completely encapsulated into the result object, permitting
-it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
-are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
-These limitations will be corrected soon.
-
-
-ExtendedToOriginalDecorator
----------------------------
-
-Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to
-meet the testtools ``TestResult`` API.
-
-
-Test Doubles
-------------
-
-In testtools.testresult.doubles there are three test doubles that testtools
-uses for its own testing: ``Python26TestResult``, ``Python27TestResult``,
-``ExtendedTestResult``. These TestResult objects implement a single variation of
-the TestResult API each, and log activity to a list ``self._events``. These are
-made available for the convenience of people writing their own extensions.
-
-
-startTestRun and stopTestRun
-----------------------------
-
-Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called
-before and after the entire test run. 'stopTestRun' is particularly useful for
-test results that wish to produce summary output.
-
-``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun``
-methods, and the default testtools runner will call these methods
-appropriately.
-
-The ``startTestRun`` method will reset any errors, failures and so forth on
-the result, making the result object look as if no tests have been run.
-
-
-Extensions to TestSuite
-=======================
-
-ConcurrentTestSuite
--------------------
-
-A TestSuite for parallel testing. This is used in conjunction with a helper that
-runs a single suite in some parallel fashion (for instance, forking, handing
-off to a subprocess, to a compute cloud, or simple threads).
-ConcurrentTestSuite uses the helper to get a number of separate runnable
-objects with a run(result) method, and runs them all in threads, using
-ThreadsafeForwardingResult to coalesce their activity.
-
-FixtureSuite
-------------
-
-A test suite that sets up a fixture_ before running any tests, and then tears
-it down after all of the tests are run. The fixture is *not* made available to
-any of the tests.
-
-.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
-.. _unittest: http://docs.python.org/library/unittest.html
-.. _fixture: http://pypi.python.org/pypi/fixtures
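The ``ConcurrentTestSuite`` usage described in the removed document corresponds
roughly to the following; a sketch, not part of the patch, assuming the
testtools API above is available::

    import unittest

    from testtools import ConcurrentTestSuite, iterate_tests

    def split_suite(suite):
        # A trivial "parallel fashion": one sub-suite per test. Real runners
        # fork or hand each partition to a worker process instead.
        return [unittest.TestSuite([test]) for test in iterate_tests(suite)]

    suite = unittest.TestLoader().loadTestsFromName('testscenarios.tests')
    result = unittest.TestResult()
    # Each sub-suite runs in its own thread; ThreadsafeForwardingResult
    # batches each test's activity back into `result`.
    ConcurrentTestSuite(suite, split_suite).run(result)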
diff --git a/test/3rdparty/testtools-0.9.12/setup.cfg b/test/3rdparty/testtools-0.9.12/setup.cfg
deleted file mode 100644
index 9f95adde2b4..00000000000
--- a/test/3rdparty/testtools-0.9.12/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[test]
-test_module = testtools.tests
-buffer=1
-catch=1
diff --git a/test/3rdparty/testtools-0.9.12/testtools/__init__.py b/test/3rdparty/testtools-0.9.12/testtools/__init__.py
deleted file mode 100644
index 11b5662b146..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/__init__.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
-
-"""Extensions to the standard Python unittest library."""
-
-__all__ = [
- 'clone_test_with_new_id',
- 'ConcurrentTestSuite',
- 'ErrorHolder',
- 'ExpectedException',
- 'ExtendedToOriginalDecorator',
- 'FixtureSuite',
- 'iterate_tests',
- 'MultipleExceptions',
- 'MultiTestResult',
- 'PlaceHolder',
- 'run_test_with',
- 'TestCase',
- 'TestCommand',
- 'TestResult',
- 'TextTestResult',
- 'RunTest',
- 'skip',
- 'skipIf',
- 'skipUnless',
- 'ThreadsafeForwardingResult',
- 'try_import',
- 'try_imports',
- ]
-
-from testtools.helpers import (
- try_import,
- try_imports,
- )
-from testtools.matchers import (
- Matcher,
- )
-# Shut up, pyflakes. We are importing for documentation, not for namespacing.
-Matcher
-
-from testtools.runtest import (
- MultipleExceptions,
- RunTest,
- )
-from testtools.testcase import (
- ErrorHolder,
- ExpectedException,
- PlaceHolder,
- TestCase,
- clone_test_with_new_id,
- run_test_with,
- skip,
- skipIf,
- skipUnless,
- )
-from testtools.testresult import (
- ExtendedToOriginalDecorator,
- MultiTestResult,
- TestResult,
- TextTestResult,
- ThreadsafeForwardingResult,
- )
-from testtools.testsuite import (
- ConcurrentTestSuite,
- FixtureSuite,
- iterate_tests,
- )
-from testtools.distutilscmd import (
- TestCommand,
-)
-
-# same format as sys.version_info: "A tuple containing the five components of
-# the version number: major, minor, micro, releaselevel, and serial. All
-# values except releaselevel are integers; the release level is 'alpha',
-# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
-# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
-# releaselevel of 'dev' for unreleased under-development code.
-#
-# If the releaselevel is 'alpha' then the major/minor/micro components are not
-# established at this point, and setup.py will use a version of next-$(revno).
-# If the releaselevel is 'final', then the tarball will be major.minor.micro.
-# Otherwise it is major.minor.micro~$(revno).
-
-__version__ = (0, 9, 12, 'final', 0)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/matchers.py b/test/3rdparty/testtools-0.9.12/testtools/matchers.py
deleted file mode 100644
index 693a20befa5..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/matchers.py
+++ /dev/null
@@ -1,1059 +0,0 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
-
-"""Matchers, a way to express complex assertions outside the testcase.
-
-Inspired by 'hamcrest'.
-
-Matcher provides the abstract API that all matchers need to implement.
-
-Bundled matchers are listed in __all__: a list can be obtained by running
-$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
-"""
-
-__metaclass__ = type
-__all__ = [
- 'AfterPreprocessing',
- 'AllMatch',
- 'Annotate',
- 'Contains',
- 'DocTestMatches',
- 'EndsWith',
- 'Equals',
- 'GreaterThan',
- 'Is',
- 'IsInstance',
- 'KeysEqual',
- 'LessThan',
- 'MatchesAll',
- 'MatchesAny',
- 'MatchesException',
- 'MatchesListwise',
- 'MatchesRegex',
- 'MatchesSetwise',
- 'MatchesStructure',
- 'NotEquals',
- 'Not',
- 'Raises',
- 'raises',
- 'StartsWith',
- ]
-
-import doctest
-import operator
-from pprint import pformat
-import re
-import sys
-import types
-
-from testtools.compat import (
- classtypes,
- _error_repr,
- isbaseexception,
- _isbytes,
- istext,
- str_is_unicode,
- text_repr
- )
-
-
-class Matcher(object):
- """A pattern matcher.
-
- A Matcher must implement match and __str__ to be used by
- testtools.TestCase.assertThat. Matcher.match(thing) returns None when
- thing is completely matched, and a Mismatch object otherwise.
-
- Matchers can be useful outside of test cases, as they are simply a
- pattern matching language expressed as objects.
-
- testtools.matchers is inspired by hamcrest, but is pythonic rather than
- a Java transcription.
- """
-
- def match(self, something):
- """Return None if this matcher matches something, a Mismatch otherwise.
- """
- raise NotImplementedError(self.match)
-
- def __str__(self):
- """Get a sensible human representation of the matcher.
-
- This should include the parameters given to the matcher and any
- state that would affect the matches operation.
- """
- raise NotImplementedError(self.__str__)
-
-
-class Mismatch(object):
- """An object describing a mismatch detected by a Matcher."""
-
- def __init__(self, description=None, details=None):
- """Construct a `Mismatch`.
-
- :param description: A description to use. If not provided,
- `Mismatch.describe` must be implemented.
- :param details: Extra details about the mismatch. Defaults
- to the empty dict.
- """
- if description:
- self._description = description
- if details is None:
- details = {}
- self._details = details
-
- def describe(self):
- """Describe the mismatch.
-
- This should be either a human-readable string or castable to a string.
-        In particular, it should either be plain ascii or unicode on Python 2,
- and care should be taken to escape control characters.
- """
- try:
- return self._description
- except AttributeError:
- raise NotImplementedError(self.describe)
-
- def get_details(self):
- """Get extra details about the mismatch.
-
- This allows the mismatch to provide extra information beyond the basic
- description, including large text or binary files, or debugging internals
- without having to force it to fit in the output of 'describe'.
-
- The testtools assertion assertThat will query get_details and attach
- all its values to the test, permitting them to be reported in whatever
- manner the test environment chooses.
-
- :return: a dict mapping names to Content objects. name is a string to
- name the detail, and the Content object is the detail to add
- to the result. For more information see the API to which items from
- this dict are passed testtools.TestCase.addDetail.
- """
- return getattr(self, '_details', {})
-
- def __repr__(self):
- return "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
- id(self), self.__dict__)
-
-
-class MismatchError(AssertionError):
- """Raised when a mismatch occurs."""
-
- # This class exists to work around
- # <https://bugs.launchpad.net/testtools/+bug/804127>. It provides a
- # guaranteed way of getting a readable exception, no matter what crazy
- # characters are in the matchee, matcher or mismatch.
-
- def __init__(self, matchee, matcher, mismatch, verbose=False):
- # Have to use old-style upcalling for Python 2.4 and 2.5
- # compatibility.
- AssertionError.__init__(self)
- self.matchee = matchee
- self.matcher = matcher
- self.mismatch = mismatch
- self.verbose = verbose
-
- def __str__(self):
- difference = self.mismatch.describe()
- if self.verbose:
- # GZ 2011-08-24: Smelly API? Better to take any object and special
- # case text inside?
- if istext(self.matchee) or _isbytes(self.matchee):
- matchee = text_repr(self.matchee, multiline=False)
- else:
- matchee = repr(self.matchee)
- return (
- 'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
- % (matchee, self.matcher, difference))
- else:
- return difference
-
- if not str_is_unicode:
-
- __unicode__ = __str__
-
- def __str__(self):
- return self.__unicode__().encode("ascii", "backslashreplace")
-
-
-class MismatchDecorator(object):
- """Decorate a ``Mismatch``.
-
- Forwards all messages to the original mismatch object. Probably the best
-    way to use this is to inherit from this class and then provide your own
- custom decoration logic.
- """
-
- def __init__(self, original):
- """Construct a `MismatchDecorator`.
-
- :param original: A `Mismatch` object to decorate.
- """
- self.original = original
-
- def __repr__(self):
- return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,)
-
- def describe(self):
- return self.original.describe()
-
- def get_details(self):
- return self.original.get_details()
-
-
-class _NonManglingOutputChecker(doctest.OutputChecker):
- """Doctest checker that works with unicode rather than mangling strings
-
- This is needed because current Python versions have tried to fix string
- encoding related problems, but regressed the default behaviour with unicode
- inputs in the process.
-
-    In Python 2.6 and 2.7 `OutputChecker.output_difference` was changed to
- return a bytestring encoded as per `sys.stdout.encoding`, or utf-8 if that
- can't be determined. Worse, that encoding process happens in the innocent
- looking `_indent` global function. Because the `DocTestMismatch.describe`
- result may well not be destined for printing to stdout, this is no good
- for us. To get a unicode return as before, the method is monkey patched if
- `doctest._encoding` exists.
-
- Python 3 has a different problem. For some reason both inputs are encoded
-    to ascii with 'backslashreplace', making an escaped string match its
- unescaped form. Overriding the offending `OutputChecker._toAscii` method
- is sufficient to revert this.
- """
-
- def _toAscii(self, s):
- """Return `s` unchanged rather than mangling it to ascii"""
- return s
-
- # Only do this overriding hackery if doctest has a broken _input function
- if getattr(doctest, "_encoding", None) is not None:
- from types import FunctionType as __F
- __f = doctest.OutputChecker.output_difference.im_func
- __g = dict(__f.func_globals)
- def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)):
- """Prepend non-empty lines in `s` with `indent` number of spaces"""
- return _pattern.sub(indent*" ", s)
- __g["_indent"] = _indent
- output_difference = __F(__f.func_code, __g, "output_difference")
- del __F, __f, __g, _indent
-
-
-class DocTestMatches(object):
- """See if a string matches a doctest example."""
-
- def __init__(self, example, flags=0):
- """Create a DocTestMatches to match example.
-
- :param example: The example to match e.g. 'foo bar baz'
- :param flags: doctest comparison flags to match on. e.g.
- doctest.ELLIPSIS.
- """
- if not example.endswith('\n'):
- example += '\n'
- self.want = example # required variable name by doctest.
- self.flags = flags
- self._checker = _NonManglingOutputChecker()
-
- def __str__(self):
- if self.flags:
- flagstr = ", flags=%d" % self.flags
- else:
- flagstr = ""
- return 'DocTestMatches(%r%s)' % (self.want, flagstr)
-
- def _with_nl(self, actual):
- result = self.want.__class__(actual)
- if not result.endswith('\n'):
- result += '\n'
- return result
-
- def match(self, actual):
- with_nl = self._with_nl(actual)
- if self._checker.check_output(self.want, with_nl, self.flags):
- return None
- return DocTestMismatch(self, with_nl)
-
- def _describe_difference(self, with_nl):
- return self._checker.output_difference(self, with_nl, self.flags)
-
-
-class DocTestMismatch(Mismatch):
- """Mismatch object for DocTestMatches."""
-
- def __init__(self, matcher, with_nl):
- self.matcher = matcher
- self.with_nl = with_nl
-
- def describe(self):
- s = self.matcher._describe_difference(self.with_nl)
- if str_is_unicode or isinstance(s, unicode):
- return s
- # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
- # be escaped, in addition to non-ascii bytes.
- return s.decode("latin1").encode("ascii", "backslashreplace")
-
-
-class DoesNotContain(Mismatch):
-
- def __init__(self, matchee, needle):
- """Create a DoesNotContain Mismatch.
-
- :param matchee: the object that did not contain needle.
- :param needle: the needle that 'matchee' was expected to contain.
- """
- self.matchee = matchee
- self.needle = needle
-
- def describe(self):
- return "%r not in %r" % (self.needle, self.matchee)
-
-
-class DoesNotStartWith(Mismatch):
-
- def __init__(self, matchee, expected):
- """Create a DoesNotStartWith Mismatch.
-
- :param matchee: the string that did not match.
- :param expected: the string that 'matchee' was expected to start with.
- """
- self.matchee = matchee
- self.expected = expected
-
- def describe(self):
- return "%s does not start with %s." % (
- text_repr(self.matchee), text_repr(self.expected))
-
-
-class DoesNotEndWith(Mismatch):
-
- def __init__(self, matchee, expected):
- """Create a DoesNotEndWith Mismatch.
-
- :param matchee: the string that did not match.
- :param expected: the string that 'matchee' was expected to end with.
- """
- self.matchee = matchee
- self.expected = expected
-
- def describe(self):
- return "%s does not end with %s." % (
- text_repr(self.matchee), text_repr(self.expected))
-
-
-class _BinaryComparison(object):
- """Matcher that compares an object to another object."""
-
- def __init__(self, expected):
- self.expected = expected
-
- def __str__(self):
- return "%s(%r)" % (self.__class__.__name__, self.expected)
-
- def match(self, other):
- if self.comparator(other, self.expected):
- return None
- return _BinaryMismatch(self.expected, self.mismatch_string, other)
-
- def comparator(self, expected, other):
- raise NotImplementedError(self.comparator)
-
-
-class _BinaryMismatch(Mismatch):
- """Two things did not match."""
-
- def __init__(self, expected, mismatch_string, other):
- self.expected = expected
- self._mismatch_string = mismatch_string
- self.other = other
-
- def _format(self, thing):
- # Blocks of text with newlines are formatted as triple-quote
- # strings. Everything else is pretty-printed.
- if istext(thing) or _isbytes(thing):
- return text_repr(thing)
- return pformat(thing)
-
- def describe(self):
- left = repr(self.expected)
- right = repr(self.other)
- if len(left) + len(right) > 70:
- return "%s:\nreference = %s\nactual = %s\n" % (
- self._mismatch_string, self._format(self.expected),
- self._format(self.other))
- else:
- return "%s %s %s" % (left, self._mismatch_string, right)
-
-
-class Equals(_BinaryComparison):
- """Matches if the items are equal."""
-
- comparator = operator.eq
- mismatch_string = '!='
-
-
-class NotEquals(_BinaryComparison):
- """Matches if the items are not equal.
-
- In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
- only matters when testing ``__ne__`` implementations.
- """
-
- comparator = operator.ne
- mismatch_string = '=='
-
-
-class Is(_BinaryComparison):
- """Matches if the items are identical."""
-
- comparator = operator.is_
- mismatch_string = 'is not'
-
-
-class IsInstance(object):
- """Matcher that wraps isinstance."""
-
- def __init__(self, *types):
- self.types = tuple(types)
-
- def __str__(self):
- return "%s(%s)" % (self.__class__.__name__,
- ', '.join(type.__name__ for type in self.types))
-
- def match(self, other):
- if isinstance(other, self.types):
- return None
- return NotAnInstance(other, self.types)
-
-
-class NotAnInstance(Mismatch):
-
- def __init__(self, matchee, types):
- """Create a NotAnInstance Mismatch.
-
- :param matchee: the thing which is not an instance of any of types.
- :param types: A tuple of the types which were expected.
- """
- self.matchee = matchee
- self.types = types
-
- def describe(self):
- if len(self.types) == 1:
- typestr = self.types[0].__name__
- else:
- typestr = 'any of (%s)' % ', '.join(type.__name__ for type in
- self.types)
- return "'%s' is not an instance of %s" % (self.matchee, typestr)
-
-
-class LessThan(_BinaryComparison):
-    """Matches if the item is less than the matcher's reference object."""
-
- comparator = operator.__lt__
- mismatch_string = 'is not >'
-
-
-class GreaterThan(_BinaryComparison):
-    """Matches if the item is greater than the matcher's reference object."""
-
- comparator = operator.__gt__
- mismatch_string = 'is not <'
-
-
-class MatchesAny(object):
- """Matches if any of the matchers it is created with match."""
-
- def __init__(self, *matchers):
- self.matchers = matchers
-
- def match(self, matchee):
- results = []
- for matcher in self.matchers:
- mismatch = matcher.match(matchee)
- if mismatch is None:
- return None
- results.append(mismatch)
- return MismatchesAll(results)
-
- def __str__(self):
- return "MatchesAny(%s)" % ', '.join([
- str(matcher) for matcher in self.matchers])
-
-
-class MatchesAll(object):
- """Matches if all of the matchers it is created with match."""
-
- def __init__(self, *matchers):
- self.matchers = matchers
-
- def __str__(self):
- return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
-
- def match(self, matchee):
- results = []
- for matcher in self.matchers:
- mismatch = matcher.match(matchee)
- if mismatch is not None:
- results.append(mismatch)
- if results:
- return MismatchesAll(results)
- else:
- return None
-
-
-class MismatchesAll(Mismatch):
- """A mismatch with many child mismatches."""
-
- def __init__(self, mismatches):
- self.mismatches = mismatches
-
- def describe(self):
- descriptions = ["Differences: ["]
- for mismatch in self.mismatches:
- descriptions.append(mismatch.describe())
- descriptions.append("]")
- return '\n'.join(descriptions)
-
-
-class Not(object):
- """Inverts a matcher."""
-
- def __init__(self, matcher):
- self.matcher = matcher
-
- def __str__(self):
- return 'Not(%s)' % (self.matcher,)
-
- def match(self, other):
- mismatch = self.matcher.match(other)
- if mismatch is None:
- return MatchedUnexpectedly(self.matcher, other)
- else:
- return None
-
-
-class MatchedUnexpectedly(Mismatch):
- """A thing matched when it wasn't supposed to."""
-
- def __init__(self, matcher, other):
- self.matcher = matcher
- self.other = other
-
- def describe(self):
- return "%r matches %s" % (self.other, self.matcher)
-
-
-class MatchesException(Matcher):
- """Match an exc_info tuple against an exception instance or type."""
-
- def __init__(self, exception, value_re=None):
- """Create a MatchesException that will match exc_info's for exception.
-
- :param exception: Either an exception instance or type.
- If an instance is given, the type and arguments of the exception
- are checked. If a type is given only the type of the exception is
- checked. If a tuple is given, then as with isinstance, any of the
- types in the tuple matching is sufficient to match.
- :param value_re: If 'exception' is a type, and the matchee exception
- is of the right type, then match against this. If value_re is a
- string, then assume value_re is a regular expression and match
- the str() of the exception against it. Otherwise, assume value_re
- is a matcher, and match the exception against it.
- """
- Matcher.__init__(self)
- self.expected = exception
- if istext(value_re):
- value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
- self.value_re = value_re
- self._is_instance = type(self.expected) not in classtypes() + (tuple,)
-
- def match(self, other):
- if type(other) != tuple:
- return Mismatch('%r is not an exc_info tuple' % other)
- expected_class = self.expected
- if self._is_instance:
- expected_class = expected_class.__class__
- if not issubclass(other[0], expected_class):
- return Mismatch('%r is not a %r' % (other[0], expected_class))
- if self._is_instance:
- if other[1].args != self.expected.args:
- return Mismatch('%s has different arguments to %s.' % (
- _error_repr(other[1]), _error_repr(self.expected)))
- elif self.value_re is not None:
- return self.value_re.match(other[1])
-
- def __str__(self):
- if self._is_instance:
- return "MatchesException(%s)" % _error_repr(self.expected)
- return "MatchesException(%s)" % repr(self.expected)
-
-
-class Contains(Matcher):
-    """Checks whether something is contained in another thing."""
-
- def __init__(self, needle):
- """Create a Contains Matcher.
-
- :param needle: the thing that needs to be contained by matchees.
- """
- self.needle = needle
-
- def __str__(self):
- return "Contains(%r)" % (self.needle,)
-
- def match(self, matchee):
- try:
- if self.needle not in matchee:
- return DoesNotContain(matchee, self.needle)
- except TypeError:
- # e.g. 1 in 2 will raise TypeError
- return DoesNotContain(matchee, self.needle)
- return None
-
-
-class StartsWith(Matcher):
- """Checks whether one string starts with another."""
-
- def __init__(self, expected):
- """Create a StartsWith Matcher.
-
- :param expected: the string that matchees should start with.
- """
- self.expected = expected
-
- def __str__(self):
- return "StartsWith(%r)" % (self.expected,)
-
- def match(self, matchee):
- if not matchee.startswith(self.expected):
- return DoesNotStartWith(matchee, self.expected)
- return None
-
-
-class EndsWith(Matcher):
-    """Checks whether one string ends with another."""
-
- def __init__(self, expected):
- """Create a EndsWith Matcher.
-
- :param expected: the string that matchees should end with.
- """
- self.expected = expected
-
- def __str__(self):
- return "EndsWith(%r)" % (self.expected,)
-
- def match(self, matchee):
- if not matchee.endswith(self.expected):
- return DoesNotEndWith(matchee, self.expected)
- return None
-
-
-class KeysEqual(Matcher):
- """Checks whether a dict has particular keys."""
-
- def __init__(self, *expected):
- """Create a `KeysEqual` Matcher.
-
- :param expected: The keys the dict is expected to have. If a dict,
- then we use the keys of that dict, if a collection, we assume it
- is a collection of expected keys.
- """
- try:
- self.expected = expected.keys()
- except AttributeError:
- self.expected = list(expected)
-
- def __str__(self):
- return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
-
- def match(self, matchee):
- expected = sorted(self.expected)
- matched = Equals(expected).match(sorted(matchee.keys()))
- if matched:
- return AnnotatedMismatch(
- 'Keys not equal',
- _BinaryMismatch(expected, 'does not match', matchee))
- return None
-
-
-class Annotate(object):
- """Annotates a matcher with a descriptive string.
-
- Mismatches are then described as '<mismatch>: <annotation>'.
- """
-
- def __init__(self, annotation, matcher):
- self.annotation = annotation
- self.matcher = matcher
-
- @classmethod
- def if_message(cls, annotation, matcher):
- """Annotate ``matcher`` only if ``annotation`` is non-empty."""
- if not annotation:
- return matcher
- return cls(annotation, matcher)
-
- def __str__(self):
- return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
-
- def match(self, other):
- mismatch = self.matcher.match(other)
- if mismatch is not None:
- return AnnotatedMismatch(self.annotation, mismatch)
-
-
-class AnnotatedMismatch(MismatchDecorator):
- """A mismatch annotated with a descriptive string."""
-
- def __init__(self, annotation, mismatch):
- super(AnnotatedMismatch, self).__init__(mismatch)
- self.annotation = annotation
- self.mismatch = mismatch
-
- def describe(self):
- return '%s: %s' % (self.original.describe(), self.annotation)
-
-
-class Raises(Matcher):
- """Match if the matchee raises an exception when called.
-
-    Exceptions which are not subclasses of Exception propagate out of the
- Raises.match call unless they are explicitly matched.
- """
-
- def __init__(self, exception_matcher=None):
- """Create a Raises matcher.
-
- :param exception_matcher: Optional validator for the exception raised
- by matchee. If supplied the exc_info tuple for the exception raised
- is passed into that matcher. If no exception_matcher is supplied
- then the simple fact of raising an exception is considered enough
- to match on.
- """
- self.exception_matcher = exception_matcher
-
- def match(self, matchee):
- try:
- result = matchee()
- return Mismatch('%r returned %r' % (matchee, result))
- # Catch all exceptions: Raises() should be able to match a
- # KeyboardInterrupt or SystemExit.
- except:
- exc_info = sys.exc_info()
- if self.exception_matcher:
- mismatch = self.exception_matcher.match(exc_info)
- if not mismatch:
- del exc_info
- return
- else:
- mismatch = None
- # The exception did not match, or no explicit matching logic was
- # performed. If the exception is a non-user exception (that is, not
-            # a subclass of Exception on Python 2.5+) then propagate it.
- if isbaseexception(exc_info[1]):
- del exc_info
- raise
- return mismatch
-
- def __str__(self):
- return 'Raises()'
-
-
-def raises(exception):
- """Make a matcher that checks that a callable raises an exception.
-
- This is a convenience function, exactly equivalent to::
-
- return Raises(MatchesException(exception))
-
- See `Raises` and `MatchesException` for more information.
- """
- return Raises(MatchesException(exception))
-
-
-class MatchesListwise(object):
- """Matches if each matcher matches the corresponding value.
-
- More easily explained by example than in words:
-
- >>> MatchesListwise([Equals(1)]).match([1])
- >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
- >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
- Differences: [
- 1 != 2
- 2 != 1
- ]
- """
-
- def __init__(self, matchers):
- self.matchers = matchers
-
- def match(self, values):
- mismatches = []
- length_mismatch = Annotate(
- "Length mismatch", Equals(len(self.matchers))).match(len(values))
- if length_mismatch:
- mismatches.append(length_mismatch)
- for matcher, value in zip(self.matchers, values):
- mismatch = matcher.match(value)
- if mismatch:
- mismatches.append(mismatch)
- if mismatches:
- return MismatchesAll(mismatches)
-
-
-class MatchesStructure(object):
- """Matcher that matches an object structurally.
-
- 'Structurally' here means that attributes of the object being matched are
- compared against given matchers.
-
- `fromExample` allows the creation of a matcher from a prototype object and
- then modified versions can be created with `update`.
-
- `byEquality` creates a matcher in much the same way as the constructor,
- except that the matcher for each of the attributes is assumed to be
- `Equals`.
-
- `byMatcher` creates a similar matcher to `byEquality`, but you get to pick
- the matcher, rather than just using `Equals`.
- """
-
- def __init__(self, **kwargs):
- """Construct a `MatchesStructure`.
-
- :param kwargs: A mapping of attributes to matchers.
- """
- self.kws = kwargs
-
- @classmethod
- def byEquality(cls, **kwargs):
- """Matches an object where the attributes equal the keyword values.
-
- Similar to the constructor, except that the matcher is assumed to be
- Equals.
- """
- return cls.byMatcher(Equals, **kwargs)
-
- @classmethod
- def byMatcher(cls, matcher, **kwargs):
- """Matches an object where the attributes match the keyword values.
-
- Similar to the constructor, except that the provided matcher is used
- to match all of the values.
- """
- return cls(
- **dict((name, matcher(value)) for name, value in kwargs.items()))
-
- @classmethod
- def fromExample(cls, example, *attributes):
- kwargs = {}
- for attr in attributes:
- kwargs[attr] = Equals(getattr(example, attr))
- return cls(**kwargs)
-
- def update(self, **kws):
- new_kws = self.kws.copy()
- for attr, matcher in kws.items():
- if matcher is None:
- new_kws.pop(attr, None)
- else:
- new_kws[attr] = matcher
- return type(self)(**new_kws)
-
- def __str__(self):
- kws = []
- for attr, matcher in sorted(self.kws.items()):
- kws.append("%s=%s" % (attr, matcher))
- return "%s(%s)" % (self.__class__.__name__, ', '.join(kws))
-
- def match(self, value):
- matchers = []
- values = []
- for attr, matcher in sorted(self.kws.items()):
- matchers.append(Annotate(attr, matcher))
- values.append(getattr(value, attr))
- return MatchesListwise(matchers).match(values)
-
-
-class MatchesRegex(object):
- """Matches if the matchee is matched by a regular expression."""
-
- def __init__(self, pattern, flags=0):
- self.pattern = pattern
- self.flags = flags
-
- def __str__(self):
- args = ['%r' % self.pattern]
- flag_arg = []
- # dir() sorts the attributes for us, so we don't need to do it again.
- for flag in dir(re):
- if len(flag) == 1:
- if self.flags & getattr(re, flag):
- flag_arg.append('re.%s' % flag)
- if flag_arg:
- args.append('|'.join(flag_arg))
- return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
-
- def match(self, value):
- if not re.match(self.pattern, value, self.flags):
- pattern = self.pattern
- if not isinstance(pattern, str_is_unicode and str or unicode):
- pattern = pattern.decode("latin1")
- pattern = pattern.encode("unicode_escape").decode("ascii")
- return Mismatch("%r does not match /%s/" % (
- value, pattern.replace("\\\\", "\\")))
-
-
-class MatchesSetwise(object):
- """Matches if all the matchers match elements of the value being matched.
-
- That is, each element in the 'observed' set must match exactly one matcher
- from the set of matchers, with no matchers left over.
-
- The difference compared to `MatchesListwise` is that the order of the
- matchings does not matter.
- """
-
- def __init__(self, *matchers):
- self.matchers = matchers
-
- def match(self, observed):
- remaining_matchers = set(self.matchers)
- not_matched = []
- for value in observed:
- for matcher in remaining_matchers:
- if matcher.match(value) is None:
- remaining_matchers.remove(matcher)
- break
- else:
- not_matched.append(value)
- if not_matched or remaining_matchers:
- remaining_matchers = list(remaining_matchers)
- # There are various cases that all should be reported somewhat
- # differently.
-
- # There are two trivial cases:
- # 1) There are just some matchers left over.
- # 2) There are just some values left over.
-
- # Then there are three more interesting cases:
- # 3) There are the same number of matchers and values left over.
- # 4) There are more matchers left over than values.
- # 5) There are more values left over than matchers.
-
- if len(not_matched) == 0:
- if len(remaining_matchers) > 1:
- msg = "There were %s matchers left over: " % (
- len(remaining_matchers),)
- else:
- msg = "There was 1 matcher left over: "
- msg += ', '.join(map(str, remaining_matchers))
- return Mismatch(msg)
- elif len(remaining_matchers) == 0:
- if len(not_matched) > 1:
- return Mismatch(
- "There were %s values left over: %s" % (
- len(not_matched), not_matched))
- else:
- return Mismatch(
- "There was 1 value left over: %s" % (
- not_matched, ))
- else:
- common_length = min(len(remaining_matchers), len(not_matched))
- if common_length == 0:
- raise AssertionError("common_length can't be 0 here")
- if common_length > 1:
- msg = "There were %s mismatches" % (common_length,)
- else:
- msg = "There was 1 mismatch"
- if len(remaining_matchers) > len(not_matched):
- extra_matchers = remaining_matchers[common_length:]
- msg += " and %s extra matcher" % (len(extra_matchers), )
- if len(extra_matchers) > 1:
- msg += "s"
- msg += ': ' + ', '.join(map(str, extra_matchers))
- elif len(not_matched) > len(remaining_matchers):
- extra_values = not_matched[common_length:]
- msg += " and %s extra value" % (len(extra_values), )
- if len(extra_values) > 1:
- msg += "s"
- msg += ': ' + str(extra_values)
- return Annotate(
- msg, MatchesListwise(remaining_matchers[:common_length])
- ).match(not_matched[:common_length])
-
-
-class AfterPreprocessing(object):
- """Matches if the value matches after passing through a function.
-
- This can be used to aid in creating trivial matchers as functions, for
- example::
-
- def PathHasFileContent(content):
- def _read(path):
- return open(path).read()
- return AfterPreprocessing(_read, Equals(content))
- """
-
- def __init__(self, preprocessor, matcher, annotate=True):
- """Create an AfterPreprocessing matcher.
-
- :param preprocessor: A function called with the matchee before
- matching.
- :param matcher: What to match the preprocessed matchee against.
- :param annotate: Whether or not to annotate the matcher with
- something explaining how we transformed the matchee. Defaults
- to True.
- """
- self.preprocessor = preprocessor
- self.matcher = matcher
- self.annotate = annotate
-
- def _str_preprocessor(self):
- if isinstance(self.preprocessor, types.FunctionType):
- return '<function %s>' % self.preprocessor.__name__
- return str(self.preprocessor)
-
- def __str__(self):
- return "AfterPreprocessing(%s, %s)" % (
- self._str_preprocessor(), self.matcher)
-
- def match(self, value):
- after = self.preprocessor(value)
- if self.annotate:
- matcher = Annotate(
- "after %s on %r" % (self._str_preprocessor(), value),
- self.matcher)
- else:
- matcher = self.matcher
- return matcher.match(after)
-
-# This is the old, deprecated spelling of the name, kept for backwards
-# compatibility.
-AfterPreproccessing = AfterPreprocessing
-
-
-class AllMatch(object):
- """Matches if all provided values match the given matcher."""
-
- def __init__(self, matcher):
- self.matcher = matcher
-
- def __str__(self):
- return 'AllMatch(%s)' % (self.matcher,)
-
- def match(self, values):
- mismatches = []
- for value in values:
- mismatch = self.matcher.match(value)
- if mismatch:
- mismatches.append(mismatch)
- if mismatches:
- return MismatchesAll(mismatches)
-
-
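A small sketch of AllMatch in use (again assuming testtools is importable): every element of the iterable must satisfy the wrapped matcher, and the mismatch lists the elements that failed.

from testtools.matchers import AllMatch, LessThan

matcher = AllMatch(LessThan(10))
print(matcher.match([1, 2, 3]))            # None: all values are below 10
print(matcher.match([1, 12]).describe())   # lists the failing element(s)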
-# Signal that this is part of the testing framework, and that code from this
-# should not normally appear in tracebacks.
-__unittest = True
diff --git a/test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py b/test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py
deleted file mode 100644
index 19f88bc8a34..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/testresult/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2009 testtools developers. See LICENSE for details.
-
-"""Test result objects."""
-
-__all__ = [
- 'ExtendedToOriginalDecorator',
- 'MultiTestResult',
- 'TestResult',
- 'TextTestResult',
- 'ThreadsafeForwardingResult',
- ]
-
-from testtools.testresult.real import (
- ExtendedToOriginalDecorator,
- MultiTestResult,
- TestResult,
- TextTestResult,
- ThreadsafeForwardingResult,
- )
diff --git a/test/3rdparty/testtools-0.9.12/testtools/testresult/real.py b/test/3rdparty/testtools-0.9.12/testtools/testresult/real.py
deleted file mode 100644
index eb548dfa2c8..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/testresult/real.py
+++ /dev/null
@@ -1,658 +0,0 @@
-# Copyright (c) 2008 testtools developers. See LICENSE for details.
-
-"""Test results and related things."""
-
-__metaclass__ = type
-__all__ = [
- 'ExtendedToOriginalDecorator',
- 'MultiTestResult',
- 'TestResult',
- 'ThreadsafeForwardingResult',
- ]
-
-import datetime
-import sys
-import unittest
-
-from testtools.compat import all, _format_exc_info, str_is_unicode, _u
-
-# From http://docs.python.org/library/datetime.html
-_ZERO = datetime.timedelta(0)
-
-# A UTC class.
-
-class UTC(datetime.tzinfo):
- """UTC"""
-
- def utcoffset(self, dt):
- return _ZERO
-
- def tzname(self, dt):
- return "UTC"
-
- def dst(self, dt):
- return _ZERO
-
-utc = UTC()
-
-
-class TestResult(unittest.TestResult):
- """Subclass of unittest.TestResult extending the protocol for flexability.
-
- This test result supports an experimental protocol for providing additional
- data with test outcomes. All the outcome methods take an optional dict
- 'details'. If supplied, any other detail parameters like 'err' or 'reason'
- should not be provided. The details dict is a mapping from names to
- MIME content objects (see testtools.content). This permits attaching
- tracebacks, log files, or even large objects like databases that were
- part of the test fixture. Until this API is accepted into upstream
- Python it is considered experimental: it may be replaced at any point
- by a newer version more in line with upstream Python. Compatibility would
- be aimed for in this case, but may not be possible.
-
- :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
- """
-
- def __init__(self):
- # startTestRun resets all attributes, and older clients don't know to
- # call startTestRun, so it is called once here.
- # Because subclasses may reasonably not expect this, we call the
- # specific version we want to run.
- TestResult.startTestRun(self)
-
- def addExpectedFailure(self, test, err=None, details=None):
- """Called when a test has failed in an expected manner.
-
- Like with addSuccess and addError, testStopped should still be called.
-
- :param test: The test that has failed in an expected manner.
- :param err: The exc_info of the error that was raised.
- :return: None
- """
- # This is the python 2.7 implementation
- self.expectedFailures.append(
- (test, self._err_details_to_string(test, err, details)))
-
- def addError(self, test, err=None, details=None):
- """Called when an error has occurred. 'err' is a tuple of values as
- returned by sys.exc_info().
-
- :param details: Alternative way to supply details about the outcome.
- See the class docstring for more information.
- """
- self.errors.append((test,
- self._err_details_to_string(test, err, details)))
-
- def addFailure(self, test, err=None, details=None):
- """Called when an error has occurred. 'err' is a tuple of values as
- returned by sys.exc_info().
-
- :param details: Alternative way to supply details about the outcome.
- see the class docstring for more information.
- """
- self.failures.append((test,
- self._err_details_to_string(test, err, details)))
-
- def addSkip(self, test, reason=None, details=None):
- """Called when a test has been skipped rather than running.
-
- Like with addSuccess and addError, testStopped should still be called.
-
- This must be called by the TestCase. 'addError' and 'addFailure' will
- not call addSkip, since they have no assumptions about the kind of
- errors that a test can raise.
-
- :param test: The test that has been skipped.
- :param reason: The reason for the test being skipped. For instance,
- u"pyGL is not available".
- :param details: Alternative way to supply details about the outcome.
- See the class docstring for more information.
- :return: None
- """
- if reason is None:
- reason = details.get('reason')
- if reason is None:
- reason = 'No reason given'
- else:
- reason = ''.join(reason.iter_text())
- skip_list = self.skip_reasons.setdefault(reason, [])
- skip_list.append(test)
-
- def addSuccess(self, test, details=None):
- """Called when a test succeeded."""
-
- def addUnexpectedSuccess(self, test, details=None):
- """Called when a test was expected to fail, but succeed."""
- self.unexpectedSuccesses.append(test)
-
- def wasSuccessful(self):
- """Has this result been successful so far?
-
- If there have been any errors, failures or unexpected successes,
- return False. Otherwise, return True.
-
- Note: This differs from standard unittest in that we consider
- unexpected successes to be equivalent to failures, rather than
- successes.
- """
- return not (self.errors or self.failures or self.unexpectedSuccesses)
-
- if str_is_unicode:
- # Python 3 and IronPython strings are unicode, use parent class method
- _exc_info_to_unicode = unittest.TestResult._exc_info_to_string
- else:
- # For Python 2, need to decode components of traceback according to
- # their source, so can't use traceback.format_exception
- # Here follows a little deep magic to copy the existing method and
- # replace the formatter with one that returns unicode instead
- from types import FunctionType as __F, ModuleType as __M
- __f = unittest.TestResult._exc_info_to_string.im_func
- __g = dict(__f.func_globals)
- __m = __M("__fake_traceback")
- __m.format_exception = _format_exc_info
- __g["traceback"] = __m
- _exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode")
- del __F, __M, __f, __g, __m
-
- def _err_details_to_string(self, test, err=None, details=None):
- """Convert an error in exc_info form or a contents dict to a string."""
- if err is not None:
- return self._exc_info_to_unicode(err, test)
- return _details_to_str(details, special='traceback')
-
- def _now(self):
- """Return the current 'test time'.
-
- If the time() method has not been called, this is equivalent to
- datetime.now(); otherwise it is the last supplied datestamp given to the
- time() method.
- """
- if self.__now is None:
- return datetime.datetime.now(utc)
- else:
- return self.__now
-
- def startTestRun(self):
- """Called before a test run starts.
-
- New in Python 2.7. The testtools version resets the result to a
- pristine condition ready for use in another test run. Note that this
- is different from Python 2.7's startTestRun, which does nothing.
- """
- super(TestResult, self).__init__()
- self.skip_reasons = {}
- self.__now = None
- # -- Start: As per python 2.7 --
- self.expectedFailures = []
- self.unexpectedSuccesses = []
- # -- End: As per python 2.7 --
-
- def stopTestRun(self):
- """Called after a test run completes
-
- New in python 2.7
- """
-
- def time(self, a_datetime):
- """Provide a timestamp to represent the current time.
-
- This is useful when test activity is time delayed, or happening
- concurrently and getting the system time between API calls will not
- accurately represent the duration of tests (or the whole run).
-
- Calling time() sets the datetime used by the TestResult object.
- Time is permitted to go backwards when using this call.
-
- :param a_datetime: A datetime.datetime object with TZ information or
- None to reset the TestResult to gathering time from the system.
- """
- self.__now = a_datetime
-
- def done(self):
- """Called when the test runner is done.
-
- Deprecated in favour of stopTestRun.
- """
-
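To make the note in wasSuccessful() above concrete, here is a small illustrative sketch (the Probe test class is invented for illustration): a single unexpected success is enough to mark the run as failed, unlike stock unittest.

import unittest
from testtools import TestResult

class Probe(unittest.TestCase):
    def test_noop(self):
        pass

result = TestResult()
result.startTestRun()
test = Probe('test_noop')
result.startTest(test)
result.addUnexpectedSuccess(test)
result.stopTest(test)
result.stopTestRun()
print(result.wasSuccessful())   # False: unexpected successes count against the run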
-
-class MultiTestResult(TestResult):
- """A test result that dispatches to many test results."""
-
- def __init__(self, *results):
- TestResult.__init__(self)
- self._results = list(map(ExtendedToOriginalDecorator, results))
-
- def _dispatch(self, message, *args, **kwargs):
- return tuple(
- getattr(result, message)(*args, **kwargs)
- for result in self._results)
-
- def startTest(self, test):
- return self._dispatch('startTest', test)
-
- def stopTest(self, test):
- return self._dispatch('stopTest', test)
-
- def addError(self, test, error=None, details=None):
- return self._dispatch('addError', test, error, details=details)
-
- def addExpectedFailure(self, test, err=None, details=None):
- return self._dispatch(
- 'addExpectedFailure', test, err, details=details)
-
- def addFailure(self, test, err=None, details=None):
- return self._dispatch('addFailure', test, err, details=details)
-
- def addSkip(self, test, reason=None, details=None):
- return self._dispatch('addSkip', test, reason, details=details)
-
- def addSuccess(self, test, details=None):
- return self._dispatch('addSuccess', test, details=details)
-
- def addUnexpectedSuccess(self, test, details=None):
- return self._dispatch('addUnexpectedSuccess', test, details=details)
-
- def startTestRun(self):
- return self._dispatch('startTestRun')
-
- def stopTestRun(self):
- return self._dispatch('stopTestRun')
-
- def time(self, a_datetime):
- return self._dispatch('time', a_datetime)
-
- def done(self):
- return self._dispatch('done')
-
- def wasSuccessful(self):
- """Was this result successful?
-
- Only returns True if every constituent result was successful.
- """
- return all(self._dispatch('wasSuccessful'))
-
-
-class TextTestResult(TestResult):
- """A TestResult which outputs activity to a text stream."""
-
- def __init__(self, stream):
- """Construct a TextTestResult writing to stream."""
- super(TextTestResult, self).__init__()
- self.stream = stream
- self.sep1 = '=' * 70 + '\n'
- self.sep2 = '-' * 70 + '\n'
-
- def _delta_to_float(self, a_timedelta):
- return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
- a_timedelta.microseconds / 1000000.0)
-
- def _show_list(self, label, error_list):
- for test, output in error_list:
- self.stream.write(self.sep1)
- self.stream.write("%s: %s\n" % (label, test.id()))
- self.stream.write(self.sep2)
- self.stream.write(output)
-
- def startTestRun(self):
- super(TextTestResult, self).startTestRun()
- self.__start = self._now()
- self.stream.write("Tests running...\n")
-
- def stopTestRun(self):
- if self.testsRun != 1:
- plural = 's'
- else:
- plural = ''
- stop = self._now()
- self._show_list('ERROR', self.errors)
- self._show_list('FAIL', self.failures)
- for test in self.unexpectedSuccesses:
- self.stream.write(
- "%sUNEXPECTED SUCCESS: %s\n%s" % (
- self.sep1, test.id(), self.sep2))
- self.stream.write("\nRan %d test%s in %.3fs\n" %
- (self.testsRun, plural,
- self._delta_to_float(stop - self.__start)))
- if self.wasSuccessful():
- self.stream.write("OK\n")
- else:
- self.stream.write("FAILED (")
- details = []
- details.append("failures=%d" % (
- sum(map(len, (
- self.failures, self.errors, self.unexpectedSuccesses)))))
- self.stream.write(", ".join(details))
- self.stream.write(")\n")
- super(TextTestResult, self).stopTestRun()
-
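A quick sketch of TextTestResult on its own, capturing the summary in an io.StringIO under Python 3 (the Probe class is invented for illustration):

import unittest
from io import StringIO
from testtools import TextTestResult

class Probe(unittest.TestCase):
    def test_noop(self):
        pass

out = StringIO()
result = TextTestResult(out)
result.startTestRun()
Probe('test_noop').run(result)
result.stopTestRun()
print(out.getvalue())   # "Tests running...", then "Ran 1 test in ...s" and "OK"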
-
-class ThreadsafeForwardingResult(TestResult):
- """A TestResult which ensures the target does not receive mixed up calls.
-
- This is used when receiving test results from multiple sources, and batches
- up all the activity for a single test into a thread-safe batch where all
- other ThreadsafeForwardingResult objects sharing the same semaphore will be
- locked out.
-
- Typical use of ThreadsafeForwardingResult involves creating one
- ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These
- forward to the TestResult that the ConcurrentTestSuite run method was
- called with.
-
- target.done() is called once for each ThreadsafeForwardingResult that
- forwards to the same target. If the target's done() takes special action,
- care should be taken to accommodate this.
- """
-
- def __init__(self, target, semaphore):
- """Create a ThreadsafeForwardingResult forwarding to target.
-
- :param target: A TestResult.
- :param semaphore: A threading.Semaphore with limit 1.
- """
- TestResult.__init__(self)
- self.result = ExtendedToOriginalDecorator(target)
- self.semaphore = semaphore
-
- def _add_result_with_semaphore(self, method, test, *args, **kwargs):
- self.semaphore.acquire()
- try:
- self.result.time(self._test_start)
- self.result.startTest(test)
- self.result.time(self._now())
- try:
- method(test, *args, **kwargs)
- finally:
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def addError(self, test, err=None, details=None):
- self._add_result_with_semaphore(self.result.addError,
- test, err, details=details)
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._add_result_with_semaphore(self.result.addExpectedFailure,
- test, err, details=details)
-
- def addFailure(self, test, err=None, details=None):
- self._add_result_with_semaphore(self.result.addFailure,
- test, err, details=details)
-
- def addSkip(self, test, reason=None, details=None):
- self._add_result_with_semaphore(self.result.addSkip,
- test, reason, details=details)
-
- def addSuccess(self, test, details=None):
- self._add_result_with_semaphore(self.result.addSuccess,
- test, details=details)
-
- def addUnexpectedSuccess(self, test, details=None):
- self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
- test, details=details)
-
- def startTestRun(self):
- self.semaphore.acquire()
- try:
- self.result.startTestRun()
- finally:
- self.semaphore.release()
-
- def stopTestRun(self):
- self.semaphore.acquire()
- try:
- self.result.stopTestRun()
- finally:
- self.semaphore.release()
-
- def done(self):
- self.semaphore.acquire()
- try:
- self.result.done()
- finally:
- self.semaphore.release()
-
- def startTest(self, test):
- self._test_start = self._now()
- super(ThreadsafeForwardingResult, self).startTest(test)
-
- def wasSuccessful(self):
- return self.result.wasSuccessful()
-
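A rough sketch of the arrangement the docstring above describes: one forwarder per worker thread, all sharing a limit-1 semaphore and a single target result. The worker function and Probe class are invented for illustration.

import threading
import unittest
from testtools import TestResult, ThreadsafeForwardingResult

class Probe(unittest.TestCase):
    def test_noop(self):
        pass

target = TestResult()
semaphore = threading.Semaphore(1)

def worker():
    # Each thread reports into its own forwarder; the batched events are
    # replayed into the shared target while the semaphore is held.
    forwarder = ThreadsafeForwardingResult(target, semaphore)
    test = Probe('test_noop')
    forwarder.startTest(test)
    forwarder.addSuccess(test)
    forwarder.stopTest(test)

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(target.testsRun)   # 4: one batched startTest/stopTest pair per worker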
-
-class ExtendedToOriginalDecorator(object):
- """Permit new TestResult API code to degrade gracefully with old results.
-
- This decorates an existing TestResult and converts missing outcomes
- such as addSkip to older outcomes such as addSuccess. It also supports
- the extended details protocol. In all cases the most recent protocol
- is attempted first, and fallbacks only occur when the decorated result
- does not support the newer style of calling.
- """
-
- def __init__(self, decorated):
- self.decorated = decorated
-
- def __getattr__(self, name):
- return getattr(self.decorated, name)
-
- def addError(self, test, err=None, details=None):
- self._check_args(err, details)
- if details is not None:
- try:
- return self.decorated.addError(test, details=details)
- except TypeError:
- # have to convert
- err = self._details_to_exc_info(details)
- return self.decorated.addError(test, err)
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._check_args(err, details)
- addExpectedFailure = getattr(
- self.decorated, 'addExpectedFailure', None)
- if addExpectedFailure is None:
- return self.addSuccess(test)
- if details is not None:
- try:
- return addExpectedFailure(test, details=details)
- except TypeError:
- # have to convert
- err = self._details_to_exc_info(details)
- return addExpectedFailure(test, err)
-
- def addFailure(self, test, err=None, details=None):
- self._check_args(err, details)
- if details is not None:
- try:
- return self.decorated.addFailure(test, details=details)
- except TypeError:
- # have to convert
- err = self._details_to_exc_info(details)
- return self.decorated.addFailure(test, err)
-
- def addSkip(self, test, reason=None, details=None):
- self._check_args(reason, details)
- addSkip = getattr(self.decorated, 'addSkip', None)
- if addSkip is None:
- return self.decorated.addSuccess(test)
- if details is not None:
- try:
- return addSkip(test, details=details)
- except TypeError:
- # extract the reason if it's available
- try:
- reason = ''.join(details['reason'].iter_text())
- except KeyError:
- reason = _details_to_str(details)
- return addSkip(test, reason)
-
- def addUnexpectedSuccess(self, test, details=None):
- outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
- if outcome is None:
- try:
- test.fail("")
- except test.failureException:
- return self.addFailure(test, sys.exc_info())
- if details is not None:
- try:
- return outcome(test, details=details)
- except TypeError:
- pass
- return outcome(test)
-
- def addSuccess(self, test, details=None):
- if details is not None:
- try:
- return self.decorated.addSuccess(test, details=details)
- except TypeError:
- pass
- return self.decorated.addSuccess(test)
-
- def _check_args(self, err, details):
- param_count = 0
- if err is not None:
- param_count += 1
- if details is not None:
- param_count += 1
- if param_count != 1:
- raise ValueError("Must pass only one of err '%s' and details '%s"
- % (err, details))
-
- def _details_to_exc_info(self, details):
- """Convert a details dict to an exc_info tuple."""
- return (
- _StringException,
- _StringException(_details_to_str(details, special='traceback')),
- None)
-
- def done(self):
- try:
- return self.decorated.done()
- except AttributeError:
- return
-
- def progress(self, offset, whence):
- method = getattr(self.decorated, 'progress', None)
- if method is None:
- return
- return method(offset, whence)
-
- @property
- def shouldStop(self):
- return self.decorated.shouldStop
-
- def startTest(self, test):
- return self.decorated.startTest(test)
-
- def startTestRun(self):
- try:
- return self.decorated.startTestRun()
- except AttributeError:
- return
-
- def stop(self):
- return self.decorated.stop()
-
- def stopTest(self, test):
- return self.decorated.stopTest(test)
-
- def stopTestRun(self):
- try:
- return self.decorated.stopTestRun()
- except AttributeError:
- return
-
- def tags(self, new_tags, gone_tags):
- method = getattr(self.decorated, 'tags', None)
- if method is None:
- return
- return method(new_tags, gone_tags)
-
- def time(self, a_datetime):
- method = getattr(self.decorated, 'time', None)
- if method is None:
- return
- return method(a_datetime)
-
- def wasSuccessful(self):
- return self.decorated.wasSuccessful()
-
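A sketch of the graceful degradation described above: a details-style addSkip call is converted into the reason-string form that a plain unittest result understands. text_content comes from testtools.content; the Probe class is invented for illustration.

import unittest
from testtools import ExtendedToOriginalDecorator
from testtools.content import text_content

class Probe(unittest.TestCase):
    def test_noop(self):
        pass

old_style = unittest.TestResult()   # only knows addSkip(test, reason)
result = ExtendedToOriginalDecorator(old_style)
test = Probe('test_noop')
result.startTest(test)
result.addSkip(test, details={'reason': text_content("backend unavailable")})
result.stopTest(test)
print(old_style.skipped)   # [(<Probe ...>, 'backend unavailable')]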
-
-class _StringException(Exception):
- """An exception made from an arbitrary string."""
-
- if not str_is_unicode:
- def __init__(self, string):
- if type(string) is not unicode:
- raise TypeError("_StringException expects unicode, got %r" %
- (string,))
- Exception.__init__(self, string)
-
- def __str__(self):
- return self.args[0].encode("utf-8")
-
- def __unicode__(self):
- return self.args[0]
- # For 3.0 and above the default __str__ is fine, so we don't define one.
-
- def __hash__(self):
- return id(self)
-
- def __eq__(self, other):
- try:
- return self.args == other.args
- except AttributeError:
- return False
-
-
-def _format_text_attachment(name, text):
- if '\n' in text:
- return "%s: {{{\n%s\n}}}\n" % (name, text)
- return "%s: {{{%s}}}" % (name, text)
-
-
-def _details_to_str(details, special=None):
- """Convert a details dict to a string.
-
- :param details: A dictionary mapping short names to ``Content`` objects.
- :param special: If specified, an attachment that should have special
- attention drawn to it. The primary attachment. Normally it's the
- traceback that caused the test to fail.
- :return: A formatted string that can be included in text test results.
- """
- empty_attachments = []
- binary_attachments = []
- text_attachments = []
- special_content = None
- # sorted is for testing, may want to remove that and use a dict
- # subclass with defined order for items instead.
- for key, content in sorted(details.items()):
- if content.content_type.type != 'text':
- binary_attachments.append((key, content.content_type))
- continue
- text = _u('').join(content.iter_text()).strip()
- if not text:
- empty_attachments.append(key)
- continue
- # We want the 'special' attachment to be at the bottom.
- if key == special:
- special_content = '%s\n' % (text,)
- continue
- text_attachments.append(_format_text_attachment(key, text))
- if text_attachments and not text_attachments[-1].endswith('\n'):
- text_attachments.append('')
- if special_content:
- text_attachments.append(special_content)
- lines = []
- if binary_attachments:
- lines.append('Binary content:\n')
- for name, content_type in binary_attachments:
- lines.append(' %s (%s)\n' % (name, content_type))
- if empty_attachments:
- lines.append('Empty attachments:\n')
- for name in empty_attachments:
- lines.append(' %s\n' % (name,))
- if (binary_attachments or empty_attachments) and text_attachments:
- lines.append('\n')
- lines.append('\n'.join(text_attachments))
- return _u('').join(lines)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py
deleted file mode 100644
index ebdd4a95102..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_matchers.py
+++ /dev/null
@@ -1,1071 +0,0 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
-
-"""Tests for matchers."""
-
-import doctest
-import re
-import sys
-
-from testtools import (
- Matcher, # check that Matcher is exposed at the top level for docs.
- TestCase,
- )
-from testtools.compat import (
- StringIO,
- str_is_unicode,
- text_repr,
- _b,
- _u,
- )
-from testtools.matchers import (
- AfterPreprocessing,
- AllMatch,
- Annotate,
- AnnotatedMismatch,
- _BinaryMismatch,
- Contains,
- Equals,
- DocTestMatches,
- DoesNotEndWith,
- DoesNotStartWith,
- EndsWith,
- KeysEqual,
- Is,
- IsInstance,
- LessThan,
- GreaterThan,
- MatchesAny,
- MatchesAll,
- MatchesException,
- MatchesListwise,
- MatchesRegex,
- MatchesSetwise,
- MatchesStructure,
- Mismatch,
- MismatchDecorator,
- MismatchError,
- Not,
- NotEquals,
- Raises,
- raises,
- StartsWith,
- )
-from testtools.tests.helpers import FullStackRunTest
-
-# Silence pyflakes.
-Matcher
-
-
-class TestMismatch(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_constructor_arguments(self):
- mismatch = Mismatch("some description", {'detail': "things"})
- self.assertEqual("some description", mismatch.describe())
- self.assertEqual({'detail': "things"}, mismatch.get_details())
-
- def test_constructor_no_arguments(self):
- mismatch = Mismatch()
- self.assertThat(mismatch.describe,
- Raises(MatchesException(NotImplementedError)))
- self.assertEqual({}, mismatch.get_details())
-
-
-class TestMismatchError(TestCase):
-
- def test_is_assertion_error(self):
- # MismatchError is an AssertionError, so that most of the time, it
- # looks like a test failure, rather than an error.
- def raise_mismatch_error():
- raise MismatchError(2, Equals(3), Equals(3).match(2))
- self.assertRaises(AssertionError, raise_mismatch_error)
-
- def test_default_description_is_mismatch(self):
- mismatch = Equals(3).match(2)
- e = MismatchError(2, Equals(3), mismatch)
- self.assertEqual(mismatch.describe(), str(e))
-
- def test_default_description_unicode(self):
- matchee = _u('\xa7')
- matcher = Equals(_u('a'))
- mismatch = matcher.match(matchee)
- e = MismatchError(matchee, matcher, mismatch)
- self.assertEqual(mismatch.describe(), str(e))
-
- def test_verbose_description(self):
- matchee = 2
- matcher = Equals(3)
- mismatch = matcher.match(2)
- e = MismatchError(matchee, matcher, mismatch, True)
- expected = (
- 'Match failed. Matchee: %r\n'
- 'Matcher: %s\n'
- 'Difference: %s\n' % (
- matchee,
- matcher,
- matcher.match(matchee).describe(),
- ))
- self.assertEqual(expected, str(e))
-
- def test_verbose_unicode(self):
- # When assertThat is given matchees or matchers that contain non-ASCII
- # unicode strings, we can still provide a meaningful error.
- matchee = _u('\xa7')
- matcher = Equals(_u('a'))
- mismatch = matcher.match(matchee)
- expected = (
- 'Match failed. Matchee: %s\n'
- 'Matcher: %s\n'
- 'Difference: %s\n' % (
- text_repr(matchee),
- matcher,
- mismatch.describe(),
- ))
- e = MismatchError(matchee, matcher, mismatch, True)
- if str_is_unicode:
- actual = str(e)
- else:
- actual = unicode(e)
- # Using str() should still work, and return ascii only
- self.assertEqual(
- expected.replace(matchee, matchee.encode("unicode-escape")),
- str(e).decode("ascii"))
- self.assertEqual(expected, actual)
-
-
-class Test_BinaryMismatch(TestCase):
- """Mismatches from binary comparisons need useful describe output"""
-
- _long_string = "This is a longish multiline non-ascii string\n\xa7"
- _long_b = _b(_long_string)
- _long_u = _u(_long_string)
-
- def test_short_objects(self):
- o1, o2 = object(), object()
- mismatch = _BinaryMismatch(o1, "!~", o2)
- self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
-
- def test_short_mixed_strings(self):
- b, u = _b("\xa7"), _u("\xa7")
- mismatch = _BinaryMismatch(b, "!~", u)
- self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
-
- def test_long_bytes(self):
- one_line_b = self._long_b.replace(_b("\n"), _b(" "))
- mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
- self.assertEqual(mismatch.describe(),
- "%s:\nreference = %s\nactual = %s\n" % ("!~",
- text_repr(one_line_b),
- text_repr(self._long_b, multiline=True)))
-
- def test_long_unicode(self):
- one_line_u = self._long_u.replace("\n", " ")
- mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
- self.assertEqual(mismatch.describe(),
- "%s:\nreference = %s\nactual = %s\n" % ("!~",
- text_repr(one_line_u),
- text_repr(self._long_u, multiline=True)))
-
- def test_long_mixed_strings(self):
- mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
- self.assertEqual(mismatch.describe(),
- "%s:\nreference = %s\nactual = %s\n" % ("!~",
- text_repr(self._long_b, multiline=True),
- text_repr(self._long_u, multiline=True)))
-
- def test_long_bytes_and_object(self):
- obj = object()
- mismatch = _BinaryMismatch(self._long_b, "!~", obj)
- self.assertEqual(mismatch.describe(),
- "%s:\nreference = %s\nactual = %s\n" % ("!~",
- text_repr(self._long_b, multiline=True),
- repr(obj)))
-
- def test_long_unicode_and_object(self):
- obj = object()
- mismatch = _BinaryMismatch(self._long_u, "!~", obj)
- self.assertEqual(mismatch.describe(),
- "%s:\nreference = %s\nactual = %s\n" % ("!~",
- text_repr(self._long_u, multiline=True),
- repr(obj)))
-
-
-class TestMatchersInterface(object):
-
- run_tests_with = FullStackRunTest
-
- def test_matches_match(self):
- matcher = self.matches_matcher
- matches = self.matches_matches
- mismatches = self.matches_mismatches
- for candidate in matches:
- self.assertEqual(None, matcher.match(candidate))
- for candidate in mismatches:
- mismatch = matcher.match(candidate)
- self.assertNotEqual(None, mismatch)
- self.assertNotEqual(None, getattr(mismatch, 'describe', None))
-
- def test__str__(self):
- # [(expected, object to __str__)].
- examples = self.str_examples
- for expected, matcher in examples:
- self.assertThat(matcher, DocTestMatches(expected))
-
- def test_describe_difference(self):
- # [(expected, matchee, matcher), ...]
- examples = self.describe_examples
- for difference, matchee, matcher in examples:
- mismatch = matcher.match(matchee)
- self.assertEqual(difference, mismatch.describe())
-
- def test_mismatch_details(self):
- # The mismatch object must provide get_details, which must return a
- # dictionary mapping names to Content objects.
- examples = self.describe_examples
- for difference, matchee, matcher in examples:
- mismatch = matcher.match(matchee)
- details = mismatch.get_details()
- self.assertEqual(dict(details), details)
-
-
-class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
- matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
- matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
-
- str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
- DocTestMatches("Ran 1 test in ...s")),
- ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
- ]
-
- describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
- ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
- DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
-
-
-class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
-
- matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
- matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
- matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
-
- str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
- DocTestMatches(_u("\xa7"))),
- ]
-
- describe_examples = [(
- _u("Expected:\n \xa7\nGot:\n a\n"),
- "a",
- DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
-
-
-class TestDocTestMatchesSpecific(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test___init__simple(self):
- matcher = DocTestMatches("foo")
- self.assertEqual("foo\n", matcher.want)
-
- def test___init__flags(self):
- matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
- self.assertEqual("bar\n", matcher.want)
- self.assertEqual(doctest.ELLIPSIS, matcher.flags)
-
- def test_describe_non_ascii_bytes(self):
- """Even with bytestrings, the mismatch should be coercible to unicode
-
- DocTestMatches is intended for text, but the Python 2 str type also
- permits arbitrary binary inputs. This is a slightly bogus thing to do,
- and under Python 3 using bytes objects will reasonably raise an error.
- """
- header = _b("\x89PNG\r\n\x1a\n...")
- if str_is_unicode:
- self.assertRaises(TypeError,
- DocTestMatches, header, doctest.ELLIPSIS)
- return
- matcher = DocTestMatches(header, doctest.ELLIPSIS)
- mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
- # Must be treatable as unicode text, the exact output matters less
- self.assertTrue(unicode(mismatch.describe()))
-
-
-class TestEqualsInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Equals(1)
- matches_matches = [1]
- matches_mismatches = [2]
-
- str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
-
- describe_examples = [("1 != 2", 2, Equals(1))]
-
-
-class TestNotEqualsInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = NotEquals(1)
- matches_matches = [2]
- matches_mismatches = [1]
-
- str_examples = [
- ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
-
- describe_examples = [("1 == 1", 1, NotEquals(1))]
-
-
-class TestIsInterface(TestCase, TestMatchersInterface):
-
- foo = object()
- bar = object()
-
- matches_matcher = Is(foo)
- matches_matches = [foo]
- matches_mismatches = [bar, 1]
-
- str_examples = [("Is(2)", Is(2))]
-
- describe_examples = [("1 is not 2", 2, Is(1))]
-
-
-class TestIsInstanceInterface(TestCase, TestMatchersInterface):
-
- class Foo:pass
-
- matches_matcher = IsInstance(Foo)
- matches_matches = [Foo()]
- matches_mismatches = [object(), 1, Foo]
-
- str_examples = [
- ("IsInstance(str)", IsInstance(str)),
- ("IsInstance(str, int)", IsInstance(str, int)),
- ]
-
- describe_examples = [
- ("'foo' is not an instance of int", 'foo', IsInstance(int)),
- ("'foo' is not an instance of any of (int, type)", 'foo',
- IsInstance(int, type)),
- ]
-
-
-class TestLessThanInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = LessThan(4)
- matches_matches = [-5, 3]
- matches_mismatches = [4, 5, 5000]
-
- str_examples = [
- ("LessThan(12)", LessThan(12)),
- ]
-
- describe_examples = [
- ('4 is not > 5', 5, LessThan(4)),
- ('4 is not > 4', 4, LessThan(4)),
- ]
-
-
-class TestGreaterThanInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = GreaterThan(4)
- matches_matches = [5, 8]
- matches_mismatches = [-2, 0, 4]
-
- str_examples = [
- ("GreaterThan(12)", GreaterThan(12)),
- ]
-
- describe_examples = [
- ('5 is not < 4', 4, GreaterThan(5)),
- ('4 is not < 4', 4, GreaterThan(4)),
- ]
-
-
-class TestContainsInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Contains('foo')
- matches_matches = ['foo', 'afoo', 'fooa']
- matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
-
- str_examples = [
- ("Contains(1)", Contains(1)),
- ("Contains('foo')", Contains('foo')),
- ]
-
- describe_examples = [("1 not in 2", 2, Contains(1))]
-
-
-def make_error(type, *args, **kwargs):
- try:
- raise type(*args, **kwargs)
- except type:
- return sys.exc_info()
-
-
-class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesException(ValueError("foo"))
- error_foo = make_error(ValueError, 'foo')
- error_bar = make_error(ValueError, 'bar')
- error_base_foo = make_error(Exception, 'foo')
- matches_matches = [error_foo]
- matches_mismatches = [error_bar, error_base_foo]
-
- str_examples = [
- ("MatchesException(Exception('foo',))",
- MatchesException(Exception('foo')))
- ]
- describe_examples = [
- ("%r is not a %r" % (Exception, ValueError),
- error_base_foo,
- MatchesException(ValueError("foo"))),
- ("ValueError('bar',) has different arguments to ValueError('foo',).",
- error_bar,
- MatchesException(ValueError("foo"))),
- ]
-
-
-class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesException(ValueError)
- error_foo = make_error(ValueError, 'foo')
- error_sub = make_error(UnicodeError, 'bar')
- error_base_foo = make_error(Exception, 'foo')
- matches_matches = [error_foo, error_sub]
- matches_mismatches = [error_base_foo]
-
- str_examples = [
- ("MatchesException(%r)" % Exception,
- MatchesException(Exception))
- ]
- describe_examples = [
- ("%r is not a %r" % (Exception, ValueError),
- error_base_foo,
- MatchesException(ValueError)),
- ]
-
-
-class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesException(ValueError, 'fo.')
- error_foo = make_error(ValueError, 'foo')
- error_sub = make_error(UnicodeError, 'foo')
- error_bar = make_error(ValueError, 'bar')
- matches_matches = [error_foo, error_sub]
- matches_mismatches = [error_bar]
-
- str_examples = [
- ("MatchesException(%r)" % Exception,
- MatchesException(Exception, 'fo.'))
- ]
- describe_examples = [
- ("'bar' does not match /fo./",
- error_bar, MatchesException(ValueError, "fo.")),
- ]
-
-
-class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesException(
- ValueError, AfterPreprocessing(str, Equals('foo')))
- error_foo = make_error(ValueError, 'foo')
- error_sub = make_error(UnicodeError, 'foo')
- error_bar = make_error(ValueError, 'bar')
- matches_matches = [error_foo, error_sub]
- matches_mismatches = [error_bar]
-
- str_examples = [
- ("MatchesException(%r)" % Exception,
- MatchesException(Exception, Equals('foo')))
- ]
- describe_examples = [
- ("5 != %r" % (error_bar[1],),
- error_bar, MatchesException(ValueError, Equals(5))),
- ]
-
-
-class TestNotInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Not(Equals(1))
- matches_matches = [2]
- matches_mismatches = [1]
-
- str_examples = [
- ("Not(Equals(1))", Not(Equals(1))),
- ("Not(Equals('1'))", Not(Equals('1')))]
-
- describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
-
-
-class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
- matches_matches = ["1", "2"]
- matches_mismatches = ["3"]
-
- str_examples = [(
- "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
- MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
- ]
-
- describe_examples = [("""Differences: [
-Expected:
- 1
-Got:
- 3
-
-Expected:
- 2
-Got:
- 3
-
-]""",
- "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
-
-
-class TestMatchesAllInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
- matches_matches = [3, 4]
- matches_mismatches = [1, 2]
-
- str_examples = [
- ("MatchesAll(NotEquals(1), NotEquals(2))",
- MatchesAll(NotEquals(1), NotEquals(2)))]
-
- describe_examples = [("""Differences: [
-1 == 1
-]""",
- 1, MatchesAll(NotEquals(1), NotEquals(2)))]
-
-
-class TestKeysEqual(TestCase, TestMatchersInterface):
-
- matches_matcher = KeysEqual('foo', 'bar')
- matches_matches = [
- {'foo': 0, 'bar': 1},
- ]
- matches_mismatches = [
- {},
- {'foo': 0},
- {'bar': 1},
- {'foo': 0, 'bar': 1, 'baz': 2},
- {'a': None, 'b': None, 'c': None},
- ]
-
- str_examples = [
- ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
- ]
-
- describe_examples = [
- ("['bar', 'foo'] does not match {'baz': 2, 'foo': 0, 'bar': 1}: "
- "Keys not equal",
- {'foo': 0, 'bar': 1, 'baz': 2}, KeysEqual('foo', 'bar')),
- ]
-
-
-class TestAnnotate(TestCase, TestMatchersInterface):
-
- matches_matcher = Annotate("foo", Equals(1))
- matches_matches = [1]
- matches_mismatches = [2]
-
- str_examples = [
- ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
-
- describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
-
- def test_if_message_no_message(self):
- # Annotate.if_message returns the given matcher if there is no
- # message.
- matcher = Equals(1)
- not_annotated = Annotate.if_message('', matcher)
- self.assertIs(matcher, not_annotated)
-
- def test_if_message_given_message(self):
- # Annotate.if_message returns an annotated version of the matcher if a
- # message is provided.
- matcher = Equals(1)
- expected = Annotate('foo', matcher)
- annotated = Annotate.if_message('foo', matcher)
- self.assertThat(
- annotated,
- MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
-
-
-class TestAnnotatedMismatch(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_forwards_details(self):
- x = Mismatch('description', {'foo': 'bar'})
- annotated = AnnotatedMismatch("annotation", x)
- self.assertEqual(x.get_details(), annotated.get_details())
-
-
-class TestRaisesInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Raises()
- def boom():
- raise Exception('foo')
- matches_matches = [boom]
- matches_mismatches = [lambda:None]
-
- # Tricky to get function objects to render consistently, and the interfaces
- # helper uses assertEqual rather than (for instance) DocTestMatches.
- str_examples = []
-
- describe_examples = []
-
-
-class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Raises(
- exception_matcher=MatchesException(Exception('foo')))
- def boom_bar():
- raise Exception('bar')
- def boom_foo():
- raise Exception('foo')
- matches_matches = [boom_foo]
- matches_mismatches = [lambda:None, boom_bar]
-
- # Tricky to get function objects to render consistently, and the interfaces
- # helper uses assertEqual rather than (for instance) DocTestMatches.
- str_examples = []
-
- describe_examples = []
-
-
-class TestRaisesBaseTypes(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def raiser(self):
- raise KeyboardInterrupt('foo')
-
- def test_KeyboardInterrupt_matched(self):
- # When KeyboardInterrupt is matched, it is swallowed.
- matcher = Raises(MatchesException(KeyboardInterrupt))
- self.assertThat(self.raiser, matcher)
-
- def test_KeyboardInterrupt_propogates(self):
- # The default 'it raised' propagates KeyboardInterrupt.
- match_keyb = Raises(MatchesException(KeyboardInterrupt))
- def raise_keyb_from_match():
- matcher = Raises()
- matcher.match(self.raiser)
- self.assertThat(raise_keyb_from_match, match_keyb)
-
- def test_KeyboardInterrupt_match_Exception_propogates(self):
- # If the raised exception isn't matched, and it is not a subclass of
- # Exception, it is propagated.
- match_keyb = Raises(MatchesException(KeyboardInterrupt))
- def raise_keyb_from_match():
- if sys.version_info > (2, 5):
- matcher = Raises(MatchesException(Exception))
- else:
- # On Python 2.4 KeyboardInterrupt is a StandardError subclass
- # but should propagate from less generic exception matchers
- matcher = Raises(MatchesException(EnvironmentError))
- matcher.match(self.raiser)
- self.assertThat(raise_keyb_from_match, match_keyb)
-
-
-class TestRaisesConvenience(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_exc_type(self):
- self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
-
- def test_exc_value(self):
- e = RuntimeError("You lose!")
- def raiser():
- raise e
- self.assertThat(raiser, raises(e))
-
-
-class DoesNotStartWithTests(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_describe(self):
- mismatch = DoesNotStartWith("fo", "bo")
- self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
-
- def test_describe_non_ascii_unicode(self):
- string = _u("A\xA7")
- suffix = _u("B\xA7")
- mismatch = DoesNotStartWith(string, suffix)
- self.assertEqual("%s does not start with %s." % (
- text_repr(string), text_repr(suffix)),
- mismatch.describe())
-
- def test_describe_non_ascii_bytes(self):
- string = _b("A\xA7")
- suffix = _b("B\xA7")
- mismatch = DoesNotStartWith(string, suffix)
- self.assertEqual("%r does not start with %r." % (string, suffix),
- mismatch.describe())
-
-
-class StartsWithTests(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_str(self):
- matcher = StartsWith("bar")
- self.assertEqual("StartsWith('bar')", str(matcher))
-
- def test_str_with_bytes(self):
- b = _b("\xA7")
- matcher = StartsWith(b)
- self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
-
- def test_str_with_unicode(self):
- u = _u("\xA7")
- matcher = StartsWith(u)
- self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
-
- def test_match(self):
- matcher = StartsWith("bar")
- self.assertIs(None, matcher.match("barf"))
-
- def test_mismatch_returns_does_not_start_with(self):
- matcher = StartsWith("bar")
- self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
-
- def test_mismatch_sets_matchee(self):
- matcher = StartsWith("bar")
- mismatch = matcher.match("foo")
- self.assertEqual("foo", mismatch.matchee)
-
- def test_mismatch_sets_expected(self):
- matcher = StartsWith("bar")
- mismatch = matcher.match("foo")
- self.assertEqual("bar", mismatch.expected)
-
-
-class DoesNotEndWithTests(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_describe(self):
- mismatch = DoesNotEndWith("fo", "bo")
- self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
-
- def test_describe_non_ascii_unicode(self):
- string = _u("A\xA7")
- suffix = _u("B\xA7")
- mismatch = DoesNotEndWith(string, suffix)
- self.assertEqual("%s does not end with %s." % (
- text_repr(string), text_repr(suffix)),
- mismatch.describe())
-
- def test_describe_non_ascii_bytes(self):
- string = _b("A\xA7")
- suffix = _b("B\xA7")
- mismatch = DoesNotEndWith(string, suffix)
- self.assertEqual("%r does not end with %r." % (string, suffix),
- mismatch.describe())
-
-
-class EndsWithTests(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_str(self):
- matcher = EndsWith("bar")
- self.assertEqual("EndsWith('bar')", str(matcher))
-
- def test_str_with_bytes(self):
- b = _b("\xA7")
- matcher = EndsWith(b)
- self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
-
- def test_str_with_unicode(self):
- u = _u("\xA7")
- matcher = EndsWith(u)
- self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
-
- def test_match(self):
- matcher = EndsWith("arf")
- self.assertIs(None, matcher.match("barf"))
-
- def test_mismatch_returns_does_not_end_with(self):
- matcher = EndsWith("bar")
- self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
-
- def test_mismatch_sets_matchee(self):
- matcher = EndsWith("bar")
- mismatch = matcher.match("foo")
- self.assertEqual("foo", mismatch.matchee)
-
- def test_mismatch_sets_expected(self):
- matcher = EndsWith("bar")
- mismatch = matcher.match("foo")
- self.assertEqual("bar", mismatch.expected)
-
-
-def run_doctest(obj, name):
- p = doctest.DocTestParser()
- t = p.get_doctest(
- obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
- r = doctest.DocTestRunner()
- output = StringIO()
- r.run(t, out=output.write)
- return r.failures, output.getvalue()
-
-
-class TestMatchesListwise(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_docstring(self):
- failure_count, output = run_doctest(
- MatchesListwise, "MatchesListwise")
- if failure_count:
- self.fail("Doctest failed with %s" % output)
-
-
-class TestMatchesStructure(TestCase, TestMatchersInterface):
-
- class SimpleClass:
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
- matches_matches = [SimpleClass(1, 2)]
- matches_mismatches = [
- SimpleClass(2, 2),
- SimpleClass(1, 1),
- SimpleClass(3, 3),
- ]
-
- str_examples = [
- ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
- ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
- ("MatchesStructure(x=Equals(1), y=Equals(2))",
- MatchesStructure(x=Equals(1), y=Equals(2))),
- ]
-
- describe_examples = [
- ("""\
-Differences: [
-3 != 1: x
-]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
- ("""\
-Differences: [
-3 != 2: y
-]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
- ("""\
-Differences: [
-0 != 1: x
-0 != 2: y
-]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
- ]
-
- def test_fromExample(self):
- self.assertThat(
- self.SimpleClass(1, 2),
- MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
-
- def test_byEquality(self):
- self.assertThat(
- self.SimpleClass(1, 2),
- MatchesStructure.byEquality(x=1))
-
- def test_withStructure(self):
- self.assertThat(
- self.SimpleClass(1, 2),
- MatchesStructure.byMatcher(LessThan, x=2))
-
- def test_update(self):
- self.assertThat(
- self.SimpleClass(1, 2),
- MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
-
- def test_update_none(self):
- self.assertThat(
- self.SimpleClass(1, 2),
- MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
- z=None))
-
-
-class TestMatchesRegex(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesRegex('a|b')
- matches_matches = ['a', 'b']
- matches_mismatches = ['c']
-
- str_examples = [
- ("MatchesRegex('a|b')", MatchesRegex('a|b')),
- ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
- ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
- ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
- ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
- ]
-
- describe_examples = [
- ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
- ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
- ("%r does not match /\\s+\\xa7/" % (_b('c'),),
- _b('c'), MatchesRegex(_b("\\s+\xA7"))),
- ("%r does not match /\\s+\\xa7/" % (_u('c'),),
- _u('c'), MatchesRegex(_u("\\s+\xA7"))),
- ]
-
-
-class TestMatchesSetwise(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def assertMismatchWithDescriptionMatching(self, value, matcher,
- description_matcher):
- mismatch = matcher.match(value)
- if mismatch is None:
- self.fail("%s matched %s" % (matcher, value))
- actual_description = mismatch.describe()
- self.assertThat(
- actual_description,
- Annotate(
- "%s matching %s" % (matcher, value),
- description_matcher))
-
- def test_matches(self):
- self.assertIs(
- None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
-
- def test_mismatches(self):
- self.assertMismatchWithDescriptionMatching(
- [2, 3], MatchesSetwise(Equals(1), Equals(2)),
- MatchesRegex('.*There was 1 mismatch$', re.S))
-
- def test_too_many_matchers(self):
- self.assertMismatchWithDescriptionMatching(
- [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
- Equals('There was 1 matcher left over: Equals(1)'))
-
- def test_too_many_values(self):
- self.assertMismatchWithDescriptionMatching(
- [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
- Equals('There was 1 value left over: [3]'))
-
- def test_two_too_many_matchers(self):
- self.assertMismatchWithDescriptionMatching(
- [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
- MatchesRegex(
- 'There were 2 matchers left over: Equals\([12]\), '
- 'Equals\([12]\)'))
-
- def test_two_too_many_values(self):
- self.assertMismatchWithDescriptionMatching(
- [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
- MatchesRegex(
- 'There were 2 values left over: \[[34], [34]\]'))
-
- def test_mismatch_and_too_many_matchers(self):
- self.assertMismatchWithDescriptionMatching(
- [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
- MatchesRegex(
- '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
- re.S))
-
- def test_mismatch_and_too_many_values(self):
- self.assertMismatchWithDescriptionMatching(
- [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
- MatchesRegex(
- '.*There was 1 mismatch and 1 extra value: \[[34]\]',
- re.S))
-
- def test_mismatch_and_two_too_many_matchers(self):
- self.assertMismatchWithDescriptionMatching(
- [3, 4], MatchesSetwise(
- Equals(0), Equals(1), Equals(2), Equals(3)),
- MatchesRegex(
- '.*There was 1 mismatch and 2 extra matchers: '
- 'Equals\([012]\), Equals\([012]\)', re.S))
-
- def test_mismatch_and_two_too_many_values(self):
- self.assertMismatchWithDescriptionMatching(
- [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
- MatchesRegex(
- '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
- re.S))
-
-
-class TestAfterPreprocessing(TestCase, TestMatchersInterface):
-
- def parity(x):
- return x % 2
-
- matches_matcher = AfterPreprocessing(parity, Equals(1))
- matches_matches = [3, 5]
- matches_mismatches = [2]
-
- str_examples = [
- ("AfterPreprocessing(<function parity>, Equals(1))",
- AfterPreprocessing(parity, Equals(1))),
- ]
-
- describe_examples = [
- ("1 != 0: after <function parity> on 2", 2,
- AfterPreprocessing(parity, Equals(1))),
- ("1 != 0", 2,
- AfterPreprocessing(parity, Equals(1), annotate=False)),
- ]
-
-
-class TestMismatchDecorator(TestCase):
-
- run_tests_with = FullStackRunTest
-
- def test_forwards_description(self):
- x = Mismatch("description", {'foo': 'bar'})
- decorated = MismatchDecorator(x)
- self.assertEqual(x.describe(), decorated.describe())
-
- def test_forwards_details(self):
- x = Mismatch("description", {'foo': 'bar'})
- decorated = MismatchDecorator(x)
- self.assertEqual(x.get_details(), decorated.get_details())
-
- def test_repr(self):
- x = Mismatch("description", {'foo': 'bar'})
- decorated = MismatchDecorator(x)
- self.assertEqual(
- '<testtools.matchers.MismatchDecorator(%r)>' % (x,),
- repr(decorated))
-
-
-class TestAllMatch(TestCase, TestMatchersInterface):
-
- matches_matcher = AllMatch(LessThan(10))
- matches_matches = [
- [9, 9, 9],
- (9, 9),
- iter([9, 9, 9, 9, 9]),
- ]
- matches_mismatches = [
- [11, 9, 9],
- iter([9, 12, 9, 11]),
- ]
-
- str_examples = [
- ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
- ]
-
- describe_examples = [
- ('Differences: [\n'
- '10 is not > 11\n'
- '10 is not > 10\n'
- ']',
- [11, 9, 10],
- AllMatch(LessThan(10))),
- ]
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py
deleted file mode 100644
index d2974f63731..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_run.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Tests for the test runner logic."""
-
-from testtools.compat import (
- _b,
- StringIO,
- )
-from testtools.helpers import try_import
-fixtures = try_import('fixtures')
-
-import testtools
-from testtools import TestCase, run
-
-
-if fixtures:
- class SampleTestFixture(fixtures.Fixture):
- """Creates testtools.runexample temporarily."""
-
- def __init__(self):
- self.package = fixtures.PythonPackage(
- 'runexample', [('__init__.py', _b("""
-from testtools import TestCase
-
-class TestFoo(TestCase):
- def test_bar(self):
- pass
- def test_quux(self):
- pass
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
-"""))])
-
- def setUp(self):
- super(SampleTestFixture, self).setUp()
- self.useFixture(self.package)
- testtools.__path__.append(self.package.base)
- self.addCleanup(testtools.__path__.remove, self.package.base)
-
-
-class TestRun(TestCase):
-
- def test_run_list(self):
- if fixtures is None:
- self.skipTest("Need fixtures")
- self.useFixture(SampleTestFixture())
- out = StringIO()
- run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
- self.assertEqual("""testtools.runexample.TestFoo.test_bar
-testtools.runexample.TestFoo.test_quux
-""", out.getvalue())
-
- def test_run_load_list(self):
- if fixtures is None:
- self.skipTest("Need fixtures")
- self.useFixture(SampleTestFixture())
- out = StringIO()
- # We load two tests - one that exists and one that doesn't, and we
- # should get the one that exists and neither the one that doesn't nor
- # the unmentioned one that does.
- tempdir = self.useFixture(fixtures.TempDir())
- tempname = tempdir.path + '/tests.list'
- f = open(tempname, 'wb')
- try:
- f.write(_b("""
-testtools.runexample.TestFoo.test_bar
-testtools.runexample.missingtest
-"""))
- finally:
- f.close()
- run.main(['prog', '-l', '--load-list', tempname,
- 'testtools.runexample.test_suite'], out)
- self.assertEqual("""testtools.runexample.TestFoo.test_bar
-""", out.getvalue())
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py
deleted file mode 100644
index a241788bea8..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testresult.py
+++ /dev/null
@@ -1,1507 +0,0 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
-
-"""Test TestResults and related things."""
-
-__metaclass__ = type
-
-import codecs
-import datetime
-import doctest
-import os
-import shutil
-import sys
-import tempfile
-import threading
-import warnings
-
-from testtools import (
- ExtendedToOriginalDecorator,
- MultiTestResult,
- TestCase,
- TestResult,
- TextTestResult,
- ThreadsafeForwardingResult,
- testresult,
- )
-from testtools.compat import (
- _b,
- _get_exception_encoding,
- _r,
- _u,
- str_is_unicode,
- StringIO,
- )
-from testtools.content import (
- Content,
- content_from_stream,
- text_content,
- )
-from testtools.content_type import ContentType, UTF8_TEXT
-from testtools.matchers import (
- DocTestMatches,
- Equals,
- MatchesException,
- Raises,
- )
-from testtools.tests.helpers import (
- an_exc_info,
- FullStackRunTest,
- LoggingResult,
- run_with_stack_hidden,
- )
-from testtools.testresult.doubles import (
- Python26TestResult,
- Python27TestResult,
- ExtendedTestResult,
- )
-from testtools.testresult.real import (
- _details_to_str,
- utc,
- )
-
-
-def make_erroring_test():
- class Test(TestCase):
- def error(self):
- 1/0
- return Test("error")
-
-
-def make_failing_test():
- class Test(TestCase):
- def failed(self):
- self.fail("yo!")
- return Test("failed")
-
-
-def make_unexpectedly_successful_test():
- class Test(TestCase):
- def succeeded(self):
- self.expectFailure("yo!", lambda: None)
- return Test("succeeded")
-
-
-def make_test():
- class Test(TestCase):
- def test(self):
- pass
- return Test("test")
-
-
-def make_exception_info(exceptionFactory, *args, **kwargs):
- try:
- raise exceptionFactory(*args, **kwargs)
- except:
- return sys.exc_info()
-
-
-class Python26Contract(object):
-
- def test_fresh_result_is_successful(self):
- # A result is considered successful before any tests are run.
- result = self.makeResult()
- self.assertTrue(result.wasSuccessful())
-
- def test_addError_is_failure(self):
- # addError fails the test run.
- result = self.makeResult()
- result.startTest(self)
- result.addError(self, an_exc_info)
- result.stopTest(self)
- self.assertFalse(result.wasSuccessful())
-
- def test_addFailure_is_failure(self):
- # addFailure fails the test run.
- result = self.makeResult()
- result.startTest(self)
- result.addFailure(self, an_exc_info)
- result.stopTest(self)
- self.assertFalse(result.wasSuccessful())
-
- def test_addSuccess_is_success(self):
- # addSuccess does not fail the test run.
- result = self.makeResult()
- result.startTest(self)
- result.addSuccess(self)
- result.stopTest(self)
- self.assertTrue(result.wasSuccessful())
-
-
-class Python27Contract(Python26Contract):
-
- def test_addExpectedFailure(self):
- # Calling addExpectedFailure(test, exc_info) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addExpectedFailure(self, an_exc_info)
-
- def test_addExpectedFailure_is_success(self):
- # addExpectedFailure does not fail the test run.
- result = self.makeResult()
- result.startTest(self)
- result.addExpectedFailure(self, an_exc_info)
- result.stopTest(self)
- self.assertTrue(result.wasSuccessful())
-
- def test_addSkipped(self):
- # Calling addSkip(test, reason) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addSkip(self, _u("Skipped for some reason"))
-
- def test_addSkip_is_success(self):
- # addSkip does not fail the test run.
- result = self.makeResult()
- result.startTest(self)
- result.addSkip(self, _u("Skipped for some reason"))
- result.stopTest(self)
- self.assertTrue(result.wasSuccessful())
-
- def test_addUnexpectedSuccess(self):
- # Calling addUnexpectedSuccess(test) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addUnexpectedSuccess(self)
-
- def test_addUnexpectedSuccess_was_successful(self):
- # addUnexpectedSuccess does not fail the test run in Python 2.7.
- result = self.makeResult()
- result.startTest(self)
- result.addUnexpectedSuccess(self)
- result.stopTest(self)
- self.assertTrue(result.wasSuccessful())
-
- def test_startStopTestRun(self):
- # Calling startTestRun and stopTestRun completes ok.
- result = self.makeResult()
- result.startTestRun()
- result.stopTestRun()
-
-
-class DetailsContract(Python27Contract):
- """Tests for the contract of TestResults."""
-
- def test_addExpectedFailure_details(self):
- # Calling addExpectedFailure(test, details=xxx) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addExpectedFailure(self, details={})
-
- def test_addError_details(self):
- # Calling addError(test, details=xxx) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addError(self, details={})
-
- def test_addFailure_details(self):
- # Calling addFailure(test, details=xxx) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addFailure(self, details={})
-
- def test_addSkipped_details(self):
- # Calling addSkip(test, details=xxx) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addSkip(self, details={})
-
- def test_addUnexpectedSuccess_details(self):
- # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addUnexpectedSuccess(self, details={})
-
- def test_addSuccess_details(self):
- # Calling addSuccess(test, details=xxx) completes ok.
- result = self.makeResult()
- result.startTest(self)
- result.addSuccess(self, details={})
-
-
-class FallbackContract(DetailsContract):
- """When we fall back, we apply our own policy choices when mapping calls.
-
- For instance, we map unexpectedSuccess to an error code, not to success.
- """
-
- def test_addUnexpectedSuccess_was_successful(self):
- # addUnexpectedSuccess fails test run in testtools.
- result = self.makeResult()
- result.startTest(self)
- result.addUnexpectedSuccess(self)
- result.stopTest(self)
- self.assertFalse(result.wasSuccessful())
-
-
-class StartTestRunContract(FallbackContract):
- """Defines the contract for testtools policy choices.
-
- That is, things which are not simply extensions to unittest but choices we
- have made differently.
- """
-
- def test_startTestRun_resets_unexpected_success(self):
- result = self.makeResult()
- result.startTest(self)
- result.addUnexpectedSuccess(self)
- result.stopTest(self)
- result.startTestRun()
- self.assertTrue(result.wasSuccessful())
-
- def test_startTestRun_resets_failure(self):
- result = self.makeResult()
- result.startTest(self)
- result.addFailure(self, an_exc_info)
- result.stopTest(self)
- result.startTestRun()
- self.assertTrue(result.wasSuccessful())
-
- def test_startTestRun_resets_errors(self):
- result = self.makeResult()
- result.startTest(self)
- result.addError(self, an_exc_info)
- result.stopTest(self)
- result.startTestRun()
- self.assertTrue(result.wasSuccessful())
-
-
-class TestTestResultContract(TestCase, StartTestRunContract):
-
- run_test_with = FullStackRunTest
-
- def makeResult(self):
- return TestResult()
-
-
-class TestMultiTestResultContract(TestCase, StartTestRunContract):
-
- run_test_with = FullStackRunTest
-
- def makeResult(self):
- return MultiTestResult(TestResult(), TestResult())
-
-
-class TestTextTestResultContract(TestCase, StartTestRunContract):
-
- run_test_with = FullStackRunTest
-
- def makeResult(self):
- return TextTestResult(StringIO())
-
-
-class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
-
- run_test_with = FullStackRunTest
-
- def makeResult(self):
- result_semaphore = threading.Semaphore(1)
- target = TestResult()
- return ThreadsafeForwardingResult(target, result_semaphore)
-
-
-class TestExtendedTestResultContract(TestCase, StartTestRunContract):
-
- def makeResult(self):
- return ExtendedTestResult()
-
-
-class TestPython26TestResultContract(TestCase, Python26Contract):
-
- def makeResult(self):
- return Python26TestResult()
-
-
-class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
-
- def makeResult(self):
- return ExtendedToOriginalDecorator(Python26TestResult())
-
-
-class TestPython27TestResultContract(TestCase, Python27Contract):
-
- def makeResult(self):
- return Python27TestResult()
-
-
-class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
-
- def makeResult(self):
- return ExtendedToOriginalDecorator(Python27TestResult())
-
-
-class TestTestResult(TestCase):
- """Tests for 'TestResult'."""
-
- run_tests_with = FullStackRunTest
-
- def makeResult(self):
- """Make an arbitrary result for testing."""
- return TestResult()
-
- def test_addSkipped(self):
- # Calling addSkip on a TestResult records the test that was skipped in
- # its skip_reasons dict.
- result = self.makeResult()
- result.addSkip(self, _u("Skipped for some reason"))
- self.assertEqual({_u("Skipped for some reason"):[self]},
- result.skip_reasons)
- result.addSkip(self, _u("Skipped for some reason"))
- self.assertEqual({_u("Skipped for some reason"):[self, self]},
- result.skip_reasons)
- result.addSkip(self, _u("Skipped for another reason"))
- self.assertEqual({_u("Skipped for some reason"):[self, self],
- _u("Skipped for another reason"):[self]},
- result.skip_reasons)
-
- def test_now_datetime_now(self):
- result = self.makeResult()
- olddatetime = testresult.real.datetime
- def restore():
- testresult.real.datetime = olddatetime
- self.addCleanup(restore)
- class Module:
- pass
- now = datetime.datetime.now(utc)
- stubdatetime = Module()
- stubdatetime.datetime = Module()
- stubdatetime.datetime.now = lambda tz: now
- testresult.real.datetime = stubdatetime
- # Calling _now() looks up the time.
- self.assertEqual(now, result._now())
- then = now + datetime.timedelta(0, 1)
- # Set an explicit datetime, which gets returned from then on.
- result.time(then)
- self.assertNotEqual(now, result._now())
- self.assertEqual(then, result._now())
- # go back to looking it up.
- result.time(None)
- self.assertEqual(now, result._now())
-
- def test_now_datetime_time(self):
- result = self.makeResult()
- now = datetime.datetime.now(utc)
- result.time(now)
- self.assertEqual(now, result._now())
-
- def test_traceback_formatting_without_stack_hidden(self):
- # During the testtools test run, we show our levels of the stack,
- # because we want to be able to use our test suite to debug our own
- # code.
- result = self.makeResult()
- test = make_erroring_test()
- test.run(result)
- self.assertThat(
- result.errors[0][1],
- DocTestMatches(
- 'Traceback (most recent call last):\n'
- ' File "...testtools...runtest.py", line ..., in _run_user\n'
- ' return fn(*args, **kwargs)\n'
- ' File "...testtools...testcase.py", line ..., in _run_test_method\n'
- ' return self._get_test_method()()\n'
- ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
- ' 1/0\n'
- 'ZeroDivisionError: ...\n',
- doctest.ELLIPSIS | doctest.REPORT_UDIFF))
-
- def test_traceback_formatting_with_stack_hidden(self):
- result = self.makeResult()
- test = make_erroring_test()
- run_with_stack_hidden(True, test.run, result)
- self.assertThat(
- result.errors[0][1],
- DocTestMatches(
- 'Traceback (most recent call last):\n'
- ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
- ' 1/0\n'
- 'ZeroDivisionError: ...\n',
- doctest.ELLIPSIS))
-
-
-class TestMultiTestResult(TestCase):
- """Tests for 'MultiTestResult'."""
-
- def setUp(self):
- super(TestMultiTestResult, self).setUp()
- self.result1 = LoggingResult([])
- self.result2 = LoggingResult([])
- self.multiResult = MultiTestResult(self.result1, self.result2)
-
- def assertResultLogsEqual(self, expectedEvents):
- """Assert that our test results have received the expected events."""
- self.assertEqual(expectedEvents, self.result1._events)
- self.assertEqual(expectedEvents, self.result2._events)
-
- def test_empty(self):
- # Initializing a `MultiTestResult` doesn't do anything to its
- # `TestResult`s.
- self.assertResultLogsEqual([])
-
- def test_startTest(self):
- # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
- # its `TestResult`s.
- self.multiResult.startTest(self)
- self.assertResultLogsEqual([('startTest', self)])
-
- def test_stopTest(self):
- # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
- # its `TestResult`s.
- self.multiResult.stopTest(self)
- self.assertResultLogsEqual([('stopTest', self)])
-
- def test_addSkipped(self):
- # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
- # results.
- reason = _u("Skipped for some reason")
- self.multiResult.addSkip(self, reason)
- self.assertResultLogsEqual([('addSkip', self, reason)])
-
- def test_addSuccess(self):
- # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
- # all its `TestResult`s.
- self.multiResult.addSuccess(self)
- self.assertResultLogsEqual([('addSuccess', self)])
-
- def test_done(self):
- # Calling `done` on a `MultiTestResult` calls `done` on all its
- # `TestResult`s.
- self.multiResult.done()
- self.assertResultLogsEqual([('done')])
-
- def test_addFailure(self):
- # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
- # all its `TestResult`s.
- exc_info = make_exception_info(AssertionError, 'failure')
- self.multiResult.addFailure(self, exc_info)
- self.assertResultLogsEqual([('addFailure', self, exc_info)])
-
- def test_addError(self):
- # Calling `addError` on a `MultiTestResult` calls `addError` on all
- # its `TestResult`s.
- exc_info = make_exception_info(RuntimeError, 'error')
- self.multiResult.addError(self, exc_info)
- self.assertResultLogsEqual([('addError', self, exc_info)])
-
- def test_startTestRun(self):
- # Calling `startTestRun` on a `MultiTestResult` forwards to all its
- # `TestResult`s.
- self.multiResult.startTestRun()
- self.assertResultLogsEqual([('startTestRun')])
-
- def test_stopTestRun(self):
- # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
- # `TestResult`s.
- self.multiResult.stopTestRun()
- self.assertResultLogsEqual([('stopTestRun')])
-
- def test_stopTestRun_returns_results(self):
- # `MultiTestResult.stopTestRun` returns a tuple of all of the return
- # values of the `stopTestRun`s that it forwards to.
- class Result(LoggingResult):
- def stopTestRun(self):
- super(Result, self).stopTestRun()
- return 'foo'
- multi_result = MultiTestResult(Result([]), Result([]))
- result = multi_result.stopTestRun()
- self.assertEqual(('foo', 'foo'), result)
-
- def test_time(self):
- # the time call is dispatched, not eaten by the base class
- self.multiResult.time('foo')
- self.assertResultLogsEqual([('time', 'foo')])
-
-
-class TestTextTestResult(TestCase):
- """Tests for 'TextTestResult'."""
-
- def setUp(self):
- super(TestTextTestResult, self).setUp()
- self.result = TextTestResult(StringIO())
-
- def getvalue(self):
- return self.result.stream.getvalue()
-
- def test__init_sets_stream(self):
- result = TextTestResult("fp")
- self.assertEqual("fp", result.stream)
-
- def reset_output(self):
- self.result.stream = StringIO()
-
- def test_startTestRun(self):
- self.result.startTestRun()
- self.assertEqual("Tests running...\n", self.getvalue())
-
- def test_stopTestRun_count_many(self):
- test = make_test()
- self.result.startTestRun()
- self.result.startTest(test)
- self.result.stopTest(test)
- self.result.startTest(test)
- self.result.stopTest(test)
- self.result.stream = StringIO()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))
-
- def test_stopTestRun_count_single(self):
- test = make_test()
- self.result.startTestRun()
- self.result.startTest(test)
- self.result.stopTest(test)
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_count_zero(self):
- self.result.startTestRun()
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_current_time(self):
- test = make_test()
- now = datetime.datetime.now(utc)
- self.result.time(now)
- self.result.startTestRun()
- self.result.startTest(test)
- now = now + datetime.timedelta(0, 0, 0, 1)
- self.result.time(now)
- self.result.stopTest(test)
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
-
- def test_stopTestRun_successful(self):
- self.result.startTestRun()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\nOK\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_not_successful_failure(self):
- test = make_failing_test()
- self.result.startTestRun()
- test.run(self.result)
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_not_successful_error(self):
- test = make_erroring_test()
- self.result.startTestRun()
- test.run(self.result)
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_not_successful_unexpected_success(self):
- test = make_unexpectedly_successful_test()
- self.result.startTestRun()
- test.run(self.result)
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_shows_details(self):
- def run_tests():
- self.result.startTestRun()
- make_erroring_test().run(self.result)
- make_unexpectedly_successful_test().run(self.result)
- make_failing_test().run(self.result)
- self.reset_output()
- self.result.stopTestRun()
- run_with_stack_hidden(True, run_tests)
- self.assertThat(self.getvalue(),
- DocTestMatches("""...======================================================================
-ERROR: testtools.tests.test_testresult.Test.error
-----------------------------------------------------------------------
-Traceback (most recent call last):
- File "...testtools...tests...test_testresult.py", line ..., in error
- 1/0
-ZeroDivisionError:... divi... by zero...
-======================================================================
-FAIL: testtools.tests.test_testresult.Test.failed
-----------------------------------------------------------------------
-Traceback (most recent call last):
- File "...testtools...tests...test_testresult.py", line ..., in failed
- self.fail("yo!")
-AssertionError: yo!
-======================================================================
-UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
-----------------------------------------------------------------------
-...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
-
-
-class TestThreadSafeForwardingResult(TestCase):
- """Tests for `ThreadsafeForwardingResult`."""
-
- def setUp(self):
- super(TestThreadSafeForwardingResult, self).setUp()
- self.result_semaphore = threading.Semaphore(1)
- self.target = LoggingResult([])
- self.result1 = ThreadsafeForwardingResult(self.target,
- self.result_semaphore)
-
- def test_nonforwarding_methods(self):
- # startTest and stopTest are not forwarded because they need to be
- # batched.
- self.result1.startTest(self)
- self.result1.stopTest(self)
- self.assertEqual([], self.target._events)
-
- def test_startTestRun(self):
- self.result1.startTestRun()
- self.result2 = ThreadsafeForwardingResult(self.target,
- self.result_semaphore)
- self.result2.startTestRun()
- self.assertEqual(["startTestRun", "startTestRun"], self.target._events)
-
- def test_stopTestRun(self):
- self.result1.stopTestRun()
- self.result2 = ThreadsafeForwardingResult(self.target,
- self.result_semaphore)
- self.result2.stopTestRun()
- self.assertEqual(["stopTestRun", "stopTestRun"], self.target._events)
-
- def test_forwarding_methods(self):
- # error, failure, skip and success are forwarded in batches.
- exc_info1 = make_exception_info(RuntimeError, 'error')
- starttime1 = datetime.datetime.utcfromtimestamp(1.489)
- endtime1 = datetime.datetime.utcfromtimestamp(51.476)
- self.result1.time(starttime1)
- self.result1.startTest(self)
- self.result1.time(endtime1)
- self.result1.addError(self, exc_info1)
- exc_info2 = make_exception_info(AssertionError, 'failure')
- starttime2 = datetime.datetime.utcfromtimestamp(2.489)
- endtime2 = datetime.datetime.utcfromtimestamp(3.476)
- self.result1.time(starttime2)
- self.result1.startTest(self)
- self.result1.time(endtime2)
- self.result1.addFailure(self, exc_info2)
- reason = _u("Skipped for some reason")
- starttime3 = datetime.datetime.utcfromtimestamp(4.489)
- endtime3 = datetime.datetime.utcfromtimestamp(5.476)
- self.result1.time(starttime3)
- self.result1.startTest(self)
- self.result1.time(endtime3)
- self.result1.addSkip(self, reason)
- starttime4 = datetime.datetime.utcfromtimestamp(6.489)
- endtime4 = datetime.datetime.utcfromtimestamp(7.476)
- self.result1.time(starttime4)
- self.result1.startTest(self)
- self.result1.time(endtime4)
- self.result1.addSuccess(self)
- self.assertEqual([
- ('time', starttime1),
- ('startTest', self),
- ('time', endtime1),
- ('addError', self, exc_info1),
- ('stopTest', self),
- ('time', starttime2),
- ('startTest', self),
- ('time', endtime2),
- ('addFailure', self, exc_info2),
- ('stopTest', self),
- ('time', starttime3),
- ('startTest', self),
- ('time', endtime3),
- ('addSkip', self, reason),
- ('stopTest', self),
- ('time', starttime4),
- ('startTest', self),
- ('time', endtime4),
- ('addSuccess', self),
- ('stopTest', self),
- ], self.target._events)
-
-
-class TestExtendedToOriginalResultDecoratorBase(TestCase):
-
- def make_26_result(self):
- self.result = Python26TestResult()
- self.make_converter()
-
- def make_27_result(self):
- self.result = Python27TestResult()
- self.make_converter()
-
- def make_converter(self):
- self.converter = ExtendedToOriginalDecorator(self.result)
-
- def make_extended_result(self):
- self.result = ExtendedTestResult()
- self.make_converter()
-
- def check_outcome_details(self, outcome):
- """Call an outcome with a details dict to be passed through."""
- # This dict is /not/ convertible - that's deliberate, as it should
- # not hit the conversion code path.
- details = {'foo': 'bar'}
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(outcome, self, details)], self.result._events)
-
- def get_details_and_string(self):
- """Get a details dict and expected string."""
- text1 = lambda: [_b("1\n2\n")]
- text2 = lambda: [_b("3\n4\n")]
- bin1 = lambda: [_b("5\n")]
- details = {'text 1': Content(ContentType('text', 'plain'), text1),
- 'text 2': Content(ContentType('text', 'strange'), text2),
- 'bin 1': Content(ContentType('application', 'binary'), bin1)}
- return (details,
- ("Binary content:\n"
- " bin 1 (application/binary)\n"
- "\n"
- "text 1: {{{\n"
- "1\n"
- "2\n"
- "}}}\n"
- "\n"
- "text 2: {{{\n"
- "3\n"
- "4\n"
- "}}}\n"))
-
- def check_outcome_details_to_exec_info(self, outcome, expected=None):
- """Call an outcome with a details dict to be made into exc_info."""
- # The conversion is done using RemoteError and the string contents
- # of the text types in the details dict.
- if not expected:
- expected = outcome
- details, err_str = self.get_details_and_string()
- getattr(self.converter, outcome)(self, details=details)
- err = self.converter._details_to_exc_info(details)
- self.assertEqual([(expected, self, err)], self.result._events)
-
- def check_outcome_details_to_nothing(self, outcome, expected=None):
- """Call an outcome with a details dict to be swallowed."""
- if not expected:
- expected = outcome
- details = {'foo': 'bar'}
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_details_to_string(self, outcome):
- """Call an outcome with a details dict to be stringified."""
- details, err_str = self.get_details_and_string()
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(outcome, self, err_str)], self.result._events)
-
- def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
- """Call an outcome with a details dict to have an arg extracted."""
- details, _ = self.get_details_and_string()
- if extra_detail:
- details.update(extra_detail)
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(outcome, self, arg)], self.result._events)
-
- def check_outcome_exc_info(self, outcome, expected=None):
- """Check that calling a legacy outcome still works."""
- # calling some outcome with the legacy exc_info style api (no keyword
- # parameters) gets passed through.
- if not expected:
- expected = outcome
- err = sys.exc_info()
- getattr(self.converter, outcome)(self, err)
- self.assertEqual([(expected, self, err)], self.result._events)
-
- def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
- """Check that calling a legacy outcome on a fallback works."""
- # calling some outcome with the legacy exc_info style api (no keyword
- # parameters) gets passed through.
- if not expected:
- expected = outcome
- err = sys.exc_info()
- getattr(self.converter, outcome)(self, err)
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_nothing(self, outcome, expected=None):
- """Check that calling a legacy outcome still works."""
- if not expected:
- expected = outcome
- getattr(self.converter, outcome)(self)
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_string_nothing(self, outcome, expected):
- """Check that calling outcome with a string calls expected."""
- getattr(self.converter, outcome)(self, "foo")
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_string(self, outcome):
- """Check that calling outcome with a string works."""
- getattr(self.converter, outcome)(self, "foo")
- self.assertEqual([(outcome, self, "foo")], self.result._events)
-
-
-class TestExtendedToOriginalResultDecorator(
- TestExtendedToOriginalResultDecoratorBase):
-
- def test_progress_py26(self):
- self.make_26_result()
- self.converter.progress(1, 2)
-
- def test_progress_py27(self):
- self.make_27_result()
- self.converter.progress(1, 2)
-
- def test_progress_pyextended(self):
- self.make_extended_result()
- self.converter.progress(1, 2)
- self.assertEqual([('progress', 1, 2)], self.result._events)
-
- def test_shouldStop(self):
- self.make_26_result()
- self.assertEqual(False, self.converter.shouldStop)
- self.converter.decorated.stop()
- self.assertEqual(True, self.converter.shouldStop)
-
- def test_startTest_py26(self):
- self.make_26_result()
- self.converter.startTest(self)
- self.assertEqual([('startTest', self)], self.result._events)
-
- def test_startTest_py27(self):
- self.make_27_result()
- self.converter.startTest(self)
- self.assertEqual([('startTest', self)], self.result._events)
-
- def test_startTest_pyextended(self):
- self.make_extended_result()
- self.converter.startTest(self)
- self.assertEqual([('startTest', self)], self.result._events)
-
- def test_startTestRun_py26(self):
- self.make_26_result()
- self.converter.startTestRun()
- self.assertEqual([], self.result._events)
-
- def test_startTestRun_py27(self):
- self.make_27_result()
- self.converter.startTestRun()
- self.assertEqual([('startTestRun',)], self.result._events)
-
- def test_startTestRun_pyextended(self):
- self.make_extended_result()
- self.converter.startTestRun()
- self.assertEqual([('startTestRun',)], self.result._events)
-
- def test_stopTest_py26(self):
- self.make_26_result()
- self.converter.stopTest(self)
- self.assertEqual([('stopTest', self)], self.result._events)
-
- def test_stopTest_py27(self):
- self.make_27_result()
- self.converter.stopTest(self)
- self.assertEqual([('stopTest', self)], self.result._events)
-
- def test_stopTest_pyextended(self):
- self.make_extended_result()
- self.converter.stopTest(self)
- self.assertEqual([('stopTest', self)], self.result._events)
-
- def test_stopTestRun_py26(self):
- self.make_26_result()
- self.converter.stopTestRun()
- self.assertEqual([], self.result._events)
-
- def test_stopTestRun_py27(self):
- self.make_27_result()
- self.converter.stopTestRun()
- self.assertEqual([('stopTestRun',)], self.result._events)
-
- def test_stopTestRun_pyextended(self):
- self.make_extended_result()
- self.converter.stopTestRun()
- self.assertEqual([('stopTestRun',)], self.result._events)
-
- def test_tags_py26(self):
- self.make_26_result()
- self.converter.tags(1, 2)
-
- def test_tags_py27(self):
- self.make_27_result()
- self.converter.tags(1, 2)
-
- def test_tags_pyextended(self):
- self.make_extended_result()
- self.converter.tags(1, 2)
- self.assertEqual([('tags', 1, 2)], self.result._events)
-
- def test_time_py26(self):
- self.make_26_result()
- self.converter.time(1)
-
- def test_time_py27(self):
- self.make_27_result()
- self.converter.time(1)
-
- def test_time_pyextended(self):
- self.make_extended_result()
- self.converter.time(1)
- self.assertEqual([('time', 1)], self.result._events)
-
-
-class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addError'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_exc_info(self.outcome)
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_exc_info(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_exc_info(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_details_to_exec_info(self.outcome)
-
- def test_outcome_Extended_py27(self):
- self.make_27_result()
- self.check_outcome_details_to_exec_info(self.outcome)
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
- def test_outcome__no_details(self):
- self.make_extended_result()
- self.assertThat(
- lambda: getattr(self.converter, self.outcome)(self),
- Raises(MatchesException(ValueError)))
-
-
-class TestExtendedToOriginalAddFailure(
- TestExtendedToOriginalAddError):
-
- outcome = 'addFailure'
-
-
-class TestExtendedToOriginalAddExpectedFailure(
- TestExtendedToOriginalAddError):
-
- outcome = 'addExpectedFailure'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
-
-
-
-class TestExtendedToOriginalAddSkip(
- TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addSkip'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_string_nothing(self.outcome, 'addSuccess')
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_string(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_string(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_string_nothing(self.outcome, 'addSuccess')
-
- def test_outcome_Extended_py27_no_reason(self):
- self.make_27_result()
- self.check_outcome_details_to_string(self.outcome)
-
- def test_outcome_Extended_py27_reason(self):
- self.make_27_result()
- self.check_outcome_details_to_arg(self.outcome, 'foo',
- {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
- def test_outcome__no_details(self):
- self.make_extended_result()
- self.assertThat(
- lambda: getattr(self.converter, self.outcome)(self),
- Raises(MatchesException(ValueError)))
-
-
-class TestExtendedToOriginalAddSuccess(
- TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addSuccess'
- expected = 'addSuccess'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_nothing(self.outcome, self.expected)
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_nothing(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_nothing(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_details_to_nothing(self.outcome, self.expected)
-
- def test_outcome_Extended_py27(self):
- self.make_27_result()
- self.check_outcome_details_to_nothing(self.outcome)
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
-
-class TestExtendedToOriginalAddUnexpectedSuccess(
- TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addUnexpectedSuccess'
- expected = 'addFailure'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- getattr(self.converter, self.outcome)(self)
- [event] = self.result._events
- self.assertEqual((self.expected, self), event[:2])
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_nothing(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_nothing(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- getattr(self.converter, self.outcome)(self)
- [event] = self.result._events
- self.assertEqual((self.expected, self), event[:2])
-
- def test_outcome_Extended_py27(self):
- self.make_27_result()
- self.check_outcome_details_to_nothing(self.outcome)
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
-
-class TestExtendedToOriginalResultOtherAttributes(
- TestExtendedToOriginalResultDecoratorBase):
-
- def test_other_attribute(self):
- class OtherExtendedResult:
- def foo(self):
- return 2
- bar = 1
- self.result = OtherExtendedResult()
- self.make_converter()
- self.assertEqual(1, self.converter.bar)
- self.assertEqual(2, self.converter.foo())
-
-
-class TestNonAsciiResults(TestCase):
- """Test that all kinds of tracebacks are cleanly interpreted as unicode
-
- Currently only uses weak "contains" assertions, would be good to be much
- stricter about the expected output. This would add a few failures for the
- current release of IronPython for instance, which gets some traceback
- lines muddled.
- """
-
- _sample_texts = (
- _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
- _u("\u5357\u7121"), # In ISO 2022 encodings
- _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
- )
- # Everything but Jython shows syntax errors on the current character
- _error_on_character = os.name != "java"
-
- def _run(self, stream, test):
- """Run the test, the same as in testtools.run but not to stdout"""
- result = TextTestResult(stream)
- result.startTestRun()
- try:
- return test.run(result)
- finally:
- result.stopTestRun()
-
- def _write_module(self, name, encoding, contents):
- """Create Python module on disk with contents in given encoding"""
- try:
- # Need to pre-check that the coding is valid or codecs.open drops
- # the file without closing it which breaks non-refcounted pythons
- codecs.lookup(encoding)
- except LookupError:
- self.skip("Encoding unsupported by implementation: %r" % encoding)
- f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
- try:
- f.write(contents)
- finally:
- f.close()
-
- def _test_external_case(self, testline, coding="ascii", modulelevel="",
- suffix=""):
- """Create and run a test case in a separate module"""
- self._setup_external_case(testline, coding, modulelevel, suffix)
- return self._run_external_case()
-
- def _setup_external_case(self, testline, coding="ascii", modulelevel="",
- suffix=""):
- """Create a test case in a separate module"""
- _, prefix, self.modname = self.id().rsplit(".", 2)
- self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
- self.addCleanup(shutil.rmtree, self.dir)
- self._write_module(self.modname, coding,
- # Older Python 2 versions don't see a coding declaration in a
- # docstring so it has to be in a comment, but then we can't
- # work around bug: <http://ironpython.codeplex.com/workitem/26940>
- "# coding: %s\n"
- "import testtools\n"
- "%s\n"
- "class Test(testtools.TestCase):\n"
- " def runTest(self):\n"
- " %s\n" % (coding, modulelevel, testline))
-
- def _run_external_case(self):
- """Run the prepared test case in a separate module"""
- sys.path.insert(0, self.dir)
- self.addCleanup(sys.path.remove, self.dir)
- module = __import__(self.modname)
- self.addCleanup(sys.modules.pop, self.modname)
- stream = StringIO()
- self._run(stream, module.Test())
- return stream.getvalue()
-
- def _silence_deprecation_warnings(self):
- """Shut up DeprecationWarning for this test only"""
- warnings.simplefilter("ignore", DeprecationWarning)
- self.addCleanup(warnings.filters.remove, warnings.filters[0])
-
- def _get_sample_text(self, encoding="unicode_internal"):
- if encoding is None and str_is_unicode:
- encoding = "unicode_internal"
- for u in self._sample_texts:
- try:
- b = u.encode(encoding)
- if u == b.decode(encoding):
- if str_is_unicode:
- return u, u
- return u, b
- except (LookupError, UnicodeError):
- pass
- self.skip("Could not find a sample text for encoding: %r" % encoding)
-
- def _as_output(self, text):
- return text
-
- def test_non_ascii_failure_string(self):
- """Assertion contents can be non-ascii and should get decoded"""
- text, raw = self._get_sample_text(_get_exception_encoding())
- textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
- self.assertIn(self._as_output(text), textoutput)
-
- def test_non_ascii_failure_string_via_exec(self):
- """Assertion via exec can be non-ascii and still gets decoded"""
- text, raw = self._get_sample_text(_get_exception_encoding())
- textoutput = self._test_external_case(
- testline='exec ("self.fail(%s)")' % _r(raw))
- self.assertIn(self._as_output(text), textoutput)
-
- def test_control_characters_in_failure_string(self):
- """Control characters in assertions should be escaped"""
- textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
- self.expectFailure("Defense against the beeping horror unimplemented",
- self.assertNotIn, self._as_output("\a\a\a"), textoutput)
- self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
-
- def test_os_error(self):
- """Locale error messages from the OS shouldn't break anything"""
- textoutput = self._test_external_case(
- modulelevel="import os",
- testline="os.mkdir('/')")
- if os.name != "nt" or sys.version_info < (2, 5):
- self.assertIn(self._as_output("OSError: "), textoutput)
- else:
- self.assertIn(self._as_output("WindowsError: "), textoutput)
-
- def test_assertion_text_shift_jis(self):
- """A terminal raw backslash in an encoded string is weird but fine"""
- example_text = _u("\u5341")
- textoutput = self._test_external_case(
- coding="shift_jis",
- testline="self.fail('%s')" % example_text)
- if str_is_unicode:
- output_text = example_text
- else:
- output_text = example_text.encode("shift_jis").decode(
- _get_exception_encoding(), "replace")
- self.assertIn(self._as_output("AssertionError: %s" % output_text),
- textoutput)
-
- def test_file_comment_iso2022_jp(self):
- """Control character escapes must be preserved if valid encoding"""
- example_text, _ = self._get_sample_text("iso2022_jp")
- textoutput = self._test_external_case(
- coding="iso2022_jp",
- testline="self.fail('Simple') # %s" % example_text)
- self.assertIn(self._as_output(example_text), textoutput)
-
- def test_unicode_exception(self):
- """Exceptions that can be formatted losslessly as unicode should be"""
- example_text, _ = self._get_sample_text()
- exception_class = (
- "class FancyError(Exception):\n"
- # A __unicode__ method does nothing on py3k but the default works
- " def __unicode__(self):\n"
- " return self.args[0]\n")
- textoutput = self._test_external_case(
- modulelevel=exception_class,
- testline="raise FancyError(%s)" % _r(example_text))
- self.assertIn(self._as_output(example_text), textoutput)
-
- def test_unprintable_exception(self):
- """A totally useless exception instance still prints something"""
- exception_class = (
- "class UnprintableError(Exception):\n"
- " def __str__(self):\n"
- " raise RuntimeError\n"
- " def __unicode__(self):\n"
- " raise RuntimeError\n"
- " def __repr__(self):\n"
- " raise RuntimeError\n")
- textoutput = self._test_external_case(
- modulelevel=exception_class,
- testline="raise UnprintableError")
- self.assertIn(self._as_output(
- "UnprintableError: <unprintable UnprintableError object>\n"),
- textoutput)
-
- def test_string_exception(self):
- """Raise a string rather than an exception instance if supported"""
- if sys.version_info > (2, 6):
- self.skip("No string exceptions in Python 2.6 or later")
- elif sys.version_info > (2, 5):
- self._silence_deprecation_warnings()
- textoutput = self._test_external_case(testline="raise 'plain str'")
- self.assertIn(self._as_output("\nplain str\n"), textoutput)
-
- def test_non_ascii_dirname(self):
- """Script paths in the traceback can be non-ascii"""
- text, raw = self._get_sample_text(sys.getfilesystemencoding())
- textoutput = self._test_external_case(
- # Avoid bug in Python 3 by giving a unicode source encoding rather
- # than just ascii which raises a SyntaxError with no other details
- coding="utf-8",
- testline="self.fail('Simple')",
- suffix=raw)
- self.assertIn(self._as_output(text), textoutput)
-
- def test_syntax_error(self):
- """Syntax errors should still have fancy special-case formatting"""
- textoutput = self._test_external_case("exec ('f(a, b c)')")
- self.assertIn(self._as_output(
- ' File "<string>", line 1\n'
- ' f(a, b c)\n'
- + ' ' * self._error_on_character +
- ' ^\n'
- 'SyntaxError: '
- ), textoutput)
-
- def test_syntax_error_malformed(self):
- """Syntax errors with bogus parameters shouldn't break anything"""
- textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
- self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
-
- def test_syntax_error_import_binary(self):
- """Importing a binary file shouldn't break SyntaxError formatting"""
- if sys.version_info < (2, 5):
- # Python 2.4 assumes the file is latin-1 and tells you off
- self._silence_deprecation_warnings()
- self._setup_external_case("import bad")
- f = open(os.path.join(self.dir, "bad.py"), "wb")
- try:
- f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
- finally:
- f.close()
- textoutput = self._run_external_case()
- self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
-
- def test_syntax_error_line_iso_8859_1(self):
- """Syntax error on a latin-1 line shows the line decoded"""
- text, raw = self._get_sample_text("iso-8859-1")
- textoutput = self._setup_external_case("import bad")
- self._write_module("bad", "iso-8859-1",
- "# coding: iso-8859-1\n! = 0 # %s\n" % text)
- textoutput = self._run_external_case()
- self.assertIn(self._as_output(_u(
- #'bad.py", line 2\n'
- ' ! = 0 # %s\n'
- ' ^\n'
- 'SyntaxError: ') %
- (text,)), textoutput)
-
- def test_syntax_error_line_iso_8859_5(self):
- """Syntax error on an iso-8859-5 line shows the line decoded"""
- text, raw = self._get_sample_text("iso-8859-5")
- textoutput = self._setup_external_case("import bad")
- self._write_module("bad", "iso-8859-5",
- "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
- textoutput = self._run_external_case()
- self.assertIn(self._as_output(_u(
- #'bad.py", line 2\n'
- ' %% = 0 # %s\n'
- + ' ' * self._error_on_character +
- ' ^\n'
- 'SyntaxError: ') %
- (text,)), textoutput)
-
- def test_syntax_error_line_euc_jp(self):
- """Syntax error on a euc_jp line shows the line decoded"""
- text, raw = self._get_sample_text("euc_jp")
- textoutput = self._setup_external_case("import bad")
- self._write_module("bad", "euc_jp",
- "# coding: euc_jp\n$ = 0 # %s\n" % text)
- textoutput = self._run_external_case()
- self.assertIn(self._as_output(_u(
- #'bad.py", line 2\n'
- ' $ = 0 # %s\n'
- + ' ' * self._error_on_character +
- ' ^\n'
- 'SyntaxError: ') %
- (text,)), textoutput)
-
- def test_syntax_error_line_utf_8(self):
- """Syntax error on a utf-8 line shows the line decoded"""
- text, raw = self._get_sample_text("utf-8")
- textoutput = self._setup_external_case("import bad")
- self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
- textoutput = self._run_external_case()
- self.assertIn(self._as_output(_u(
- 'bad.py", line 1\n'
- ' ^ = 0 # %s\n'
- + ' ' * self._error_on_character +
- ' ^\n'
- 'SyntaxError: ') %
- text), textoutput)
-
-
-class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
- """Test that running under unittest produces clean ascii strings"""
-
- def _run(self, stream, test):
- from unittest import TextTestRunner as _Runner
- return _Runner(stream).run(test)
-
- def _as_output(self, text):
- if str_is_unicode:
- return text
- return text.encode("utf-8")
-
-
-class TestDetailsToStr(TestCase):
-
- def test_no_details(self):
- string = _details_to_str({})
- self.assertThat(string, Equals(''))
-
- def test_binary_content(self):
- content = content_from_stream(
- StringIO('foo'), content_type=ContentType('image', 'jpeg'))
- string = _details_to_str({'attachment': content})
- self.assertThat(
- string, Equals("""\
-Binary content:
- attachment (image/jpeg)
-"""))
-
- def test_single_line_content(self):
- content = text_content('foo')
- string = _details_to_str({'attachment': content})
- self.assertThat(string, Equals('attachment: {{{foo}}}\n'))
-
- def test_multi_line_text_content(self):
- content = text_content('foo\nbar\nbaz')
- string = _details_to_str({'attachment': content})
- self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n'))
-
- def test_special_text_content(self):
- content = text_content('foo')
- string = _details_to_str({'attachment': content}, special='attachment')
- self.assertThat(string, Equals('foo\n'))
-
- def test_multiple_text_content(self):
- string = _details_to_str(
- {'attachment': text_content('foo\nfoo'),
- 'attachment-1': text_content('bar\nbar')})
- self.assertThat(
- string, Equals('attachment: {{{\n'
- 'foo\n'
- 'foo\n'
- '}}}\n'
- '\n'
- 'attachment-1: {{{\n'
- 'bar\n'
- 'bar\n'
- '}}}\n'))
-
- def test_empty_attachment(self):
- string = _details_to_str({'attachment': text_content('')})
- self.assertThat(
- string, Equals("""\
-Empty attachments:
- attachment
-"""))
-
- def test_lots_of_different_attachments(self):
- jpg = lambda x: content_from_stream(
- StringIO(x), ContentType('image', 'jpeg'))
- attachments = {
- 'attachment': text_content('foo'),
- 'attachment-1': text_content('traceback'),
- 'attachment-2': jpg('pic1'),
- 'attachment-3': text_content('bar'),
- 'attachment-4': text_content(''),
- 'attachment-5': jpg('pic2'),
- }
- string = _details_to_str(attachments, special='attachment-1')
- self.assertThat(
- string, Equals("""\
-Binary content:
- attachment-2 (image/jpeg)
- attachment-5 (image/jpeg)
-Empty attachments:
- attachment-4
-
-attachment: {{{foo}}}
-attachment-3: {{{bar}}}
-
-traceback
-"""))
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
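As background for the ExtendedToOriginalDecorator contract tests deleted above, here is a minimal sketch (not part of this change) of the adaptation they exercise: a caller using the extended, details-passing API drives a plain unittest-style result. The Passing test case is illustrative only.

import unittest

from testtools import ExtendedToOriginalDecorator
from testtools.content import text_content


class Passing(unittest.TestCase):
    def test_ok(self):
        pass


case = Passing('test_ok')
legacy = unittest.TestResult()                 # knows nothing about details
result = ExtendedToOriginalDecorator(legacy)

result.startTest(case)
# The extended API call carries a details dict; the decorator adapts it for
# the wrapped legacy result (for addSuccess it is simply dropped).
result.addSuccess(case, details={'note': text_content('extra diagnostic data')})
result.stopTest(case)
print(legacy.testsRun, legacy.wasSuccessful())  # expected: 1 True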
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py b/test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py
deleted file mode 100644
index 05647577cdb..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testsuite.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
-
-"""Test ConcurrentTestSuite and related things."""
-
-__metaclass__ = type
-
-import unittest
-
-from testtools import (
- ConcurrentTestSuite,
- iterate_tests,
- TestCase,
- )
-from testtools.helpers import try_import
-from testtools.testsuite import FixtureSuite
-from testtools.tests.helpers import LoggingResult
-
-FunctionFixture = try_import('fixtures.FunctionFixture')
-
-
-class TestConcurrentTestSuiteRun(TestCase):
-
- def test_trivial(self):
- log = []
- result = LoggingResult(log)
- class Sample(TestCase):
- def __hash__(self):
- return id(self)
-
- def test_method1(self):
- pass
- def test_method2(self):
- pass
- test1 = Sample('test_method1')
- test2 = Sample('test_method2')
- original_suite = unittest.TestSuite([test1, test2])
- suite = ConcurrentTestSuite(original_suite, self.split_suite)
- suite.run(result)
- # log[0] is the 'time' event for the first test starting, so skip it.
- test1 = log[1][1]
- test2 = log[-1][1]
- self.assertIsInstance(test1, Sample)
- self.assertIsInstance(test2, Sample)
- self.assertNotEqual(test1.id(), test2.id())
-
- def split_suite(self, suite):
- tests = list(iterate_tests(suite))
- return tests[0], tests[1]
-
-
-class TestFixtureSuite(TestCase):
-
- def setUp(self):
- super(TestFixtureSuite, self).setUp()
- if FunctionFixture is None:
- self.skip("Need fixtures")
-
- def test_fixture_suite(self):
- log = []
- class Sample(TestCase):
- def test_one(self):
- log.append(1)
- def test_two(self):
- log.append(2)
- fixture = FunctionFixture(
- lambda: log.append('setUp'),
- lambda fixture: log.append('tearDown'))
- suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
- suite.run(LoggingResult([]))
- self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
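For context on TestFixtureSuite above, a minimal usage sketch (not part of this change) of FixtureSuite wrapping a suite in a fixture-like object. The LogFixture class is illustrative and stands in for the fixtures.FunctionFixture used by the deleted test; FixtureSuite only requires setUp() and cleanUp().

import unittest

from testtools import TestResult
from testtools.testsuite import FixtureSuite

log = []


class LogFixture(object):
    # Anything with setUp()/cleanUp() methods works for FixtureSuite.
    def setUp(self):
        log.append('setUp')

    def cleanUp(self):
        log.append('tearDown')


class Sample(unittest.TestCase):
    def test_one(self):
        log.append(1)


FixtureSuite(LogFixture(), [Sample('test_one')]).run(TestResult())
print(log)  # expected: ['setUp', 1, 'tearDown']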
diff --git a/test/3rdparty/testtools-0.9.12/testtools/testsuite.py b/test/3rdparty/testtools-0.9.12/testtools/testsuite.py
deleted file mode 100644
index 18de8b89e12..00000000000
--- a/test/3rdparty/testtools-0.9.12/testtools/testsuite.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
-
-"""Test suites and related things."""
-
-__metaclass__ = type
-__all__ = [
- 'ConcurrentTestSuite',
- 'iterate_tests',
- ]
-
-from testtools.helpers import try_imports
-
-Queue = try_imports(['Queue.Queue', 'queue.Queue'])
-
-import threading
-import unittest
-
-import testtools
-
-
-def iterate_tests(test_suite_or_case):
- """Iterate through all of the test cases in 'test_suite_or_case'."""
- try:
- suite = iter(test_suite_or_case)
- except TypeError:
- yield test_suite_or_case
- else:
- for test in suite:
- for subtest in iterate_tests(test):
- yield subtest
-
-
-class ConcurrentTestSuite(unittest.TestSuite):
- """A TestSuite whose run() calls out to a concurrency strategy."""
-
- def __init__(self, suite, make_tests):
- """Create a ConcurrentTestSuite to execute suite.
-
- :param suite: A suite to run concurrently.
- :param make_tests: A helper function to split the tests in the
- ConcurrentTestSuite into some number of concurrently executing
- sub-suites. make_tests must take a suite, and return an iterable
- of TestCase-like objects, each of which must have a run(result)
- method.
- """
- super(ConcurrentTestSuite, self).__init__([suite])
- self.make_tests = make_tests
-
- def run(self, result):
- """Run the tests concurrently.
-
- This calls out to the provided make_tests helper, and then serialises
- the results so that result only sees activity from one TestCase at
- a time.
-
- ConcurrentTestSuite provides no special mechanism to stop the tests
- returned by make_tests; it is up to make_tests to honour the shouldStop
- attribute on the result object the tests are run with, which will be
- set if an exception is raised in the thread that ConcurrentTestSuite.run
- is called in.
- """
- tests = self.make_tests(self)
- try:
- threads = {}
- queue = Queue()
- result_semaphore = threading.Semaphore(1)
- for test in tests:
- process_result = testtools.ThreadsafeForwardingResult(result,
- result_semaphore)
- reader_thread = threading.Thread(
- target=self._run_test, args=(test, process_result, queue))
- threads[test] = reader_thread, process_result
- reader_thread.start()
- while threads:
- finished_test = queue.get()
- threads[finished_test][0].join()
- del threads[finished_test]
- except:
- for thread, process_result in threads.values():
- process_result.stop()
- raise
-
- def _run_test(self, test, process_result, queue):
- try:
- test.run(process_result)
- finally:
- queue.put(test)
-
-
-class FixtureSuite(unittest.TestSuite):
-
- def __init__(self, fixture, tests):
- super(FixtureSuite, self).__init__(tests)
- self._fixture = fixture
-
- def run(self, result):
- self._fixture.setUp()
- try:
- super(FixtureSuite, self).run(result)
- finally:
- self._fixture.cleanUp()
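To make the deleted module above concrete, here is a minimal usage sketch (not part of this change) of ConcurrentTestSuite with a make_tests helper that splits the wrapped suite into two sub-suites, one per worker thread. The Sample tests are illustrative only.

import unittest

from testtools import ConcurrentTestSuite, TestResult, iterate_tests


class Sample(unittest.TestCase):
    def test_one(self):
        pass

    def test_two(self):
        pass


def split_in_two(suite):
    # make_tests helper: return independently runnable sub-suites.
    tests = list(iterate_tests(suite))
    half = len(tests) // 2
    return [unittest.TestSuite(tests[:half]), unittest.TestSuite(tests[half:])]


inner = unittest.TestSuite([Sample('test_one'), Sample('test_two')])
result = TestResult()
ConcurrentTestSuite(inner, split_in_two).run(result)
print(result.testsRun, result.wasSuccessful())  # expected: 2 True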
diff --git a/test/3rdparty/testtools-0.9.12/.bzrignore b/test/3rdparty/testtools-0.9.34/.gitignore
index d6aac0da189..862442d5067 100644
--- a/test/3rdparty/testtools-0.9.12/.bzrignore
+++ b/test/3rdparty/testtools-0.9.34/.gitignore
@@ -7,4 +7,9 @@ TAGS
apidocs
_trial_temp
doc/_build
-./.testrepository
+.testrepository
+./testtools.egg-info
+*.pyc
+*.swp
+*~
+testtools.egg-info
diff --git a/test/3rdparty/testtools-0.9.12/LICENSE b/test/3rdparty/testtools-0.9.34/LICENSE
index 42421b0b2d9..21010cc4856 100644
--- a/test/3rdparty/testtools-0.9.12/LICENSE
+++ b/test/3rdparty/testtools-0.9.34/LICENSE
@@ -16,6 +16,8 @@ The testtools authors are:
* Christian Kampka
* Gavin Panella
* Martin Pool
+ * Vincent Ladeuil
+ * Nikola Đipanov
and are collectively referred to as "testtools developers".
diff --git a/test/3rdparty/testtools-0.9.12/MANIFEST.in b/test/3rdparty/testtools-0.9.34/MANIFEST.in
index 92a623b2a12..4619349f3b9 100644
--- a/test/3rdparty/testtools-0.9.12/MANIFEST.in
+++ b/test/3rdparty/testtools-0.9.34/MANIFEST.in
@@ -1,11 +1,9 @@
include LICENSE
-include HACKING
include Makefile
include MANIFEST.in
-include MANUAL
include NEWS
-include README
-include .bzrignore
+include README.rst
+include .gitignore
graft doc
graft doc/_static
graft doc/_templates
diff --git a/test/3rdparty/testtools-0.9.12/Makefile b/test/3rdparty/testtools-0.9.34/Makefile
index b3e40ecddfb..ccaa7762286 100644
--- a/test/3rdparty/testtools-0.9.12/Makefile
+++ b/test/3rdparty/testtools-0.9.34/Makefile
@@ -1,4 +1,4 @@
-# See README for copyright and licensing details.
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
PYTHON=python
SOURCES=$(shell find testtools -name "*.py")
@@ -34,7 +34,7 @@ apidocs:
PYTHONWARNINGS='ignore::DeprecationWarning' \
pydoctor --make-html --add-package testtools \
--docformat=restructuredtext --project-name=testtools \
- --project-url=https://launchpad.net/testtools
+ --project-url=https://github.com/testing-cabal/testtools
doc/news.rst:
ln -s ../NEWS doc/news.rst
diff --git a/test/3rdparty/testtools-0.9.12/NEWS b/test/3rdparty/testtools-0.9.34/NEWS
index 9ff3c05ce22..a8aa4d23b3a 100644
--- a/test/3rdparty/testtools-0.9.12/NEWS
+++ b/test/3rdparty/testtools-0.9.34/NEWS
@@ -3,6 +3,599 @@ testtools NEWS
Changes and improvements to testtools_, grouped by release.
+
+NEXT
+~~~~
+
+0.9.34
+~~~~~~
+
+Improvements
+------------
+
+* Added ability for ``testtools.TestCase`` instances to force a test to
+ fail, even if no assertions failed. (Thomi Richards)
+
+* Added ``testtools.content.StacktraceContent``, a content object that
+ automatically creates a ``StackLinesContent`` object containing the current
+ stack trace. (Thomi Richards)
+
+* ``AnyMatch`` is now exported properly in ``testtools.matchers``.
+ (Robert Collins, Rob Kennedy, github #44)
+
+* In Python 3.3, if there are duplicate test ids, tests.sort() will
+ fail and raise TypeError. sorted_tests() now detects duplicate test ids
+ up front to ensure that all test ids are unique.
+ (Kui Shi, #1243922)
+
+* ``json_content`` is now in the ``__all__`` attribute for
+ ``testtools.content``. (Robert Collins)
+
+* Network tests now bind to 127.0.0.1 to avoid (even temporary) network
+ visible ports. (Benedikt Morbach, github #46)
+
+* Test listing now explicitly indicates import failures by printing
+ 'Failed to import' and exiting with code 2, rather than only signalling
+ them through the test name. (Robert Collins, #1245672)
+
+* ``test_compat.TestDetectEncoding.test_bom`` now works on Python 3.3 - the
+ corner case with euc_jp is no longer permitted in Python 3.3 so we can
+ skip it. (Martin [gz], #1251962)
+
+0.9.33
+~~~~~~
+
+Improvements
+------------
+
+* Added ``addDetailUniqueName`` method to the ``testtools.TestCase`` class.
+ (Thomi Richards)
+
+* Removed some unused code from ``testtools.content.TracebackContent``.
+ (Thomi Richards)
+
+* Added ``testtools.StackLinesContent``: a content object for displaying
+ pre-processed stack lines. (Thomi Richards)
+
+* ``StreamSummary`` was calculating testsRun incorrectly: ``exists`` status
+ tests were counted as run tests, but they are not.
+ (Robert Collins, #1203728)
+
+0.9.32
+~~~~~~
+
+Regular maintenance release. Special thanks to new contributor, Xiao Hanyu!
+
+Changes
+-------
+
+ * ``testtools.compat._format_exc_info`` has been refactored into several
+ smaller functions. (Thomi Richards)
+
+Improvements
+------------
+
+* Stacktrace filtering no longer hides unittest frames that are surrounded by
+ user frames. We will reenable this when we figure out a better algorithm for
+ retaining meaning. (Robert Collins, #1188420)
+
+* The compatibility code for skipped tests with unittest2 was broken.
+ (Robert Collins, #1190951)
+
+* Various documentation improvements (Clint Byrum, Xiao Hanyu).
+
+0.9.31
+~~~~~~
+
+Improvements
+------------
+
+* ``ExpectedException`` now accepts a msg parameter for describing an error,
+ much the same as assertEquals etc. (Robert Collins)
+
+0.9.30
+~~~~~~
+
+A new sort of TestResult, the StreamResult, has been added as a prototype for
+a revised standard library test result API. Expect this API to change.
+Although we will try to preserve compatibility for early adopters, it is
+experimental and we might need to break it if it turns out to be unsuitable.
+
+Improvements
+------------
+
+* ``assertRaises`` works properly for exception classes that have custom
+ metaclasses.
+
+* ``ConcurrentTestSuite`` was silently eating exceptions that propagate from
+ the test.run(result) method call. Ignoring them is fine in a normal test
+ runner, but when they happen in a different thread, the thread that called
+ suite.run() is not in the stack anymore, and the exceptions are lost. We now
+ create a synthetic test recording any such exception.
+ (Robert Collins, #1130429)
+
+* Fixed SyntaxError raised in ``_compat2x.py`` when installing via Python 3.
+ (Will Bond, #941958)
+
+* New class ``StreamResult`` which defines the API for the new result type.
+ (Robert Collins)
+
+* New support class ``ConcurrentStreamTestSuite`` for convenient construction
+ and utilisation of ``StreamToQueue`` objects. (Robert Collins)
+
+* New support class ``CopyStreamResult`` which forwards events onto multiple
+ ``StreamResult`` objects (each of which receives all the events).
+ (Robert Collins)
+
+* New support class ``StreamSummary`` which summarises a ``StreamResult``
+ stream compatibly with ``TestResult`` code. (Robert Collins)
+
+* New support class ``StreamTagger`` which adds or removes tags from
+  ``StreamResult`` events. (Robert Collins)
+
+* New support class ``StreamToDict`` which converts a ``StreamResult`` to a
+ series of dicts describing a test. Useful for writing trivial stream
+ analysers. (Robert Collins)
+
+* New support class ``TestControl`` which permits cancelling an in-progress
+ run. (Robert Collins)
+
+* New support class ``StreamFailFast`` which calls a ``TestControl`` instance
+ to abort the test run when a failure is detected. (Robert Collins)
+
+* New support class ``ExtendedToStreamDecorator`` which translates both regular
+ unittest TestResult API calls and the ExtendedTestResult API which testtools
+ has supported into the StreamResult API. ExtendedToStreamDecorator also
+ forwards calls made in the StreamResult API, permitting it to be used
+ anywhere a StreamResult is used. Key TestResult query methods like
+ wasSuccessful and shouldStop are synchronised with the StreamResult API
+ calls, but the detailed statistics like the list of errors are not - a
+ separate consumer will be created to support that.
+ (Robert Collins)
+
+* New support class ``StreamToExtendedDecorator`` which translates
+ ``StreamResult`` API calls into ``ExtendedTestResult`` (or any older
+ ``TestResult``) calls. This permits using un-migrated result objects with
+ new runners / tests. (Robert Collins)
+
+* New support class ``StreamToQueue`` for sending messages to one
+ ``StreamResult`` from multiple threads. (Robert Collins)
+
+* New support class ``TimestampingStreamResult`` which adds a timestamp to
+ events with no timestamp. (Robert Collins)
+
+* New ``TestCase`` decorator ``DecorateTestCaseResult`` that adapts the
+ ``TestResult`` or ``StreamResult`` a case will be run with, for ensuring that
+ a particular result object is used even if the runner running the test doesn't
+ know to use it. (Robert Collins)
+
+* New test support class ``testtools.testresult.doubles.StreamResult``, which
+ captures all the StreamResult events. (Robert Collins)
+
+* ``PlaceHolder`` can now hold tags, and applies them before, and removes them
+ after, the test. (Robert Collins)
+
+* ``PlaceHolder`` can now hold timestamps, and applies them before the test and
+ then before the outcome. (Robert Collins)
+
+* ``StreamResultRouter`` added. This is useful for demultiplexing - e.g. for
+ partitioning analysis of events or sending feedback encapsulated in
+ StreamResult events back to their source. (Robert Collins)
+
+* ``testtools.run.TestProgram`` now supports the ``TestRunner`` taking over
+ responsibility for formatting the output of ``--list-tests``.
+ (Robert Collins)
+
+* The error message for setUp and tearDown upcall errors was broken on Python
+ 3.4. (Monty Taylor, Robert Collins, #1140688)
+
+* The repr of object() on pypy includes the object id, which was breaking a
+ test that accidentally depended on the CPython repr for object().
+ (Jonathan Lange)
+
+0.9.29
+~~~~~~
+
+A simple bug fix, and better error messages when you don't up-call.
+
+Changes
+-------
+
+* ``testtools.content_type.ContentType`` incorrectly used ',' rather than ';'
+ to separate parameters. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.compat.unicode_output_stream`` was wrapping a stream encoder
+ around ``io.StringIO`` and ``io.TextIOWrapper`` objects, which was incorrect.
+ (Robert Collins)
+
+* Report the name of the source file for setUp and tearDown upcall errors.
+ (Monty Taylor)
+
+0.9.28
+~~~~~~
+
+Testtools has moved VCS - https://github.com/testing-cabal/testtools/ is
+the new home. Bug tracking is still on Launchpad, and releases are on Pypi.
+
+We made this change to take advantage of the richer ecosystem of tools around
+Git, and to lower the barrier for new contributors.
+
+Improvements
+------------
+
+* New ``testtools.testcase.attr`` and ``testtools.testcase.WithAttributes``
+ helpers allow marking up test case methods with simple labels. This permits
+ filtering tests with more granularity than organising them into modules and
+ test classes. (Robert Collins)
+
+0.9.27
+~~~~~~
+
+Improvements
+------------
+
+* New matcher ``HasLength`` for matching the length of a collection.
+ (Robert Collins)
+
+* New matcher ``MatchesPredicateWithParams`` makes it still easier to create
+ ad hoc matchers. (Robert Collins)
+
+* We have a simpler release process in future - see doc/hacking.rst.
+ (Robert Collins)
+
+0.9.26
+~~~~~~
+
+Brown paper bag fix: failed to document the need for setup to be able to use
+extras. Compounded by pip not supporting setup_requires.
+
+Changes
+-------
+
+* setup.py now can generate egg_info even if extras is not available.
+ Also lists extras in setup_requires for easy_install.
+ (Robert Collins, #1102464)
+
+0.9.25
+~~~~~~
+
+Changes
+-------
+
+* ``python -m testtools.run --load-list`` will now preserve any custom suites
+ (such as ``testtools.FixtureSuite`` or ``testresources.OptimisingTestSuite``)
+ rather than flattening them.
+ (Robert Collins, #827175)
+
+* Testtools now depends on extras, a small library split out from it to contain
+ generally useful non-testing facilities. Since extras has been around for a
+ couple of testtools releases now, we're making this into a hard dependency of
+ testtools. (Robert Collins)
+
+* Testtools now uses setuptools rather than distutils so that we can document
+ the extras dependency. (Robert Collins)
+
+Improvements
+------------
+
+* Testtools will no longer override details registered by test code under the
+  name 'traceback' when reporting caught exceptions from test code.
+  (Robert Collins, #812793)
+
+0.9.24
+~~~~~~
+
+Changes
+-------
+
+* ``testtools.run discover`` will now sort the tests it discovered. This is a
+ workaround for http://bugs.python.org/issue16709. Non-standard test suites
+ are preserved, and their ``sort_tests()`` method called (if they have such an
+ attribute). ``testtools.testsuite.sorted_tests(suite, True)`` can be used by
+ such suites to do a local sort. (Robert Collins, #1091512)
+
+* ``ThreadsafeForwardingResult`` now defines a stub ``progress`` method, which
+ fixes ``testr run`` of streams containing progress markers (by discarding the
+ progress data). (Robert Collins, #1019165)
+
+0.9.23
+~~~~~~
+
+Changes
+-------
+
+* ``run.TestToolsTestRunner`` now accepts the verbosity, buffer and failfast
+ arguments the upstream python TestProgram code wants to give it, making it
+ possible to support them in a compatible fashion. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.run`` now supports the ``-f`` or ``--failfast`` parameter.
+ Previously it was advertised in the help but ignored.
+ (Robert Collins, #1090582)
+
+* ``AnyMatch`` added, a new matcher that matches when any item in a collection
+ matches the given matcher. (Jonathan Lange)
+
+* Spelling corrections to documentation. (Vincent Ladeuil)
+
+* ``TestProgram`` now has a sane default for its ``testRunner`` argument.
+ (Vincent Ladeuil)
+
+* The test suite passes on Python 3 again. (Robert Collins)
+
+0.9.22
+~~~~~~
+
+Improvements
+------------
+
+* ``content_from_file`` and ``content_from_stream`` now accept seek_offset and
+ seek_whence parameters allowing them to be used to grab less than the full
+ stream, or to be used with StringIO streams. (Robert Collins, #1088693)
+
+0.9.21
+~~~~~~
+
+Improvements
+------------
+
+* ``DirContains`` correctly exposed, after being accidentally hidden in the
+ great matcher re-organization of 0.9.17. (Jonathan Lange)
+
+
+0.9.20
+~~~~~~
+
+Three new matchers that'll rock your world.
+
+Improvements
+------------
+
+* New, powerful matchers that match items in a dictionary:
+
+ - ``MatchesDict``, match every key in a dictionary with a key in a
+ dictionary of matchers. For when the set of expected keys is equal to the
+ set of observed keys.
+
+ - ``ContainsDict``, every key in a dictionary of matchers must be
+ found in a dictionary, and the values for those keys must match. For when
+ the set of expected keys is a subset of the set of observed keys.
+
+ - ``ContainedByDict``, every key in a dictionary must be found in
+ a dictionary of matchers. For when the set of expected keys is a superset
+ of the set of observed keys.
+
+ The names are a little confusing, sorry. We're still trying to figure out
+ how to present the concept in the simplest way possible.
+
+
+0.9.19
+~~~~~~
+
+How embarrassing! Three releases in two days.
+
+We've worked out the kinks and have confirmation from our downstreams that
+this is all good. Should be the last release for a little while. Please
+ignore 0.9.18 and 0.9.17.
+
+Improvements
+------------
+
+* Include the matcher tests in the release, allowing the tests to run and
+ pass from the release tarball. (Jonathan Lange)
+
+* Fix cosmetic test failures in Python 3.3, introduced during release 0.9.17.
+ (Jonathan Lange)
+
+
+0.9.18
+~~~~~~
+
+Due to an oversight, release 0.9.18 did not contain the new
+``testtools.matchers`` package and was thus completely broken. This release
+corrects that, returning us all to normality.
+
+0.9.17
+~~~~~~
+
+This release brings better discover support and Python3.x improvements. There
+are still some test failures on Python3.3 but they are cosmetic - the library
+is as usable there as on any other Python 3 release.
+
+Changes
+-------
+
+* The ``testtools.matchers`` package has been split up. No change to the
+ public interface. (Jonathan Lange)
+
+Improvements
+------------
+
+* ``python -m testtools.run discover . --list`` now works. (Robert Collins)
+
+* Correctly handling of bytes vs text in JSON content type. (Martin [gz])
+
+
+0.9.16
+~~~~~~
+
+Some new matchers and a new content helper for JSON content.
+
+This is the first release of testtools to drop support for Python 2.4 and 2.5.
+If you need support for either of those versions, please use testtools 0.9.15.
+
+Improvements
+------------
+
+* New content helper, ``json_content`` (Jonathan Lange)
+
+* New matchers:
+
+ * ``ContainsAll`` for asserting one thing is a subset of another
+ (Raphaël Badin)
+
+ * ``SameMembers`` for asserting two iterators have the same members.
+ (Jonathan Lange)
+
+* Reraising of exceptions in Python 3 is more reliable. (Martin [gz])
+
+
+0.9.15
+~~~~~~
+
+This is the last release to support Python2.4 and 2.5. It brings in a slew of
+improvements to test tagging and concurrency, making running large test suites
+with partitioned workers more reliable and easier to reproduce exact test
+ordering in a given worker. See our sister project ``testrepository`` for a
+test runner that uses these features.
+
+Changes
+-------
+
+* ``PlaceHolder`` and ``ErrorHolder`` now support being given result details.
+ (Robert Collins)
+
+* ``ErrorHolder`` is now just a function - all the logic is in ``PlaceHolder``.
+ (Robert Collins)
+
+* ``TestResult`` and all other ``TestResult``-like objects in testtools
+ distinguish between global tags and test-local tags, as per the subunit
+ specification. (Jonathan Lange)
+
+* This is the **last** release of testtools that supports Python 2.4 or 2.5.
+ These releases are no longer supported by the Python community and do not
+ receive security updates. If this affects you, you will need to either
+ stay on this release or perform your own backports.
+ (Jonathan Lange, Robert Collins)
+
+* ``ThreadsafeForwardingResult`` now forwards global tags as test-local tags,
+ making reasoning about the correctness of the multiplexed stream simpler.
+ This preserves the semantic value (what tags apply to a given test) while
+ consuming less stream size (as no negative-tag statement is needed).
+ (Robert Collins, Gary Poster, #986434)
+
+Improvements
+------------
+
+* API documentation corrections. (Raphaël Badin)
+
+* ``ConcurrentTestSuite`` now takes an optional ``wrap_result`` parameter
+ that can be used to wrap the ``ThreadsafeForwardingResults`` created by
+ the suite. (Jonathan Lange)
+
+* ``Tagger`` added. It's a new ``TestResult`` that tags all tests sent to
+ it with a particular set of tags. (Jonathan Lange)
+
+* ``testresultdecorator`` brought over from subunit. (Jonathan Lange)
+
+* All ``TestResult`` wrappers now correctly forward ``current_tags`` from
+ their wrapped results, meaning that ``current_tags`` can always be relied
+ upon to return the currently active tags on a test result.
+
+* ``TestByTestResult``, a ``TestResult`` that calls a method once per test,
+ added. (Jonathan Lange)
+
+* ``ThreadsafeForwardingResult`` correctly forwards ``tags()`` calls where
+ only one of ``new_tags`` or ``gone_tags`` are specified.
+ (Jonathan Lange, #980263)
+
+* ``ThreadsafeForwardingResult`` no longer leaks local tags from one test
+ into all future tests run. (Jonathan Lange, #985613)
+
+* ``ThreadsafeForwardingResult`` has many, many more tests. (Jonathan Lange)
+
+
+0.9.14
+~~~~~~
+
+Our sister project, `subunit <https://launchpad.net/subunit>`_, was using a
+private API that was deleted in the 0.9.13 release. This release restores
+that API in order to smooth out the upgrade path.
+
+If you don't use subunit, then this release won't matter very much to you.
+
+
+0.9.13
+~~~~~~
+
+Plenty of new matchers and quite a few critical bug fixes (especially to do
+with stack traces from failed assertions). A net win for all.
+
+Changes
+-------
+
+* ``MatchesAll`` now takes a ``first_only`` keyword argument that changes how
+ mismatches are displayed. If you were previously passing matchers to
+ ``MatchesAll`` with keyword arguments, then this change might affect your
+ test results. (Jonathan Lange)
+
+Improvements
+------------
+
+* Actually hide all of the testtools stack for assertion failures. The
+ previous release promised clean stack, but now we actually provide it.
+ (Jonathan Lange, #854769)
+
+* ``assertRaises`` now includes the ``repr`` of the callable that failed to raise
+ properly. (Jonathan Lange, #881052)
+
+* Asynchronous tests no longer hang when run with trial.
+ (Jonathan Lange, #926189)
+
+* ``Content`` objects now have an ``as_text`` method to convert their contents
+ to Unicode text. (Jonathan Lange)
+
+* Failed equality assertions now line up. (Jonathan Lange, #879339)
+
+* ``FullStackRunTest`` no longer aborts the test run if a test raises an
+ error. (Jonathan Lange)
+
+* ``MatchesAll`` and ``MatchesListwise`` both take a ``first_only`` keyword
+ argument. If True, they will report only on the first mismatch they find,
+ and not continue looking for other possible mismatches.
+ (Jonathan Lange)
+
+* New helper, ``Nullary`` that turns callables with arguments into ones that
+ don't take arguments. (Jonathan Lange)
+
+* New matchers:
+
+ * ``DirContains`` matches the contents of a directory.
+ (Jonathan Lange, James Westby)
+
+ * ``DirExists`` matches if a directory exists.
+ (Jonathan Lange, James Westby)
+
+ * ``FileContains`` matches the contents of a file.
+ (Jonathan Lange, James Westby)
+
+ * ``FileExists`` matches if a file exists.
+ (Jonathan Lange, James Westby)
+
+ * ``HasPermissions`` matches the permissions of a file. (Jonathan Lange)
+
+ * ``MatchesPredicate`` matches if a predicate is true. (Jonathan Lange)
+
+ * ``PathExists`` matches if a path exists. (Jonathan Lange, James Westby)
+
+ * ``SamePath`` matches if two paths are the same. (Jonathan Lange)
+
+ * ``TarballContains`` matches the contents of a tarball. (Jonathan Lange)
+
+* ``MultiTestResult`` supports the ``tags`` method.
+ (Graham Binns, Francesco Banconi, #914279)
+
+* ``ThreadsafeForwardingResult`` supports the ``tags`` method.
+ (Graham Binns, Francesco Banconi, #914279)
+
+* ``ThreadsafeForwardingResult`` no longer includes semaphore acquisition time
+ in the test duration (for implicitly timed test runs).
+ (Robert Collins, #914362)
+
0.9.12
~~~~~~
diff --git a/test/3rdparty/testtools-0.9.12/PKG-INFO b/test/3rdparty/testtools-0.9.34/PKG-INFO
index 7dbace779b0..8bb756609df 100644
--- a/test/3rdparty/testtools-0.9.12/PKG-INFO
+++ b/test/3rdparty/testtools-0.9.34/PKG-INFO
@@ -1,8 +1,8 @@
-Metadata-Version: 1.0
+Metadata-Version: 1.1
Name: testtools
-Version: 0.9.12
+Version: 0.9.34
Summary: Extensions to the Python standard library unit testing framework
-Home-page: https://launchpad.net/testtools
+Home-page: https://github.com/testing-cabal/testtools
Author: Jonathan M. Lange
Author-email: jml+testtools@mumak.net
License: UNKNOWN
@@ -13,7 +13,7 @@ Description: ======================================
testtools is a set of extensions to the Python standard library's unit testing
framework. These extensions have been derived from many years of experience
with unit testing in Python and come from many different sources. testtools
- also ports recent unittest changes all the way back to Python 2.4.
+ supports Python versions all the way back to Python 2.6.
What better way to start than with a contrived code snippet?::
@@ -101,7 +101,13 @@ Description: ======================================
--------------------------
testtools gives you the very latest in unit testing technology in a way that
- will work with Python 2.4, 2.5, 2.6, 2.7 and 3.1.
+ will work with Python 2.6, 2.7, 3.1 and 3.2.
+
+ If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
+ 0.9.15. Up to then we supported Python 2.4 and 2.5, but we found the
+ constraints involved in not using the newer language features onerous as we
+ added more support for versions post Python 3.
Platform: UNKNOWN
Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
diff --git a/test/3rdparty/testtools-0.9.12/README b/test/3rdparty/testtools-0.9.34/README.rst
index 78397de85b6..cddb5942e18 100644
--- a/test/3rdparty/testtools-0.9.12/README
+++ b/test/3rdparty/testtools-0.9.34/README.rst
@@ -31,7 +31,13 @@ under the same license as Python, see LICENSE for details.
Required Dependencies
---------------------
- * Python 2.4+ or 3.0+
+ * Python 2.6+ or 3.0+
+
+If you would like to use testtools for earlier Python's, please use testtools
+0.9.15.
+
+ * extras (helpers that we intend to push into Python itself in the near
+ future).
Optional Dependencies
@@ -49,8 +55,8 @@ Bug reports and patches
-----------------------
Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
-Patches can also be submitted via Launchpad, or mailed to the author. You can
-mail the author directly at jml@mumak.net.
+Patches should be submitted as Github pull requests, or mailed to the authors.
+See ``doc/hacking.rst`` for more details.
There's no mailing list for this project yet, however the testing-in-python
mailing list may be a useful resource:
diff --git a/test/3rdparty/testtools-0.9.12/doc/Makefile b/test/3rdparty/testtools-0.9.34/doc/Makefile
index b5d07af57f2..b5d07af57f2 100644
--- a/test/3rdparty/testtools-0.9.12/doc/Makefile
+++ b/test/3rdparty/testtools-0.9.34/doc/Makefile
diff --git a/test/3rdparty/testtools-0.9.12/doc/_static/placeholder.txt b/test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt
index e69de29bb2d..e69de29bb2d 100644
--- a/test/3rdparty/testtools-0.9.12/doc/_static/placeholder.txt
+++ b/test/3rdparty/testtools-0.9.34/doc/_static/placeholder.txt
diff --git a/test/3rdparty/testtools-0.9.12/doc/_templates/placeholder.txt b/test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt
index e69de29bb2d..e69de29bb2d 100644
--- a/test/3rdparty/testtools-0.9.12/doc/_templates/placeholder.txt
+++ b/test/3rdparty/testtools-0.9.34/doc/_templates/placeholder.txt
diff --git a/test/3rdparty/testtools-0.9.12/doc/conf.py b/test/3rdparty/testtools-0.9.34/doc/conf.py
index de5fdd4224e..de5fdd4224e 100644
--- a/test/3rdparty/testtools-0.9.12/doc/conf.py
+++ b/test/3rdparty/testtools-0.9.34/doc/conf.py
diff --git a/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst b/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst
new file mode 100644
index 00000000000..d105b4f04e5
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/doc/for-framework-folk.rst
@@ -0,0 +1,454 @@
+============================
+testtools for framework folk
+============================
+
+Introduction
+============
+
+In addition to having many features :doc:`for test authors
+<for-test-authors>`, testtools also has many bits and pieces that are useful
+for folk who write testing frameworks.
+
+If you are the author of a test runner, are working on a very large
+unit-tested project, are trying to get one testing framework to play nicely
+with another or are hacking away at getting your test suite to run in parallel
+over a heterogenous cluster of machines, this guide is for you.
+
+This manual is a summary. You can get details by consulting the `testtools
+API docs`_.
+
+
+Extensions to TestCase
+======================
+
+In addition to the ``TestCase`` specific methods, we have extensions for
+``TestSuite`` that also apply to ``TestCase`` (because ``TestCase`` and
+``TestSuite`` follow the Composite pattern).
+
+Custom exception handling
+-------------------------
+
+testtools provides a way to control how test exceptions are handled. To do
+this, add a new exception to ``self.exception_handlers`` on a
+``testtools.TestCase``. For example::
+
+ >>> self.exception_handlers.insert(-1, (ExceptionClass, handler)).
+
+Having done this, if any of ``setUp``, ``tearDown``, or the test method raise
+``ExceptionClass``, ``handler`` will be called with the test case, test result
+and the raised exception.
+
+Use this if you want to add a new kind of test result, that is, if you think
+that ``addError``, ``addFailure`` and so forth are not enough for your needs.
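+
+As a minimal sketch (the handler name and the ``ServiceError`` exception are
+illustrative, not part of testtools), a handler follows the calling convention
+described above::
+
+  def handle_service_error(case, result, exception):
+      # Called with the test case, the test result and the raised exception.
+      result.addError(case, details=case.getDetails())
+
+  class ServiceTests(TestCase):
+      def setUp(self):
+          super(ServiceTests, self).setUp()
+          self.exception_handlers.insert(-1, (ServiceError, handle_service_error))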
+
+
+Controlling test execution
+--------------------------
+
+If you want to control more than just how exceptions are raised, you can
+provide a custom ``RunTest`` to a ``TestCase``. The ``RunTest`` object can
+change everything about how the test executes.
+
+To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
+takes a test and an optional list of exception handlers. Instances returned
+by the factory must have a ``run()`` method that takes an optional ``TestResult``
+object.
+
+The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
+method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla
+way that Python's standard unittest_ does.
+
+To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something
+like this::
+
+ class SomeTests(TestCase):
+ run_tests_with = CustomRunTestFactory
+
+To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do::
+
+ class SomeTests(TestCase):
+ @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
+ def test_something(self):
+ pass
+
+In addition, either of these can be overridden by passing a factory in to the
+``TestCase`` constructor with the optional ``runTest`` argument.
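+
+As a sketch (``LoggingRunTest`` is illustrative, not part of testtools), a
+``RunTest`` that logs each test id before delegating to the default behaviour
+might look like::
+
+  from testtools.runtest import RunTest
+
+  class LoggingRunTest(RunTest):
+      def run(self, result=None):
+          # self.case is the test being run; delegate to the standard RunTest.
+          print('running %s' % self.case.id())
+          return super(LoggingRunTest, self).run(result)
+
+  class SomeTests(TestCase):
+      run_tests_with = LoggingRunTest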
+
+
+Test renaming
+-------------
+
+``testtools.clone_test_with_new_id`` is a function to copy a test case
+instance to one with a new name. This is helpful for implementing test
+parameterization.
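+
+For example (assuming ``test`` is an existing ``TestCase`` instance)::
+
+  from testtools import clone_test_with_new_id
+
+  new_test = clone_test_with_new_id(test, test.id() + '(some_variation)')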
+
+.. _force_failure:
+
+Delayed Test Failure
+--------------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to True will
+cause ``testtools.RunTest`` to fail the test case after the test has finished.
+This is useful when you want to cause a test to fail, but don't want to
+prevent the remainder of the test code from being executed.
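+
+A minimal sketch (``check_logs_for_errors`` is an illustrative helper, not
+part of testtools)::
+
+  from testtools.content import text_content
+
+  class ServiceTests(TestCase):
+      def test_service_run(self):
+          errors = check_logs_for_errors()
+          if errors:
+              self.addDetail('log-errors', text_content('\n'.join(errors)))
+              self.force_failure = True
+          # The rest of the test still runs and can gather more details.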
+
+Test placeholders
+=================
+
+Sometimes, it's useful to be able to add things to a test suite that are not
+actually tests. For example, you might wish to represent import failures
+that occur during test discovery as tests, so that your test result object
+doesn't have to do special work to handle them nicely.
+
+testtools provides two such objects, called "placeholders": ``PlaceHolder``
+and ``ErrorHolder``. ``PlaceHolder`` takes a test id and an optional
+description. When it's run, it succeeds. ``ErrorHolder`` takes a test id,
+and error and an optional short description. When it's run, it reports that
+error.
+
+These placeholders are best used to log events that occur outside the test
+suite proper, but are still very relevant to its results.
+
+e.g.::
+
+ >>> suite = TestSuite()
+  >>> suite.addTest(PlaceHolder('I record an event'))
+ >>> suite.run(TextTestResult(verbose=True))
+ I record an event [OK]
+
+
+Test instance decorators
+========================
+
+DecorateTestCaseResult
+----------------------
+
+This object calls out to your code when ``run`` / ``__call__`` are called and
+allows the result object that will be used to run the test to be altered. This
+is very useful when working with a test runner that doesn't know your test case
+requirements. For instance, it can be used to inject a ``unittest2`` compatible
+adapter when someone attempts to run your test suite with a ``TestResult`` that
+does not support ``addSkip`` or other ``unittest2`` methods. Similarly it can
+aid the migration to ``StreamResult``.
+
+e.g.::
+
+ >>> suite = TestSuite()
+ >>> suite = DecorateTestCaseResult(suite, ExtendedToOriginalDecorator)
+
+Extensions to TestResult
+========================
+
+StreamResult
+------------
+
+``StreamResult`` is a new API for dealing with test case progress that supports
+concurrent and distributed testing without the various issues that
+``TestResult`` has such as buffering in multiplexers.
+
+The design has several key principles:
+
+* Nothing that requires up-front knowledge of all tests.
+
+* Deal with tests running in concurrent environments, potentially distributed
+ across multiple processes (or even machines). This implies allowing multiple
+ tests to be active at once, supplying time explicitly, being able to
+ differentiate between tests running in different contexts and removing any
+ assumption that tests are necessarily in the same process.
+
+* Make the API as simple as possible - each aspect should do one thing well.
+
+The ``TestResult`` API this is intended to replace has three different clients.
+
+* Each executing ``TestCase`` notifies the ``TestResult`` about activity.
+
+* The testrunner running tests uses the API to find out whether the test run
+ had errors, how many tests ran and so on.
+
+* Finally, each ``TestCase`` queries the ``TestResult`` to see whether the test
+ run should be aborted.
+
+With ``StreamResult`` we need to be able to provide a ``TestResult`` compatible
+adapter (``StreamToExtendedDecorator``) to allow incremental migration.
+However, we don't need to conflate things long term. So - we define three
+separate APIs, and merely mix them together to provide the
+``StreamToExtendedDecorator``. ``StreamResult`` is the first of these APIs -
+meeting the needs of ``TestCase`` clients. It handles events generated by
+running tests. See the API documentation for ``testtools.StreamResult`` for
+details.
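+
+A minimal sketch of emitting events by hand (the test id is illustrative; the
+base ``StreamResult`` simply discards events, real consumers are described
+below)::
+
+  >>> from testtools import StreamResult
+  >>> result = StreamResult()
+  >>> result.startTestRun()
+  >>> result.status(test_id='pkg.tests.TestFoo.test_bar',
+  ...     test_status='inprogress')
+  >>> result.status(test_id='pkg.tests.TestFoo.test_bar',
+  ...     test_status='success')
+  >>> result.stopTestRun()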
+
+StreamSummary
+-------------
+
+Secondly we define the ``StreamSummary`` API which takes responsibility for
+collating errors, detecting incomplete tests and counting tests. This provides
+a compatible API with those aspects of ``TestResult``. Again, see the API
+documentation for ``testtools.StreamSummary``.
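+
+For example::
+
+  >>> from testtools import StreamSummary
+  >>> summary = StreamSummary()
+  >>> summary.startTestRun()
+  >>> # Feed events to summary.status() here.
+  >>> summary.stopTestRun()
+  >>> summary.wasSuccessful()
+  True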
+
+TestControl
+-----------
+
+Lastly we define the ``TestControl`` API which is used to provide the
+``shouldStop`` and ``stop`` elements from ``TestResult``. Again, see the API
+documentation for ``testtools.TestControl``. ``TestControl`` can be paired with
+a ``StreamFailFast`` to trigger aborting a test run when a failure is observed.
+Aborting multiple workers in a distributed environment requires hooking
+whatever signalling mechanism the distributed environment has up to a
+``TestControl`` in each worker process.
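+
+A sketch of wiring these together (assuming ``StreamFailFast`` is constructed
+with the callback to invoke on failure)::
+
+  >>> from testtools import (
+  ...     CopyStreamResult, StreamFailFast, StreamSummary, TestControl)
+  >>> control = TestControl()
+  >>> result = CopyStreamResult([StreamSummary(), StreamFailFast(control.stop)])
+  >>> # Pass result to the suite; poll control.shouldStop between tests.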
+
+StreamTagger
+------------
+
+A ``StreamResult`` filter that adds or removes tags from events::
+
+ >>> from testtools import StreamTagger
+ >>> sink = StreamResult()
+ >>> result = StreamTagger([sink], set(['add']), set(['discard']))
+ >>> result.startTestRun()
+ >>> # Run tests against result here.
+ >>> result.stopTestRun()
+
+StreamToDict
+------------
+
+A simplified API for dealing with ``StreamResult`` streams. Each test is
+buffered until it completes and then reported as a trivial dict. This makes
+writing analysers very easy - you can ignore all the plumbing and just work
+with the result. e.g.::
+
+ >>> from testtools import StreamToDict
+ >>> def handle_test(test_dict):
+ ... print(test_dict['id'])
+ >>> result = StreamToDict(handle_test)
+ >>> result.startTestRun()
+ >>> # Run tests against result here.
+ >>> # At stopTestRun() any incomplete buffered tests are announced.
+ >>> result.stopTestRun()
+
+ExtendedToStreamDecorator
+-------------------------
+
+This is a hybrid object that combines both the ``Extended`` and ``Stream``
+``TestResult`` APIs into one class, but only emits ``StreamResult`` events.
+This is useful when a ``StreamResult`` stream is desired, but you cannot
+be sure that the tests which will run have been updated to the ``StreamResult``
+API.
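+
+For example::
+
+  >>> from testtools import ExtendedToStreamDecorator, StreamSummary
+  >>> result = ExtendedToStreamDecorator(StreamSummary())
+  >>> result.startTestRun()
+  >>> # Both old-style and StreamResult-aware tests can report to result here.
+  >>> result.stopTestRun()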
+
+StreamToExtendedDecorator
+-------------------------
+
+This is a simple converter that emits the ``ExtendedTestResult`` API in
+response to events from the ``StreamResult`` API. Useful when outputting
+``StreamResult`` events from a ``TestCase`` but the supplied ``TestResult``
+does not support the ``status`` and ``file`` methods.
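+
+For example, to feed a stream into a plain ``TestResult``::
+
+  >>> from testtools import StreamToExtendedDecorator, TestResult
+  >>> legacy = TestResult()
+  >>> result = StreamToExtendedDecorator(legacy)
+  >>> result.startTestRun()
+  >>> # Run StreamResult-emitting tests against result here.
+  >>> result.stopTestRun()
+  >>> legacy.wasSuccessful()
+  True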
+
+StreamToQueue
+-------------
+
+This is a ``StreamResult`` decorator for reporting tests from multiple threads
+at once. Each method submits an event to a supplied Queue object as a simple
+dict. See ``ConcurrentStreamTestSuite`` for a convenient way to use this.
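+
+A sketch (assuming the queue-and-route-code constructor described in the API
+docs)::
+
+  >>> try:
+  ...     from queue import Queue
+  ... except ImportError:  # Python 2
+  ...     from Queue import Queue
+  >>> from testtools import StreamToQueue
+  >>> queue = Queue()
+  >>> worker_result = StreamToQueue(queue, 'worker-0')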
+
+TimestampingStreamResult
+------------------------
+
+This is a ``StreamResult`` decorator for adding timestamps to events that lack
+them. This allows writing the simplest possible generators of events and
+passing the events via this decorator to get timestamped data. As long as
+no buffering, queueing or blocking happens before the timestamper sees the
+event, the timestamp will be as accurate as if the original event had it.
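+
+For example::
+
+  >>> from testtools import StreamSummary, TimestampingStreamResult
+  >>> result = TimestampingStreamResult(StreamSummary())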
+
+StreamResultRouter
+------------------
+
+This is a ``StreamResult`` which forwards events to an arbitrary set of target
+``StreamResult`` objects. Events that have no forwarding rule are passed onto
+a fallback ``StreamResult`` for processing. The mapping can be changed at
+runtime, allowing great flexibility and responsiveness to changes. Because
+the mapping can change dynamically and the same recipient could appear in
+two different mappings, ``startTestRun`` and ``stopTestRun`` handling is
+fine-grained and up to the user.
+
+If no fallback has been supplied, an unroutable event will raise an exception.
+
+For instance::
+
+ >>> router = StreamResultRouter()
+ >>> sink = doubles.StreamResult()
+ >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+ ... consume_route=True)
+ >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+Would remove the ``0/`` from the route_code and forward the event like so::
+
+  >>> sink.status(test_id='foo', route_code='1', test_status='uxsuccess')
+
+See ``pydoc testtools.StreamResultRouter`` for details.
+
+TestResult.addSkip
+------------------
+
+This method is called on result objects when a test skips. The
+``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
+dict. These can be reported on in much the same way as successful tests.
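+
+For example, after a run::
+
+  >>> for reason, tests in result.skip_reasons.items():
+  ...     print('%s: %d test(s) skipped' % (reason, len(tests)))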
+
+
+TestResult.time
+---------------
+
+This method controls the time used by a ``TestResult``, permitting accurate
+timing of test results gathered on different machines or in different threads.
+See pydoc testtools.TestResult.time for more details.
+
+
+ThreadsafeForwardingResult
+--------------------------
+
+A ``TestResult`` which forwards activity to another test result, but synchronises
+on a semaphore to ensure that all the activity for a single test arrives in a
+batch. This allows simple TestResults which do not expect concurrent test
+reporting to be fed the activity from multiple test threads, or processes.
+
+Note that when you provide multiple errors for a single test, the target sees
+each error as a distinct complete test.
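+
+A sketch of manual construction (normally ``ConcurrentTestSuite`` builds these
+for you)::
+
+  >>> import threading
+  >>> from testtools import TestResult, ThreadsafeForwardingResult
+  >>> target = TestResult()
+  >>> forwarder = ThreadsafeForwardingResult(target, threading.Semaphore(1))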
+
+
+MultiTestResult
+---------------
+
+A test result that dispatches its events to many test results. Use this
+to combine multiple different test result objects into one test result object
+that can be passed to ``TestCase.run()`` or similar. For example::
+
+ a = TestResult()
+ b = TestResult()
+ combined = MultiTestResult(a, b)
+ combined.startTestRun() # Calls a.startTestRun() and b.startTestRun()
+
+Each of the methods on ``MultiTestResult`` will return a tuple of whatever the
+component test results return.
+
+
+TestResultDecorator
+-------------------
+
+Not strictly a ``TestResult``, but something that implements the extended
+``TestResult`` interface of testtools. It can be subclassed to create objects
+that wrap ``TestResults``.
+
+
+TextTestResult
+--------------
+
+A ``TestResult`` that provides a text UI very similar to the Python standard
+library UI. Key differences are that its supports the extended outcomes and
+details API, and is completely encapsulated into the result object, permitting
+it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
+are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
+These limitations will be corrected soon.
+
+
+ExtendedToOriginalDecorator
+---------------------------
+
+Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to
+meet the testtools ``TestResult`` API.
+
+
+Test Doubles
+------------
+
+In testtools.testresult.doubles there are three test doubles that testtools
+uses for its own testing: ``Python26TestResult``, ``Python27TestResult``,
+``ExtendedTestResult``. These TestResult objects implement a single variation of
+the TestResult API each, and log activity to a list ``self._events``. These are
+made available for the convenience of people writing their own extensions.
+
+
+startTestRun and stopTestRun
+----------------------------
+
+Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called
+before and after the entire test run. 'stopTestRun' is particularly useful for
+test results that wish to produce summary output.
+
+``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun``
+methods, and the default testtools runner will call these methods
+appropriately.
+
+The ``startTestRun`` method will reset any errors, failures and so forth on
+the result, making the result object look as if no tests have been run.
+
+
+Extensions to TestSuite
+=======================
+
+ConcurrentTestSuite
+-------------------
+
+A TestSuite for parallel testing. This is used in conjunction with a helper
+that runs a single suite in some parallel fashion (for instance, forking,
+handing off to a subprocess, to a compute cloud, or simple threads).
+ConcurrentTestSuite uses the helper to get a number of separate runnable
+objects, each with a ``run(result)`` method, runs them all in threads, and
+uses a ``ThreadsafeForwardingResult`` to coalesce their activity.
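+
+A sketch of a trivial splitter (the ``split_suite`` helper is illustrative)
+that runs every test case in its own thread::
+
+  import unittest
+
+  from testtools import ConcurrentTestSuite
+  from testtools.testsuite import iterate_tests
+
+  def split_suite(suite):
+      # One sub-suite per test case - maximal (if naive) concurrency.
+      return [unittest.TestSuite([case]) for case in iterate_tests(suite)]
+
+  concurrent_suite = ConcurrentTestSuite(suite, split_suite)
+  concurrent_suite.run(result)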
+
+ConcurrentStreamTestSuite
+-------------------------
+
+A variant of ConcurrentTestSuite that uses the new StreamResult API instead of
+the TestResult API. ConcurrentStreamTestSuite coordinates running some number
+of test/suites concurrently, with one StreamToQueue per test/suite.
+
+Each test/suite is given its own StreamToQueue instance, wrapped in an
+ExtendedToStreamDecorator and a TimestampingStreamResult, forwarding onto the
+StreamResult that ConcurrentStreamTestSuite.run was called with.
+
+ConcurrentStreamTestSuite is a thin shim and it is easy to implement your own
+specialised form if that is needed.
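+
+A sketch (assuming ``make_tests`` returns ``(case, route_code)`` tuples, and
+that ``suite`` and ``stream_result`` already exist)::
+
+  from testtools import ConcurrentStreamTestSuite
+  from testtools.testsuite import iterate_tests
+
+  def make_tests():
+      # One route code per test case, so workers can be told apart.
+      return [(case, str(i)) for i, case in enumerate(iterate_tests(suite))]
+
+  ConcurrentStreamTestSuite(make_tests).run(stream_result)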
+
+FixtureSuite
+------------
+
+A test suite that sets up a fixture_ before running any tests, and then tears
+it down after all of the tests are run. The fixture is *not* made available to
+any of the tests due to there being no standard channel for suites to pass
+information to the tests they contain (and we don't have enough data on what
+such a channel would need to achieve to design a good one yet - or even decide
+if it is a good idea).
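+
+For example (``TempDir`` is the fixture from the ``fixtures`` package and
+``SomeTests`` a test class of your own, used purely as an illustration)::
+
+  from fixtures import TempDir
+  from testtools import FixtureSuite
+
+  suite = FixtureSuite(TempDir(), [SomeTests('test_one'), SomeTests('test_two')])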
+
+sorted_tests
+------------
+
+Given the composite structure of TestSuite / TestCase, sorting tests is
+problematic - you can't tell what functionality is embedded into custom Suite
+implementations. In order to deliver consistent test orders when using test
+discovery (see http://bugs.python.org/issue16709), testtools flattens and
+sorts tests that have the standard TestSuite, and defines a new method
+sort_tests, which can be used by non-standard TestSuites to know when they
+should sort their tests. An example implementation can be seen at
+``FixtureSuite.sorted_tests``.
+
+If there are duplicate test ids in a suite, ValueError will be raised.
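+
+For example::
+
+  from testtools.testsuite import sorted_tests
+
+  suite = sorted_tests(suite)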
+
+filter_by_ids
+-------------
+
+Similarly to ``sorted_tests`` running a subset of tests is problematic - the
+standard run interface provides no way to limit what runs. Rather than
+confounding the two problems (selection and execution) we defined a method
+that filters the tests in a suite (or a case) by their unique test id.
+If you are writing custom wrapping suites, consider implementing filter_by_ids
+to support this (though most wrappers that subclass ``unittest.TestSuite`` will
+work just fine; see ``testtools.testsuite.filter_by_ids`` for details).
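+
+For example (the test id is illustrative)::
+
+  from testtools.testsuite import filter_by_ids
+
+  suite = filter_by_ids(suite, ['pkg.tests.test_mod.TestFoo.test_bar'])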
+
+Extensions to TestRunner
+========================
+
+To facilitate custom listing of tests, ``testtools.run.TestProgram`` attempts
+to call ``list`` on the ``TestRunner``, falling back to a generic
+implementation if it is not present.
+
+.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _fixture: http://pypi.python.org/pypi/fixtures
diff --git a/test/3rdparty/testtools-0.9.12/doc/for-test-authors.rst b/test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst
index 04c4be6b0db..03849e65181 100644
--- a/test/3rdparty/testtools-0.9.12/doc/for-test-authors.rst
+++ b/test/3rdparty/testtools-0.9.34/doc/for-test-authors.rst
@@ -163,7 +163,8 @@ The first argument to ``ExpectedException`` is the type of exception you
expect to see raised. The second argument is optional, and can be either a
regular expression or a matcher. If it is a regular expression, the ``str()``
of the raised exception must match the regular expression. If it is a matcher,
-then the raised exception object must match it.
+then the raised exception object must match it. The optional third argument
+``msg`` will cause the raised error to be annotated with that message.
assertIn, assertNotIn
@@ -432,7 +433,7 @@ example::
def test_keys_equal(self):
x = {'a': 1, 'b': 2}
y = {'a': 2, 'b': 3}
- self.assertThat(a, KeysEqual(b))
+ self.assertThat(x, KeysEqual(y))
MatchesRegex
@@ -445,6 +446,122 @@ be able to do, if you think about it::
self.assertThat('foo', MatchesRegex('fo+'))
+HasLength
+~~~~~~~~~
+
+Check the length of a collection. The following assertion will fail::
+
+ self.assertThat([1, 2, 3], HasLength(2))
+
+But this one won't::
+
+ self.assertThat([1, 2, 3], HasLength(3))
+
+
+File- and path-related matchers
+-------------------------------
+
+testtools also has a number of matchers to help with asserting things about
+the state of the filesystem.
+
+PathExists
+~~~~~~~~~~
+
+Matches if a path exists::
+
+ self.assertThat('/', PathExists())
+
+
+DirExists
+~~~~~~~~~
+
+Matches if a path exists and it refers to a directory::
+
+ # This will pass on most Linux systems.
+ self.assertThat('/home/', DirExists())
+ # This will not
+ self.assertThat('/home/jml/some-file.txt', DirExists())
+
+
+FileExists
+~~~~~~~~~~
+
+Matches if a path exists and it refers to a file (as opposed to a directory)::
+
+ # This will pass on most Linux systems.
+ self.assertThat('/bin/true', FileExists())
+ # This will not.
+ self.assertThat('/home/', FileExists())
+
+
+DirContains
+~~~~~~~~~~~
+
+Matches if the given directory contains the specified files and directories.
+Say we have a directory ``foo`` that has the files ``a``, ``b`` and ``c``,
+then::
+
+ self.assertThat('foo', DirContains(['a', 'b', 'c']))
+
+will match, but::
+
+ self.assertThat('foo', DirContains(['a', 'b']))
+
+will not.
+
+The matcher sorts both the input and the list of names we get back from the
+filesystem.
+
+You can use this in a more advanced way, and match the sorted directory
+listing against an arbitrary matcher::
+
+ self.assertThat('foo', DirContains(matcher=Contains('a')))
+
+
+FileContains
+~~~~~~~~~~~~
+
+Matches if the given file has the specified contents. Say there's a file
+called ``greetings.txt`` with the contents, ``Hello World!``::
+
+ self.assertThat('greetings.txt', FileContains("Hello World!"))
+
+will match.
+
+You can also use this in a more advanced way, and match the contents of the
+file against an arbitrary matcher::
+
+ self.assertThat('greetings.txt', FileContains(matcher=Contains('!')))
+
+
+HasPermissions
+~~~~~~~~~~~~~~
+
+Used for asserting that a file or directory has certain permissions. Uses
+octal-mode permissions for both input and matching. For example::
+
+ self.assertThat('/tmp', HasPermissions('1777'))
+ self.assertThat('id_rsa', HasPermissions('0600'))
+
+This is probably more useful on UNIX systems than on Windows systems.
+
+
+SamePath
+~~~~~~~~
+
+Matches if two paths actually refer to the same thing. The paths don't have
+to exist, but if they do exist, ``SamePath`` will resolve any symlinks.::
+
+ self.assertThat('somefile', SamePath('childdir/../somefile'))
+
+
+TarballContains
+~~~~~~~~~~~~~~~
+
+Matches the contents of a tarball. In many ways, much like ``DirContains``,
+but instead of matching on ``os.listdir`` matches on ``TarFile.getnames``.
+
+
Combining matchers
------------------
@@ -509,7 +626,7 @@ matching. This can be used to aid in creating trivial matchers as functions, for
example::
def test_after_preprocessing_example(self):
- def HasFileContent(content):
+ def PathHasFileContent(content):
def _read(path):
return open(path).read()
return AfterPreprocessing(_read, Equals(content))
@@ -550,7 +667,11 @@ more information in error messages is a big help.
The second reason is that it is sometimes useful to give a name to a set of
matchers. ``has_und_at_both_ends`` is a bit contrived, of course, but it is
-clear.
+clear. The ``FileExists`` and ``DirExists`` matchers included in testtools
+are perhaps better real examples.
+
+If you want only the first mismatch to be reported, pass ``first_only=True``
+as a keyword parameter to ``MatchesAll``.
MatchesAny
@@ -595,6 +716,9 @@ For example::
This is useful for writing custom, domain-specific matchers.
+If you want only the first mismatch to be reported, pass ``first_only=True``
+to ``MatchesListwise``.
+
MatchesSetwise
~~~~~~~~~~~~~~
@@ -645,6 +769,59 @@ object must equal each attribute of the example object. For example::
is exactly equivalent to ``matcher`` in the previous example.
+MatchesPredicate
+~~~~~~~~~~~~~~~~
+
+Sometimes, all you want to do is create a matcher that matches if a given
+function returns True, and mismatches if it returns False.
+
+For example, you might have an ``is_prime`` function and want to make a
+matcher based on it::
+
+ def test_prime_numbers(self):
+ IsPrime = MatchesPredicate(is_prime, '%s is not prime.')
+ self.assertThat(7, IsPrime)
+ self.assertThat(1983, IsPrime)
+ # This will fail.
+ self.assertThat(42, IsPrime)
+
+Which will produce the error message::
+
+ Traceback (most recent call last):
+ File "...", line ..., in test_prime_numbers
+ self.assertThat(42, IsPrime)
+ MismatchError: 42 is not prime.
+
+
+MatchesPredicateWithParams
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you can't use a trivial predicate and instead need to pass in some
+parameters each time. In that case, MatchesPredicateWithParams is your go-to
+tool for creating ad hoc matchers. MatchesPredicateWithParams takes a predicate
+function and message and returns a factory to produce matchers from that. The
+predicate needs to return a boolean (or any truthy object), and accept the
+object to match + whatever was passed into the factory.
+
+For example, you might have a ``divisible`` function and want to make a
+matcher based on it::
+
+ def test_divisible_numbers(self):
+ IsDivisibleBy = MatchesPredicateWithParams(
+ divisible, '{0} is not divisible by {1}')
+ self.assertThat(7, IsDivisibleBy(1))
+      self.assertThat(7, IsDivisibleBy(7))
+      # This will fail.
+      self.assertThat(7, IsDivisibleBy(2))
+
+Which will produce the error message::
+
+ Traceback (most recent call last):
+    File "...", line ..., in test_divisible_numbers
+ self.assertThat(7, IsDivisibleBy(2))
+ MismatchError: 7 is not divisible by 2.
+
+
Raises
~~~~~~
@@ -703,9 +880,9 @@ returns a non-None value. For example::
def test_is_divisible_by_example(self):
# This succeeds, since IsDivisibleBy(5).match(10) returns None.
- self.assertThat(10, IsDivisbleBy(5))
+ self.assertThat(10, IsDivisibleBy(5))
# This fails, since IsDivisibleBy(7).match(10) returns a mismatch.
- self.assertThat(10, IsDivisbleBy(7))
+ self.assertThat(10, IsDivisibleBy(7))
The mismatch is responsible for what sort of error message the failing test
generates. Here's an example mismatch::
@@ -1066,6 +1243,13 @@ Here are some tips for converting your Trial tests into testtools tests.
``AsynchronousDeferredRunTest`` does not. If you rely on this behavior, use
``AsynchronousDeferredRunTestForBrokenTwisted``.
+force_failure
+-------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to ``True``
+will cause the test to be marked as a failure, but won't stop the test code
+from running (see :ref:`force_failure`).
+
Test helpers
============
@@ -1145,6 +1329,29 @@ details of certain variables don't actually matter.
See pages 419-423 of `xUnit Test Patterns`_ by Gerard Meszaros for a detailed
discussion of creation methods.
+Test attributes
+---------------
+
+Inspired by the ``nosetests`` ``attr`` plugin, testtools provides support for
+marking up test methods with attributes, which are then exposed in the test
+id and can be used when filtering tests by id. (e.g. via ``--load-list``)::
+
+ from testtools.testcase import attr, WithAttributes
+
+ class AnnotatedTests(WithAttributes, TestCase):
+
+ @attr('simple')
+ def test_one(self):
+ pass
+
+      @attr('more', 'than', 'one')
+ def test_two(self):
+ pass
+
+ @attr('or')
+ @attr('stacked')
+ def test_three(self):
+ pass
General helpers
===============
@@ -1153,7 +1360,7 @@ Conditional imports
-------------------
Lots of the time we would like to conditionally import modules. testtools
-needs to do this itself, and graciously extends the ability to its users.
+uses the small library extras to do this. This used to be part of testtools.
Instead of::
@@ -1182,9 +1389,30 @@ You can do::
Safe attribute testing
----------------------
-``hasattr`` is broken_ on many versions of Python. testtools provides
-``safe_hasattr``, which can be used to safely test whether an object has a
-particular attribute.
+``hasattr`` is broken_ on many versions of Python. The helper ``safe_hasattr``
+can be used to safely test whether an object has a particular attribute. Like
+``try_import`` this used to be in testtools but is now in extras.
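+
+For example (``thing`` being any object)::
+
+  from extras import safe_hasattr
+
+  if safe_hasattr(thing, 'some_attribute'):
+      ...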
+
+
+Nullary callables
+-----------------
+
+Sometimes you want to be able to pass around a function with the arguments
+already specified. The normal way of doing this in Python is::
+
+ nullary = lambda: f(*args, **kwargs)
+ nullary()
+
+Which is mostly good enough, but loses a bit of debugging information. If you
+take the ``repr()`` of ``nullary``, you're only told that it's a lambda, and
+you get none of the juicy meaning that you'd get from the ``repr()`` of ``f``.
+
+The solution is to use ``Nullary`` instead::
+
+ nullary = Nullary(f, *args, **kwargs)
+ nullary()
+
+Here, ``repr(nullary)`` will be the same as ``repr(f)``.
.. _testrepository: https://launchpad.net/testrepository
diff --git a/test/3rdparty/testtools-0.9.12/doc/hacking.rst b/test/3rdparty/testtools-0.9.34/doc/hacking.rst
index b9f5ff22c6c..6434e36c535 100644
--- a/test/3rdparty/testtools-0.9.12/doc/hacking.rst
+++ b/test/3rdparty/testtools-0.9.34/doc/hacking.rst
@@ -8,9 +8,7 @@ Coding style
In general, follow `PEP 8`_ except where consistency with the standard
library's unittest_ module would suggest otherwise.
-testtools supports Python 2.4 and later, including Python 3, so avoid any
-2.5-only features like the ``with`` statement.
-
+testtools currently supports Python 2.6 and later, including Python 3.
Copyright assignment
--------------------
@@ -51,6 +49,21 @@ is often useful to see all levels of the stack. To do this, add
``run_tests_with = FullStackRunTest`` to the top of a test's class definition.
+Discussion
+----------
+
+When submitting a patch, it will help the review process a lot if there's a
+clear explanation of what the change does and why you think the change is a
+good idea. For crasher bugs, this is generally a no-brainer, but for UI bugs
+& API tweaks, the reason something is an improvement might not be obvious, so
+it's worth spelling out.
+
+If you are thinking of implementing a new feature, you might want to have that
+discussion on the mailing list (testtools-dev@lists.launchpad.net) before the
+patch goes up for review. This is not at all mandatory, but getting feedback
+early can help avoid dead ends.
+
+
Documentation
-------------
@@ -65,7 +78,7 @@ Source layout
-------------
The top-level directory contains the ``testtools/`` package directory, and
-miscellaneous files like ``README`` and ``setup.py``.
+miscellaneous files like ``README.rst`` and ``setup.py``.
The ``testtools/`` directory is the Python package itself. It is separated
into submodules for internal clarity, but all public APIs should be “promoted”
@@ -80,13 +93,13 @@ Tests belong in ``testtools/tests/``.
Committing to trunk
-------------------
-Testtools is maintained using bzr, with its trunk at lp:testtools. This gives
-every contributor the ability to commit their work to their own branches.
-However permission must be granted to allow contributors to commit to the trunk
-branch.
+Testtools is maintained using git, with its master repo at
+https://github.com/testing-cabal/testtools. This gives every contributor the
+ability to commit their work to their own branches. However permission must be
+granted to allow contributors to commit to the trunk branch.
-Commit access to trunk is obtained by joining the testtools-committers
-Launchpad team. Membership in this team is contingent on obeying the testtools
+Commit access to trunk is obtained by joining the `testing-cabal`_, either as an
+Owner or a Committer. Commit access is contingent on obeying the testtools
contribution policy, see `Copyright Assignment`_ above.
@@ -94,16 +107,16 @@ Code Review
-----------
All code must be reviewed before landing on trunk. The process is to create a
-branch in launchpad, and submit it for merging to lp:testtools. It will then
-be reviewed before it can be merged to trunk. It will be reviewed by someone:
+branch on Github, and make a pull request into trunk. It will then be reviewed
+before it can be merged to trunk. It will be reviewed by someone:
* not the author
-* a committer (member of the `~testtools-committers`_ team)
+* a committer
-As a special exception, while the testtools committers team is small and prone
-to blocking, a merge request from a committer that has not been reviewed after
-24 hours may be merged by that committer. When the team is larger this policy
-will be revisited.
+As a special exception, since there are few testtools committers and thus
+reviews are prone to blocking, a pull request from a committer that has not been
+reviewed after 24 hours may be merged by that committer. When the team is larger
+this policy will be revisited.
Code reviewers should look for the quality of what is being submitted,
including conformance with this HACKING file.
@@ -125,30 +138,26 @@ Release tasks
-------------
#. Choose a version number, say X.Y.Z
-#. Branch from trunk to testtools-X.Y.Z
-#. In testtools-X.Y.Z, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
-#. Replace NEXT in NEWS with the version number X.Y.Z, adjusting the reST.
+#. In trunk, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
+#. Under NEXT in NEWS add a heading with the version number X.Y.Z.
#. Possibly write a blurb into NEWS.
-#. Replace any additional references to NEXT with the version being
- released. (There should be none other than the ones in these release tasks
- which should not be replaced).
#. Commit the changes.
-#. Tag the release, bzr tag testtools-X.Y.Z
+#. Tag the release, ``git tag -s testtools-X.Y.Z``
#. Run 'make release', this:
#. Creates a source distribution and uploads to PyPI
#. Ensures all Fix Committed bugs are in the release milestone
#. Makes a release on Launchpad and uploads the tarball
#. Marks all the Fix Committed bugs as Fix Released
#. Creates a new milestone
-#. Merge the release branch testtools-X.Y.Z into trunk. Before the commit,
- add a NEXT heading to the top of NEWS and bump the version in __init__.py.
- Push trunk to Launchpad
+#. Change __version__ in __init__.py to the probable next version.
+ e.g. to ``(X, Y, Z+1, 'dev', 0)``.
+#. Commit 'Opening X.Y.Z+1 for development.'
#. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
+#. Push trunk to Github, ``git push --tags origin master``
.. _PEP 8: http://www.python.org/dev/peps/pep-0008/
.. _unittest: http://docs.python.org/library/unittest.html
-.. _~testtools-dev: https://launchpad.net/~testtools-dev
.. _MIT license: http://www.opensource.org/licenses/mit-license.php
.. _Sphinx: http://sphinx.pocoo.org/
.. _restructuredtext: http://docutils.sourceforge.net/rst.html
-
+.. _testing-cabal: https://github.com/organizations/testing-cabal/
diff --git a/test/3rdparty/testtools-0.9.12/doc/index.rst b/test/3rdparty/testtools-0.9.34/doc/index.rst
index 4687cebb62b..bac47e43794 100644
--- a/test/3rdparty/testtools-0.9.12/doc/index.rst
+++ b/test/3rdparty/testtools-0.9.34/doc/index.rst
@@ -9,7 +9,10 @@ testtools: tasteful testing for Python
testtools is a set of extensions to the Python standard library's unit testing
framework. These extensions have been derived from many years of experience
with unit testing in Python and come from many different sources. testtools
-also ports recent unittest changes all the way back to Python 2.4.
+also ports recent unittest changes all the way back to Python 2.4. The next
+release of testtools will change that to support versions that are maintained
+by the Python community instead, to allow the use of modern language features
+within testtools.
Contents:
diff --git a/test/3rdparty/testtools-0.9.12/doc/make.bat b/test/3rdparty/testtools-0.9.34/doc/make.bat
index f8c1fd520ab..f8c1fd520ab 100644
--- a/test/3rdparty/testtools-0.9.12/doc/make.bat
+++ b/test/3rdparty/testtools-0.9.34/doc/make.bat
diff --git a/test/3rdparty/testtools-0.9.12/doc/overview.rst b/test/3rdparty/testtools-0.9.34/doc/overview.rst
index e43265fd1e0..5d9436ffc9e 100644
--- a/test/3rdparty/testtools-0.9.12/doc/overview.rst
+++ b/test/3rdparty/testtools-0.9.34/doc/overview.rst
@@ -5,7 +5,7 @@ testtools: tasteful testing for Python
testtools is a set of extensions to the Python standard library's unit testing
framework. These extensions have been derived from many years of experience
with unit testing in Python and come from many different sources. testtools
-also ports recent unittest changes all the way back to Python 2.4.
+supports Python versions all the way back to Python 2.6.
What better way to start than with a contrived code snippet?::
@@ -93,4 +93,9 @@ Cross-Python compatibility
--------------------------
testtools gives you the very latest in unit testing technology in a way that
-will work with Python 2.4, 2.5, 2.6, 2.7 and 3.1.
+will work with Python 2.6, 2.7, 3.1 and 3.2.
+
+If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
+0.9.15. Up until then we supported Python 2.4 and 2.5, but the constraints of
+avoiding newer language features became onerous as we added more support for
+Python 3.
diff --git a/test/3rdparty/testtools-0.9.34/setup.cfg b/test/3rdparty/testtools-0.9.34/setup.cfg
new file mode 100644
index 00000000000..72d49ab47be
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/setup.cfg
@@ -0,0 +1,10 @@
+[test]
+test_module = testtools.tests
+buffer = 1
+catch = 1
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/test/3rdparty/testtools-0.9.12/setup.py b/test/3rdparty/testtools-0.9.34/setup.py
index d07c8f29359..dacbf91e264 100755
--- a/test/3rdparty/testtools-0.9.12/setup.py
+++ b/test/3rdparty/testtools-0.9.34/setup.py
@@ -1,22 +1,24 @@
#!/usr/bin/env python
"""Distutils installer for testtools."""
-from distutils.core import setup
+from setuptools import setup
+from distutils.command.build_py import build_py
import email
import os
+import sys
import testtools
+cmd_class = {}
+if getattr(testtools, 'TestCommand', None) is not None:
+ cmd_class['test'] = testtools.TestCommand
-def get_revno():
- import bzrlib.errors
- import bzrlib.workingtree
- try:
- t = bzrlib.workingtree.WorkingTree.open_containing(__file__)[0]
- except (bzrlib.errors.NotBranchError, bzrlib.errors.NoWorkingTree):
- return None
- else:
- return t.branch.revno()
+class testtools_build_py(build_py):
+ def build_module(self, module, module_file, package):
+ if sys.version_info >= (3,) and module == '_compat2x':
+ return
+ return build_py.build_module(self, module, module_file, package)
+cmd_class['build_py'] = testtools_build_py
def get_version_from_pkg_info():
@@ -43,15 +45,10 @@ def get_version():
pkg_info_version = get_version_from_pkg_info()
if pkg_info_version:
return pkg_info_version
- revno = get_revno()
- if revno is None:
- return "snapshot"
- if phase == 'alpha':
- # No idea what the next version will be
- return 'next-r%s' % revno
- else:
- # Preserve the version number but give it a revno prefix
- return version + '-r%s' % revno
+ # Apparently if we just say "snapshot" then distribute won't accept it
+ # as satisfying versioned dependencies. This is a problem for the
+ # daily build version.
+ return "snapshot-%s" % (version,)
def get_long_description():
@@ -63,11 +60,27 @@ def get_long_description():
setup(name='testtools',
author='Jonathan M. Lange',
author_email='jml+testtools@mumak.net',
- url='https://launchpad.net/testtools',
+ url='https://github.com/testing-cabal/testtools',
description=('Extensions to the Python standard library unit testing '
'framework'),
long_description=get_long_description(),
version=get_version(),
- classifiers=["License :: OSI Approved :: MIT License"],
- packages=['testtools', 'testtools.testresult', 'testtools.tests'],
- cmdclass={'test': testtools.TestCommand})
+ classifiers=["License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3",
+ ],
+ packages=[
+ 'testtools',
+ 'testtools.matchers',
+ 'testtools.testresult',
+ 'testtools.tests',
+ 'testtools.tests.matchers',
+ ],
+ cmdclass=cmd_class,
+ zip_safe=False,
+ install_requires=[
+ 'extras',
+ # 'mimeparse' has not been uploaded by the maintainer with Python3 compat
+ # but someone kindly uploaded a fixed version as 'python-mimeparse'.
+ 'python-mimeparse',
+ ],
+ )
diff --git a/test/3rdparty/testtools-0.9.34/testtools.egg-info/PKG-INFO b/test/3rdparty/testtools-0.9.34/testtools.egg-info/PKG-INFO
new file mode 100644
index 00000000000..8bb756609df
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools.egg-info/PKG-INFO
@@ -0,0 +1,113 @@
+Metadata-Version: 1.1
+Name: testtools
+Version: 0.9.34
+Summary: Extensions to the Python standard library unit testing framework
+Home-page: https://github.com/testing-cabal/testtools
+Author: Jonathan M. Lange
+Author-email: jml+testtools@mumak.net
+License: UNKNOWN
+Description: ======================================
+ testtools: tasteful testing for Python
+ ======================================
+
+ testtools is a set of extensions to the Python standard library's unit testing
+ framework. These extensions have been derived from many years of experience
+ with unit testing in Python and come from many different sources. testtools
+ supports Python versions all the way back to Python 2.6.
+
+ What better way to start than with a contrived code snippet?::
+
+ from testtools import TestCase
+ from testtools.content import Content
+ from testtools.content_type import UTF8_TEXT
+ from testtools.matchers import Equals
+
+ from myproject import SillySquareServer
+
+ class TestSillySquareServer(TestCase):
+
+ def setUp(self):
+                    super(TestSillySquareServer, self).setUp()
+ self.server = self.useFixture(SillySquareServer())
+ self.addCleanup(self.attach_log_file)
+
+ def attach_log_file(self):
+ self.addDetail(
+ 'log-file',
+                    Content(UTF8_TEXT,
+ lambda: open(self.server.logfile, 'r').readlines()))
+
+ def test_server_is_cool(self):
+ self.assertThat(self.server.temperature, Equals("cool"))
+
+ def test_square(self):
+ self.assertThat(self.server.silly_square_of(7), Equals(49))
+
+
+ Why use testtools?
+ ==================
+
+ Better assertion methods
+ ------------------------
+
+ The standard assertion methods that come with unittest aren't as helpful as
+ they could be, and there aren't quite enough of them. testtools adds
+ ``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives.
+
+
+ Matchers: better than assertion methods
+ ---------------------------------------
+
+ Of course, in any serious project you want to be able to have assertions that
+ are specific to that project and the particular problem that it is addressing.
+ Rather than forcing you to define your own assertion methods and maintain your
+ own inheritance hierarchy of ``TestCase`` classes, testtools lets you write
+ your own "matchers", custom predicates that can be plugged into a unit test::
+
+ def test_response_has_bold(self):
+ # The response has bold text.
+ response = self.server.getResponse()
+ self.assertThat(response, HTMLContains(Tag('bold', 'b')))
+
+
+ More debugging info, when you need it
+ --------------------------------------
+
+ testtools makes it easy to add arbitrary data to your test result. If you
+ want to know what's in a log file when a test fails, or what the load was on
+ the computer when a test started, or what files were open, you can add that
+ information with ``TestCase.addDetail``, and it will appear in the test
+ results if that test fails.
+
+
+ Extend unittest, but stay compatible and re-usable
+ --------------------------------------------------
+
+ testtools goes to great lengths to allow serious test authors and test
+ *framework* authors to do whatever they like with their tests and their
+ extensions while staying compatible with the standard library's unittest.
+
+ testtools has completely parametrized how exceptions raised in tests are
+ mapped to ``TestResult`` methods and how tests are actually executed (ever
+ wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?)
+
+ It also provides many simple but handy utilities, like the ability to clone a
+ test, a ``MultiTestResult`` object that lets many result objects get the
+ results from one test suite, adapters to bring legacy ``TestResult`` objects
+ into our new golden age.
+
+
+ Cross-Python compatibility
+ --------------------------
+
+ testtools gives you the very latest in unit testing technology in a way that
+ will work with Python 2.6, 2.7, 3.1 and 3.2.
+
+ If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
+        0.9.15. Up until then we supported Python 2.4 and 2.5, but the constraints of
+        avoiding newer language features became onerous as we added more support for
+        Python 3.
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
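
The ``HTMLContains`` matcher in the description above is illustrative rather than
something testtools ships; a minimal custom matcher in that spirit might look like
the following sketch (class and test names are made up)::

    import unittest

    from testtools import TestCase
    from testtools.matchers import Matcher, Mismatch


    class ContainsSubstring(Matcher):
        """Hypothetical matcher: matches if `needle` occurs in the text."""

        def __init__(self, needle):
            self.needle = needle

        def __str__(self):
            return "ContainsSubstring(%r)" % (self.needle,)

        def match(self, actual):
            if self.needle in actual:
                return None  # None signals a successful match
            return Mismatch("%r not found in %r" % (self.needle, actual))


    class ResponseTest(TestCase):

        def test_response_has_bold(self):
            response = "<p><b>hi</b></p>"
            self.assertThat(response, ContainsSubstring("<b>"))


    if __name__ == '__main__':
        unittest.main()
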
diff --git a/test/3rdparty/testtools-0.9.34/testtools.egg-info/SOURCES.txt b/test/3rdparty/testtools-0.9.34/testtools.egg-info/SOURCES.txt
new file mode 100644
index 00000000000..81fc3dd48f8
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools.egg-info/SOURCES.txt
@@ -0,0 +1,84 @@
+.gitignore
+LICENSE
+MANIFEST.in
+Makefile
+NEWS
+README.rst
+setup.cfg
+setup.py
+doc/.hacking.rst.swp
+doc/Makefile
+doc/conf.py
+doc/for-framework-folk.rst
+doc/for-framework-folk.rst~
+doc/for-test-authors.rst
+doc/for-test-authors.rst~
+doc/hacking.rst
+doc/index.rst
+doc/make.bat
+doc/overview.rst
+doc/_static/placeholder.txt
+doc/_templates/placeholder.txt
+testtools/__init__.py
+testtools/_compat2x.py
+testtools/_compat3x.py
+testtools/_spinner.py
+testtools/compat.py
+testtools/content.py
+testtools/content_type.py
+testtools/deferredruntest.py
+testtools/distutilscmd.py
+testtools/helpers.py
+testtools/monkey.py
+testtools/run.py
+testtools/runtest.py
+testtools/tags.py
+testtools/testcase.py
+testtools/testsuite.py
+testtools/utils.py
+testtools.egg-info/PKG-INFO
+testtools.egg-info/SOURCES.txt
+testtools.egg-info/dependency_links.txt
+testtools.egg-info/not-zip-safe
+testtools.egg-info/requires.txt
+testtools.egg-info/top_level.txt
+testtools/matchers/__init__.py
+testtools/matchers/_basic.py
+testtools/matchers/_datastructures.py
+testtools/matchers/_dict.py
+testtools/matchers/_doctest.py
+testtools/matchers/_exception.py
+testtools/matchers/_filesystem.py
+testtools/matchers/_higherorder.py
+testtools/matchers/_impl.py
+testtools/testresult/__init__.py
+testtools/testresult/doubles.py
+testtools/testresult/real.py
+testtools/tests/__init__.py
+testtools/tests/helpers.py
+testtools/tests/test_compat.py
+testtools/tests/test_content.py
+testtools/tests/test_content_type.py
+testtools/tests/test_deferredruntest.py
+testtools/tests/test_distutilscmd.py
+testtools/tests/test_fixturesupport.py
+testtools/tests/test_helpers.py
+testtools/tests/test_monkey.py
+testtools/tests/test_run.py
+testtools/tests/test_runtest.py
+testtools/tests/test_spinner.py
+testtools/tests/test_tags.py
+testtools/tests/test_testcase.py
+testtools/tests/test_testresult.py
+testtools/tests/test_testsuite.py
+testtools/tests/test_with_with.py
+testtools/tests/matchers/__init__.py
+testtools/tests/matchers/helpers.py
+testtools/tests/matchers/test_basic.py
+testtools/tests/matchers/test_datastructures.py
+testtools/tests/matchers/test_dict.py
+testtools/tests/matchers/test_doctest.py
+testtools/tests/matchers/test_exception.py
+testtools/tests/matchers/test_filesystem.py
+testtools/tests/matchers/test_higherorder.py
+testtools/tests/matchers/test_impl.py \ No newline at end of file
diff --git a/test/3rdparty/testtools-0.9.34/testtools.egg-info/dependency_links.txt b/test/3rdparty/testtools-0.9.34/testtools.egg-info/dependency_links.txt
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/test/3rdparty/testtools-0.9.34/testtools.egg-info/not-zip-safe b/test/3rdparty/testtools-0.9.34/testtools.egg-info/not-zip-safe
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/test/3rdparty/testtools-0.9.34/testtools.egg-info/requires.txt b/test/3rdparty/testtools-0.9.34/testtools.egg-info/requires.txt
new file mode 100644
index 00000000000..5786d3459c2
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools.egg-info/requires.txt
@@ -0,0 +1,2 @@
+extras
+python-mimeparse \ No newline at end of file
diff --git a/test/3rdparty/testtools-0.9.34/testtools.egg-info/top_level.txt b/test/3rdparty/testtools-0.9.34/testtools.egg-info/top_level.txt
new file mode 100644
index 00000000000..04da15eb365
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools.egg-info/top_level.txt
@@ -0,0 +1 @@
+testtools
diff --git a/test/3rdparty/testtools-0.9.34/testtools/__init__.py b/test/3rdparty/testtools-0.9.34/testtools/__init__.py
new file mode 100644
index 00000000000..62caae8545a
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/__init__.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Extensions to the standard Python unittest library."""
+
+__all__ = [
+ 'clone_test_with_new_id',
+ 'CopyStreamResult',
+ 'ConcurrentTestSuite',
+ 'ConcurrentStreamTestSuite',
+ 'DecorateTestCaseResult',
+ 'ErrorHolder',
+ 'ExpectedException',
+ 'ExtendedToOriginalDecorator',
+ 'ExtendedToStreamDecorator',
+ 'FixtureSuite',
+ 'iterate_tests',
+ 'MultipleExceptions',
+ 'MultiTestResult',
+ 'PlaceHolder',
+ 'run_test_with',
+ 'Tagger',
+ 'TestCase',
+ 'TestCommand',
+ 'TestByTestResult',
+ 'TestResult',
+ 'TestResultDecorator',
+ 'TextTestResult',
+ 'RunTest',
+ 'skip',
+ 'skipIf',
+ 'skipUnless',
+ 'StreamFailFast',
+ 'StreamResult',
+ 'StreamResultRouter',
+ 'StreamSummary',
+ 'StreamTagger',
+ 'StreamToDict',
+ 'StreamToExtendedDecorator',
+ 'StreamToQueue',
+ 'TestControl',
+ 'ThreadsafeForwardingResult',
+ 'TimestampingStreamResult',
+ 'try_import',
+ 'try_imports',
+ ]
+
+# Compat - removal announced in 0.9.25.
+try:
+ from extras import (
+ try_import,
+ try_imports,
+ )
+except ImportError:
+ # Support reading __init__ for __version__ without extras, because pip does
+ # not support setup_requires.
+ pass
+else:
+
+ from testtools.matchers._impl import (
+ Matcher,
+ )
+# Shut up, pyflakes. We are importing for documentation, not for namespacing.
+ Matcher
+
+ from testtools.runtest import (
+ MultipleExceptions,
+ RunTest,
+ )
+ from testtools.testcase import (
+ DecorateTestCaseResult,
+ ErrorHolder,
+ ExpectedException,
+ PlaceHolder,
+ TestCase,
+ clone_test_with_new_id,
+ run_test_with,
+ skip,
+ skipIf,
+ skipUnless,
+ )
+ from testtools.testresult import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestByTestResult,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ )
+ from testtools.testsuite import (
+ ConcurrentTestSuite,
+ ConcurrentStreamTestSuite,
+ FixtureSuite,
+ iterate_tests,
+ )
+ from testtools.distutilscmd import (
+ TestCommand,
+ )
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 9, 34, 'final', 0)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/_compat2x.py b/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
index 2b25c13e081..2b25c13e081 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/_compat2x.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/_compat2x.py
diff --git a/test/3rdparty/testtools-0.9.12/testtools/_compat3x.py b/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py
index f3d569662da..7a482c14b43 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/_compat3x.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/_compat3x.py
@@ -13,5 +13,5 @@ __all__ = [
def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
"""Re-raise an exception received from sys.exc_info() or similar."""
- raise exc_class(*exc_obj.args).with_traceback(exc_tb)
+ raise exc_obj.with_traceback(exc_tb)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/_spinner.py b/test/3rdparty/testtools-0.9.34/testtools/_spinner.py
index baf455a5f94..baf455a5f94 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/_spinner.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/_spinner.py
diff --git a/test/3rdparty/testtools-0.9.12/testtools/compat.py b/test/3rdparty/testtools-0.9.34/testtools/compat.py
index b7e23c8fec6..5502e0c2161 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/compat.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/compat.py
@@ -19,6 +19,7 @@ __all__ = [
]
import codecs
+import io
import linecache
import locale
import os
@@ -27,15 +28,14 @@ import sys
import traceback
import unicodedata
-from testtools.helpers import try_imports
+from extras import try_imports
BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO'])
StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
try:
from testtools import _compat2x as _compat
- _compat
-except SyntaxError:
+except (SyntaxError, ImportError):
from testtools import _compat3x as _compat
reraise = _compat.reraise
@@ -128,7 +128,7 @@ else:
def _slow_escape(text):
- """Escape unicode `text` leaving printable characters unmodified
+ """Escape unicode ``text`` leaving printable characters unmodified
The behaviour emulates the Python 3 implementation of repr, see
unicode_repr in unicodeobject.c and isprintable definition.
@@ -158,7 +158,8 @@ def _slow_escape(text):
def text_repr(text, multiline=None):
- """Rich repr for `text` returning unicode, triple quoted if `multiline`"""
+ """Rich repr for ``text`` returning unicode, triple quoted if ``multiline``.
+ """
is_py3k = sys.version_info > (3, 0)
nl = _isbytes(text) and bytes((0xA,)) or "\n"
if multiline is None:
@@ -169,7 +170,7 @@ def text_repr(text, multiline=None):
prefix = repr(text[:0])[:-2]
if multiline:
# To escape multiline strings, split and process each line in turn,
- # making sure that quotes are not escaped.
+ # making sure that quotes are not escaped.
if is_py3k:
offset = len(prefix) + 1
lines = []
@@ -215,14 +216,15 @@ def unicode_output_stream(stream):
The wrapper only allows unicode to be written, not non-ascii bytestrings,
which is a good thing to ensure sanity and sanitation.
"""
- if sys.platform == "cli":
- # Best to never encode before writing in IronPython
+ if (sys.platform == "cli" or
+ isinstance(stream, (io.TextIOWrapper, io.StringIO))):
+ # Best to never encode before writing in IronPython, or if it is
+        # already a TextIO (which in the io library has no encoding
+        # attribute).
return stream
try:
writer = codecs.getwriter(stream.encoding or "")
except (AttributeError, LookupError):
- # GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs
- # different handling as it doesn't want bytestrings
return codecs.getwriter("ascii")(stream, "replace")
if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
# The current stream has a unicode encoding so no error handler is needed
@@ -324,31 +326,33 @@ def _exception_to_text(evalue):
return None
-# GZ 2010-05-23: This function is huge and horrible and I welcome suggestions
-# on the best way to break it up
-_TB_HEADER = _u('Traceback (most recent call last):\n')
-def _format_exc_info(eclass, evalue, tb, limit=None):
- """Format a stack trace and the exception information as unicode
-
- Compatibility function for Python 2 which ensures each component of a
- traceback is correctly decoded according to its origins.
+def _format_stack_list(stack_lines):
+ """Format 'stack_lines' and return a list of unicode strings.
- Based on traceback.format_exception and related functions.
+ :param stack_lines: A list of filename, lineno, name, and line variables,
+ probably obtained by calling traceback.extract_tb or
+ traceback.extract_stack.
"""
fs_enc = sys.getfilesystemencoding()
- if tb:
- list = [_TB_HEADER]
- extracted_list = []
- for filename, lineno, name, line in traceback.extract_tb(tb, limit):
+ extracted_list = []
+ for filename, lineno, name, line in stack_lines:
extracted_list.append((
filename.decode(fs_enc, "replace"),
lineno,
name.decode("ascii", "replace"),
line and line.decode(
_get_source_encoding(filename), "replace")))
- list.extend(traceback.format_list(extracted_list))
- else:
- list = []
+ return traceback.format_list(extracted_list)
+
+
+def _format_exception_only(eclass, evalue):
+ """Format the excption part of a traceback.
+
+ :param eclass: The type of the exception being formatted.
+ :param evalue: The exception instance.
+ :returns: A list of unicode strings.
+ """
+ list = []
if evalue is None:
# Is a (deprecated) string exception
list.append((eclass + "\n").decode("ascii", "replace"))
@@ -377,6 +381,7 @@ def _format_exc_info(eclass, evalue, tb, limit=None):
else:
line = line.decode("ascii", "replace")
if filename:
+ fs_enc = sys.getfilesystemencoding()
filename = filename.decode(fs_enc, "replace")
evalue = eclass(msg, (filename, lineno, offset, line))
list.extend(traceback.format_exception_only(eclass, evalue))
@@ -387,7 +392,24 @@ def _format_exc_info(eclass, evalue, tb, limit=None):
list.append("%s: %s\n" % (sclass, svalue))
elif svalue is None:
# GZ 2010-05-24: Not a great fallback message, but keep for the moment
- list.append("%s: <unprintable %s object>\n" % (sclass, sclass))
+ list.append(_u("%s: <unprintable %s object>\n" % (sclass, sclass)))
else:
- list.append("%s\n" % sclass)
+ list.append(_u("%s\n" % sclass))
return list
+
+
+_TB_HEADER = _u('Traceback (most recent call last):\n')
+
+
+def _format_exc_info(eclass, evalue, tb, limit=None):
+ """Format a stack trace and the exception information as unicode
+
+ Compatibility function for Python 2 which ensures each component of a
+ traceback is correctly decoded according to its origins.
+
+ Based on traceback.format_exception and related functions.
+ """
+ return [_TB_HEADER] \
+ + _format_stack_list(traceback.extract_tb(tb, limit)) \
+ + _format_exception_only(eclass, evalue)
+
diff --git a/test/3rdparty/testtools-0.9.12/testtools/content.py b/test/3rdparty/testtools-0.9.34/testtools/content.py
index 2c6ed9f5860..09f44844524 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/content.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/content.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Content - a MIME-like Content object."""
@@ -7,17 +7,30 @@ __all__ = [
'Content',
'content_from_file',
'content_from_stream',
+ 'json_content',
'text_content',
'TracebackContent',
]
import codecs
+import inspect
+import json
import os
+import sys
+import traceback
+
+from extras import try_import
+
+from testtools.compat import (
+ _b,
+ _format_exception_only,
+ _format_stack_list,
+ _TB_HEADER,
+ _u,
+ str_is_unicode,
+)
+from testtools.content_type import ContentType, JSON, UTF8_TEXT
-from testtools import try_import
-from testtools.compat import _b
-from testtools.content_type import ContentType, UTF8_TEXT
-from testtools.testresult import TestResult
functools = try_import('functools')
@@ -26,13 +39,20 @@ _join_b = _b("").join
DEFAULT_CHUNK_SIZE = 4096
+STDOUT_LINE = '\nStdout:\n%s'
+STDERR_LINE = '\nStderr:\n%s'
-def _iter_chunks(stream, chunk_size):
+
+def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
"""Read 'stream' in chunks of 'chunk_size'.
:param stream: A file-like object to read from.
:param chunk_size: The size of each read from 'stream'.
+ :param seek_offset: If non-None, seek before iterating.
+ :param seek_whence: Pass through to the seek call, if seeking.
"""
+ if seek_offset is not None:
+ stream.seek(seek_offset, seek_whence)
chunk = stream.read(chunk_size)
while chunk:
yield chunk
@@ -63,6 +83,15 @@ class Content(object):
return (self.content_type == other.content_type and
_join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
+ def as_text(self):
+ """Return all of the content as text.
+
+ This is only valid where ``iter_text`` is. It will load all of the
+ content into memory. Where this is a concern, use ``iter_text``
+ instead.
+ """
+ return _u('').join(self.iter_text())
+
def iter_bytes(self):
"""Iterate over bytestrings of the serialised content."""
return self._get_bytes()
@@ -101,25 +130,139 @@ class Content(object):
self.content_type, _join_b(self.iter_bytes()))
-class TracebackContent(Content):
- """Content object for tracebacks.
+class StackLinesContent(Content):
+ """Content object for stack lines.
+
+ This adapts a list of "preprocessed" stack lines into a content object.
+ The stack lines are most likely produced from ``traceback.extract_stack``
+ or ``traceback.extract_tb``.
- This adapts an exc_info tuple to the Content interface.
text/x-traceback;language=python is used for the mime type, in order to
provide room for other languages to format their tracebacks differently.
"""
- def __init__(self, err, test):
- """Create a TracebackContent for err."""
- if err is None:
- raise ValueError("err may not be None")
+ # Whether or not to hide layers of the stack trace that are
+ # unittest/testtools internal code. Defaults to True since the
+ # system-under-test is rarely unittest or testtools.
+ HIDE_INTERNAL_STACK = True
+
+ def __init__(self, stack_lines, prefix_content="", postfix_content=""):
+ """Create a StackLinesContent for ``stack_lines``.
+
+ :param stack_lines: A list of preprocessed stack lines, probably
+ obtained by calling ``traceback.extract_stack`` or
+ ``traceback.extract_tb``.
+ :param prefix_content: If specified, a unicode string to prepend to the
+ text content.
+ :param postfix_content: If specified, a unicode string to append to the
+ text content.
+ """
content_type = ContentType('text', 'x-traceback',
{"language": "python", "charset": "utf8"})
- self._result = TestResult()
- value = self._result._exc_info_to_unicode(err, test)
- super(TracebackContent, self).__init__(
+ value = prefix_content + \
+ self._stack_lines_to_unicode(stack_lines) + \
+ postfix_content
+ super(StackLinesContent, self).__init__(
content_type, lambda: [value.encode("utf8")])
+ def _stack_lines_to_unicode(self, stack_lines):
+ """Converts a list of pre-processed stack lines into a unicode string.
+ """
+
+ # testtools customization. When str is unicode (e.g. IronPython,
+ # Python 3), traceback.format_exception returns unicode. For Python 2,
+ # it returns bytes. We need to guarantee unicode.
+ if str_is_unicode:
+ format_stack_lines = traceback.format_list
+ else:
+ format_stack_lines = _format_stack_list
+
+ msg_lines = format_stack_lines(stack_lines)
+
+ return ''.join(msg_lines)
+
+
+def TracebackContent(err, test):
+ """Content object for tracebacks.
+
+ This adapts an exc_info tuple to the Content interface.
+ text/x-traceback;language=python is used for the mime type, in order to
+ provide room for other languages to format their tracebacks differently.
+ """
+ if err is None:
+ raise ValueError("err may not be None")
+
+ exctype, value, tb = err
+ # Skip test runner traceback levels
+ if StackLinesContent.HIDE_INTERNAL_STACK:
+ while tb and '__unittest' in tb.tb_frame.f_globals:
+ tb = tb.tb_next
+
+ # testtools customization. When str is unicode (e.g. IronPython,
+ # Python 3), traceback.format_exception_only returns unicode. For Python 2,
+ # it returns bytes. We need to guarantee unicode.
+ if str_is_unicode:
+ format_exception_only = traceback.format_exception_only
+ else:
+ format_exception_only = _format_exception_only
+
+ limit = None
+ # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
+ if (False
+ and StackLinesContent.HIDE_INTERNAL_STACK
+ and test.failureException
+ and isinstance(value, test.failureException)):
+ # Skip assert*() traceback levels
+ limit = 0
+ while tb and not self._is_relevant_tb_level(tb):
+ limit += 1
+ tb = tb.tb_next
+
+ prefix = _TB_HEADER
+ stack_lines = traceback.extract_tb(tb, limit)
+ postfix = ''.join(format_exception_only(exctype, value))
+
+ return StackLinesContent(stack_lines, prefix, postfix)
+
+
+def StacktraceContent(prefix_content="", postfix_content=""):
+ """Content object for stack traces.
+
+ This function will create and return a content object that contains a
+ stack trace.
+
+ The mime type is set to 'text/x-traceback;language=python', so other
+ languages can format their stack traces differently.
+
+ :param prefix_content: A unicode string to add before the stack lines.
+ :param postfix_content: A unicode string to add after the stack lines.
+ """
+ stack = inspect.stack()[1:]
+
+ if StackLinesContent.HIDE_INTERNAL_STACK:
+ limit = 1
+ while limit < len(stack) and '__unittest' not in stack[limit][0].f_globals:
+ limit += 1
+ else:
+ limit = -1
+
+ frames_only = [line[0] for line in stack[:limit]]
+ processed_stack = [ ]
+ for frame in reversed(frames_only):
+ filename, line, function, context, _ = inspect.getframeinfo(frame)
+ context = ''.join(context)
+ processed_stack.append((filename, line, function, context))
+ return StackLinesContent(processed_stack, prefix_content, postfix_content)
+
+
+def json_content(json_data):
+ """Create a JSON `Content` object from JSON-encodeable data."""
+ data = json.dumps(json_data)
+ if str_is_unicode:
+ # The json module perversely returns native str not bytes
+ data = data.encode('utf8')
+ return Content(JSON, lambda: [data])
+
def text_content(text):
"""Create a `Content` object from some text.
@@ -129,7 +272,6 @@ def text_content(text):
return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
-
def maybe_wrap(wrapper, func):
"""Merge metadata for func into wrapper if functools is present."""
if functools is not None:
@@ -138,7 +280,7 @@ def maybe_wrap(wrapper, func):
def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
- buffer_now=False):
+ buffer_now=False, seek_offset=None, seek_whence=0):
"""Create a `Content` object from a file on disk.
Note that unless 'read_now' is explicitly passed in as True, the file
@@ -148,9 +290,11 @@ def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
- Defaults to `DEFAULT_CHUNK_SIZE`.
+ Defaults to ``DEFAULT_CHUNK_SIZE``.
:param buffer_now: If True, read the file from disk now and keep it in
memory. Otherwise, only read when the content is serialized.
+ :param seek_offset: If non-None, seek within the stream before reading it.
+ :param seek_whence: If supplied, pass to stream.seek() when seeking.
"""
if content_type is None:
content_type = UTF8_TEXT
@@ -159,14 +303,15 @@ def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
# We drop older python support we can make this use a context manager
# for maximum simplicity.
stream = open(path, 'rb')
- for chunk in _iter_chunks(stream, chunk_size):
+ for chunk in _iter_chunks(stream, chunk_size, seek_offset, seek_whence):
yield chunk
stream.close()
return content_from_reader(reader, content_type, buffer_now)
def content_from_stream(stream, content_type=None,
- chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False):
+ chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
+ seek_offset=None, seek_whence=0):
"""Create a `Content` object from a file-like stream.
Note that the stream will only be read from when ``iter_bytes`` is
@@ -177,13 +322,15 @@ def content_from_stream(stream, content_type=None,
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
- Defaults to `DEFAULT_CHUNK_SIZE`.
+ Defaults to ``DEFAULT_CHUNK_SIZE``.
:param buffer_now: If True, reads from the stream right now. Otherwise,
only reads when the content is serialized. Defaults to False.
+ :param seek_offset: If non-None, seek within the stream before reading it.
+ :param seek_whence: If supplied, pass to stream.seek() when seeking.
"""
if content_type is None:
content_type = UTF8_TEXT
- reader = lambda: _iter_chunks(stream, chunk_size)
+ reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
return content_from_reader(reader, content_type, buffer_now)
@@ -208,7 +355,7 @@ def attach_file(detailed, path, name=None, content_type=None,
chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
"""Attach a file to this test as a detail.
- This is a convenience method wrapping around `addDetail`.
+ This is a convenience method wrapping around ``addDetail``.
Note that unless 'read_now' is explicitly passed in as True, the file
*must* exist when the test result is called with the results of this
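
A rough sketch of the content.py additions in this version (``json_content``,
``Content.as_text`` and the ``seek_offset``/``seek_whence`` arguments); the detail
names and temporary file below are made up for illustration::

    import tempfile
    import unittest

    from testtools import TestCase
    from testtools.content import content_from_file, json_content, text_content


    class ContentExampleTest(TestCase):

        def test_details(self):
            # json_content stores JSON-encodeable data as application/json.
            self.addDetail('config', json_content({'workers': 4, 'verbose': True}))

            # as_text() joins iter_text() into a single unicode string.
            self.assertEqual('hello world', text_content('hello world').as_text())

            # seek_offset/seek_whence start reading partway into a file; here
            # the first 8 bytes ("HEADER: ") are skipped.
            with tempfile.NamedTemporaryFile(delete=False) as f:
                f.write(b'HEADER: interesting tail\n')
                path = f.name
            self.addDetail(
                'log-tail',
                content_from_file(path, seek_offset=8, buffer_now=True))


    if __name__ == '__main__':
        unittest.main()
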
diff --git a/test/3rdparty/testtools-0.9.12/testtools/content_type.py b/test/3rdparty/testtools-0.9.34/testtools/content_type.py
index 82c301b38df..bbf314b492e 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/content_type.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/content_type.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""ContentType - a MIME Content Type."""
@@ -29,11 +29,13 @@ class ContentType(object):
def __repr__(self):
if self.parameters:
params = '; '
- params += ', '.join(
- '%s="%s"' % (k, v) for k, v in self.parameters.items())
+ params += '; '.join(
+ sorted('%s="%s"' % (k, v) for k, v in self.parameters.items()))
else:
params = ''
return "%s/%s%s" % (self.type, self.subtype, params)
+JSON = ContentType('application', 'json')
+
UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})
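
For reference, a sketch (not taken from the test suite) of the observable effect of
the ``__repr__`` change, which now sorts parameters and joins them with '; ', plus
the new ``JSON`` constant::

    from testtools.content_type import JSON, UTF8_TEXT, ContentType

    print(repr(JSON))        # application/json
    print(repr(UTF8_TEXT))   # text/plain; charset="utf8"

    ct = ContentType('text', 'x-traceback',
                     {'language': 'python', 'charset': 'utf8'})
    print(repr(ct))          # text/x-traceback; charset="utf8"; language="python"
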
diff --git a/test/3rdparty/testtools-0.9.12/testtools/deferredruntest.py b/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py
index b8bfaaaa39f..cf33c06e277 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/deferredruntest.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/deferredruntest.py
@@ -57,7 +57,7 @@ class SynchronousDeferredRunTest(_DeferredRunTest):
def run_with_log_observers(observers, function, *args, **kwargs):
"""Run 'function' with the given Twisted log observers."""
- real_observers = log.theLogPublisher.observers
+ real_observers = list(log.theLogPublisher.observers)
for observer in real_observers:
log.theLogPublisher.removeObserver(observer)
for observer in observers:
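
The ``list()`` copy above matters because the loop that follows removes observers
from the sequence it is iterating over; a standalone illustration of the failure
mode (plain Python, not testtools-specific)::

    observers = ['a', 'b', 'c']
    for obs in observers:        # iterating the live list...
        observers.remove(obs)    # ...while mutating it underneath the iterator
    print(observers)             # ['b'] -- 'b' was never visited

    observers = ['a', 'b', 'c']
    for obs in list(observers):  # iterate a snapshot, as the fix does
        observers.remove(obs)
    print(observers)             # []
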
diff --git a/test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py b/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py
index 91e14ca504f..91e14ca504f 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/distutilscmd.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/distutilscmd.py
diff --git a/test/3rdparty/testtools-0.9.34/testtools/helpers.py b/test/3rdparty/testtools-0.9.34/testtools/helpers.py
new file mode 100644
index 00000000000..401d2cc10ed
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/helpers.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'safe_hasattr',
+ 'try_import',
+ 'try_imports',
+ ]
+
+import sys
+
+# Compat - removal announced in 0.9.25.
+from extras import (
+ safe_hasattr,
+ try_import,
+ try_imports,
+ )
+
+
+def map_values(function, dictionary):
+ """Map ``function`` across the values of ``dictionary``.
+
+ :return: A dict with the same keys as ``dictionary``, where the value
+ of each key ``k`` is ``function(dictionary[k])``.
+ """
+ return dict((k, function(dictionary[k])) for k in dictionary)
+
+
+def filter_values(function, dictionary):
+ """Filter ``dictionary`` by its values using ``function``."""
+ return dict((k, v) for k, v in dictionary.items() if function(v))
+
+
+def dict_subtract(a, b):
+ """Return the part of ``a`` that's not in ``b``."""
+ return dict((k, a[k]) for k in set(a) - set(b))
+
+
+def list_subtract(a, b):
+ """Return a list ``a`` without the elements of ``b``.
+
+    If a particular value is in ``a`` twice and in ``b`` once, then that value
+    will appear once in the returned list.
+ """
+ a_only = list(a)
+ for x in b:
+ if x in a_only:
+ a_only.remove(x)
+ return a_only
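
A quick sketch of what the dictionary and list helpers above do (the values are
arbitrary; printed key order may vary by Python version)::

    from testtools.helpers import (
        dict_subtract,
        filter_values,
        list_subtract,
        map_values,
        )

    print(map_values(len, {'a': 'xx', 'b': 'yyy'}))   # {'a': 2, 'b': 3}
    print(filter_values(bool, {'a': 0, 'b': 1}))      # {'b': 1}
    print(dict_subtract({'a': 1, 'b': 2}, {'b': 0}))  # {'a': 1}
    print(list_subtract([1, 1, 2, 3], [1, 3]))        # [1, 2]
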
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py
new file mode 100644
index 00000000000..771d8142b32
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/__init__.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""All the matchers.
+
+Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
+"""
+
+__all__ = [
+ 'AfterPreprocessing',
+ 'AllMatch',
+ 'Annotate',
+ 'AnyMatch',
+ 'Contains',
+ 'ContainsAll',
+ 'ContainedByDict',
+ 'ContainsDict',
+ 'DirContains',
+ 'DirExists',
+ 'DocTestMatches',
+ 'EndsWith',
+ 'Equals',
+ 'FileContains',
+ 'FileExists',
+ 'GreaterThan',
+ 'HasLength',
+ 'HasPermissions',
+ 'Is',
+ 'IsInstance',
+ 'KeysEqual',
+ 'LessThan',
+ 'MatchesAll',
+ 'MatchesAny',
+ 'MatchesDict',
+ 'MatchesException',
+ 'MatchesListwise',
+ 'MatchesPredicate',
+ 'MatchesPredicateWithParams',
+ 'MatchesRegex',
+ 'MatchesSetwise',
+ 'MatchesStructure',
+ 'NotEquals',
+ 'Not',
+ 'PathExists',
+ 'Raises',
+ 'raises',
+ 'SamePath',
+ 'StartsWith',
+ 'TarballContains',
+ ]
+
+from ._basic import (
+ Contains,
+ EndsWith,
+ Equals,
+ GreaterThan,
+ HasLength,
+ Is,
+ IsInstance,
+ LessThan,
+ MatchesRegex,
+ NotEquals,
+ StartsWith,
+ )
+from ._datastructures import (
+ ContainsAll,
+ MatchesListwise,
+ MatchesSetwise,
+ MatchesStructure,
+ )
+from ._dict import (
+ ContainedByDict,
+ ContainsDict,
+ KeysEqual,
+ MatchesDict,
+ )
+from ._doctest import (
+ DocTestMatches,
+ )
+from ._exception import (
+ MatchesException,
+ Raises,
+ raises,
+ )
+from ._filesystem import (
+ DirContains,
+ DirExists,
+ FileContains,
+ FileExists,
+ HasPermissions,
+ PathExists,
+ SamePath,
+ TarballContains,
+ )
+from ._higherorder import (
+ AfterPreprocessing,
+ AllMatch,
+ Annotate,
+ AnyMatch,
+ MatchesAll,
+ MatchesAny,
+ MatchesPredicate,
+ MatchesPredicateWithParams,
+ Not,
+ )
+
+# XXX: These are not explicitly included in __all__. It's unclear how much of
+# the public interface they really are.
+from ._impl import (
+ Matcher,
+ Mismatch,
+ MismatchError,
+ )
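
For orientation, a short sketch of how the re-exported matchers compose inside a
test; every name used here comes from ``__all__`` above, the test class itself is
made up::

    import unittest

    from testtools import TestCase
    from testtools.matchers import (
        AllMatch,
        Equals,
        MatchesAny,
        Not,
        StartsWith,
        )


    class MatcherCompositionTest(TestCase):

        def test_composition(self):
            self.assertThat('testtools', StartsWith('test'))
            self.assertThat(3, MatchesAny(Equals(3), Equals(4)))
            self.assertThat([2, 2, 2], AllMatch(Equals(2)))
            self.assertThat('bzr', Not(Equals('git')))


    if __name__ == '__main__':
        unittest.main()
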
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py
new file mode 100644
index 00000000000..2d9f143f10e
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_basic.py
@@ -0,0 +1,326 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'Contains',
+ 'EndsWith',
+ 'Equals',
+ 'GreaterThan',
+ 'HasLength',
+ 'Is',
+ 'IsInstance',
+ 'LessThan',
+ 'MatchesRegex',
+ 'NotEquals',
+ 'StartsWith',
+ ]
+
+import operator
+from pprint import pformat
+import re
+
+from ..compat import (
+ _isbytes,
+ istext,
+ str_is_unicode,
+ text_repr,
+ )
+from ..helpers import list_subtract
+from ._higherorder import (
+ MatchesPredicateWithParams,
+ PostfixedMismatch,
+ )
+from ._impl import (
+ Matcher,
+ Mismatch,
+ )
+
+
+def _format(thing):
+ """
+ Blocks of text with newlines are formatted as triple-quote
+ strings. Everything else is pretty-printed.
+ """
+ if istext(thing) or _isbytes(thing):
+ return text_repr(thing)
+ return pformat(thing)
+
+
+class _BinaryComparison(object):
+ """Matcher that compares an object to another object."""
+
+ def __init__(self, expected):
+ self.expected = expected
+
+ def __str__(self):
+ return "%s(%r)" % (self.__class__.__name__, self.expected)
+
+ def match(self, other):
+ if self.comparator(other, self.expected):
+ return None
+ return _BinaryMismatch(self.expected, self.mismatch_string, other)
+
+ def comparator(self, expected, other):
+ raise NotImplementedError(self.comparator)
+
+
+class _BinaryMismatch(Mismatch):
+ """Two things did not match."""
+
+ def __init__(self, expected, mismatch_string, other):
+ self.expected = expected
+ self._mismatch_string = mismatch_string
+ self.other = other
+
+ def describe(self):
+ left = repr(self.expected)
+ right = repr(self.other)
+ if len(left) + len(right) > 70:
+ return "%s:\nreference = %s\nactual = %s\n" % (
+ self._mismatch_string, _format(self.expected),
+ _format(self.other))
+ else:
+ return "%s %s %s" % (left, self._mismatch_string, right)
+
+
+class Equals(_BinaryComparison):
+ """Matches if the items are equal."""
+
+ comparator = operator.eq
+ mismatch_string = '!='
+
+
+class NotEquals(_BinaryComparison):
+ """Matches if the items are not equal.
+
+ In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
+ only matters when testing ``__ne__`` implementations.
+ """
+
+ comparator = operator.ne
+ mismatch_string = '=='
+
+
+class Is(_BinaryComparison):
+ """Matches if the items are identical."""
+
+ comparator = operator.is_
+ mismatch_string = 'is not'
+
+
+class LessThan(_BinaryComparison):
+ """Matches if the item is less than the matchers reference object."""
+
+ comparator = operator.__lt__
+ mismatch_string = 'is not >'
+
+
+class GreaterThan(_BinaryComparison):
+ """Matches if the item is greater than the matchers reference object."""
+
+ comparator = operator.__gt__
+ mismatch_string = 'is not <'
+
+
+class SameMembers(Matcher):
+ """Matches if two iterators have the same members.
+
+ This is not the same as set equivalence. The two iterators must be of the
+ same length and have the same repetitions.
+ """
+
+ def __init__(self, expected):
+ super(SameMembers, self).__init__()
+ self.expected = expected
+
+ def __str__(self):
+ return '%s(%r)' % (self.__class__.__name__, self.expected)
+
+ def match(self, observed):
+ expected_only = list_subtract(self.expected, observed)
+ observed_only = list_subtract(observed, self.expected)
+ if expected_only == observed_only == []:
+ return
+ return PostfixedMismatch(
+ "\nmissing: %s\nextra: %s" % (
+ _format(expected_only), _format(observed_only)),
+ _BinaryMismatch(self.expected, 'elements differ', observed))
+
+
+class DoesNotStartWith(Mismatch):
+
+ def __init__(self, matchee, expected):
+ """Create a DoesNotStartWith Mismatch.
+
+ :param matchee: the string that did not match.
+ :param expected: the string that 'matchee' was expected to start with.
+ """
+ self.matchee = matchee
+ self.expected = expected
+
+ def describe(self):
+ return "%s does not start with %s." % (
+ text_repr(self.matchee), text_repr(self.expected))
+
+
+class StartsWith(Matcher):
+ """Checks whether one string starts with another."""
+
+ def __init__(self, expected):
+ """Create a StartsWith Matcher.
+
+ :param expected: the string that matchees should start with.
+ """
+ self.expected = expected
+
+ def __str__(self):
+ return "StartsWith(%r)" % (self.expected,)
+
+ def match(self, matchee):
+ if not matchee.startswith(self.expected):
+ return DoesNotStartWith(matchee, self.expected)
+ return None
+
+
+class DoesNotEndWith(Mismatch):
+
+ def __init__(self, matchee, expected):
+ """Create a DoesNotEndWith Mismatch.
+
+ :param matchee: the string that did not match.
+ :param expected: the string that 'matchee' was expected to end with.
+ """
+ self.matchee = matchee
+ self.expected = expected
+
+ def describe(self):
+ return "%s does not end with %s." % (
+ text_repr(self.matchee), text_repr(self.expected))
+
+
+class EndsWith(Matcher):
+ """Checks whether one string ends with another."""
+
+ def __init__(self, expected):
+ """Create a EndsWith Matcher.
+
+ :param expected: the string that matchees should end with.
+ """
+ self.expected = expected
+
+ def __str__(self):
+ return "EndsWith(%r)" % (self.expected,)
+
+ def match(self, matchee):
+ if not matchee.endswith(self.expected):
+ return DoesNotEndWith(matchee, self.expected)
+ return None
+
+
+class IsInstance(object):
+ """Matcher that wraps isinstance."""
+
+ def __init__(self, *types):
+ self.types = tuple(types)
+
+ def __str__(self):
+ return "%s(%s)" % (self.__class__.__name__,
+ ', '.join(type.__name__ for type in self.types))
+
+ def match(self, other):
+ if isinstance(other, self.types):
+ return None
+ return NotAnInstance(other, self.types)
+
+
+class NotAnInstance(Mismatch):
+
+ def __init__(self, matchee, types):
+ """Create a NotAnInstance Mismatch.
+
+ :param matchee: the thing which is not an instance of any of types.
+ :param types: A tuple of the types which were expected.
+ """
+ self.matchee = matchee
+ self.types = types
+
+ def describe(self):
+ if len(self.types) == 1:
+ typestr = self.types[0].__name__
+ else:
+ typestr = 'any of (%s)' % ', '.join(type.__name__ for type in
+ self.types)
+ return "'%s' is not an instance of %s" % (self.matchee, typestr)
+
+
+class DoesNotContain(Mismatch):
+
+ def __init__(self, matchee, needle):
+ """Create a DoesNotContain Mismatch.
+
+ :param matchee: the object that did not contain needle.
+ :param needle: the needle that 'matchee' was expected to contain.
+ """
+ self.matchee = matchee
+ self.needle = needle
+
+ def describe(self):
+ return "%r not in %r" % (self.needle, self.matchee)
+
+
+class Contains(Matcher):
+ """Checks whether something is contained in another thing."""
+
+ def __init__(self, needle):
+ """Create a Contains Matcher.
+
+ :param needle: the thing that needs to be contained by matchees.
+ """
+ self.needle = needle
+
+ def __str__(self):
+ return "Contains(%r)" % (self.needle,)
+
+ def match(self, matchee):
+ try:
+ if self.needle not in matchee:
+ return DoesNotContain(matchee, self.needle)
+ except TypeError:
+ # e.g. 1 in 2 will raise TypeError
+ return DoesNotContain(matchee, self.needle)
+ return None
+
+
+class MatchesRegex(object):
+ """Matches if the matchee is matched by a regular expression."""
+
+ def __init__(self, pattern, flags=0):
+ self.pattern = pattern
+ self.flags = flags
+
+ def __str__(self):
+ args = ['%r' % self.pattern]
+ flag_arg = []
+ # dir() sorts the attributes for us, so we don't need to do it again.
+ for flag in dir(re):
+ if len(flag) == 1:
+ if self.flags & getattr(re, flag):
+ flag_arg.append('re.%s' % flag)
+ if flag_arg:
+ args.append('|'.join(flag_arg))
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
+
+ def match(self, value):
+ if not re.match(self.pattern, value, self.flags):
+ pattern = self.pattern
+ if not isinstance(pattern, str_is_unicode and str or unicode):
+ pattern = pattern.decode("latin1")
+ pattern = pattern.encode("unicode_escape").decode("ascii")
+ return Mismatch("%r does not match /%s/" % (
+ value, pattern.replace("\\\\", "\\")))
+
+
+def has_len(x, y):
+ return len(x) == y
+
+
+HasLength = MatchesPredicateWithParams(has_len, "len({0}) != {1}", "HasLength")
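
As the classes above show, a matcher returns ``None`` on success and a ``Mismatch``
on failure; a small sketch with two of the basic matchers::

    from testtools.matchers import Contains, MatchesRegex

    print(Contains(3).match([1, 2, 3]))    # None -- the match succeeded
    print(Contains(9).match([1, 2, 3]).describe())
    # 9 not in [1, 2, 3]

    print(MatchesRegex(r'^\d+$').match('123'))  # None
    print(MatchesRegex(r'^\d+$').match('12a').describe())
    # '12a' does not match /^\d+$/
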
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py
new file mode 100644
index 00000000000..70de790738a
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_datastructures.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'ContainsAll',
+ 'MatchesListwise',
+ 'MatchesSetwise',
+ 'MatchesStructure',
+ ]
+
+"""Matchers that operate with knowledge of Python data structures."""
+
+from ..helpers import map_values
+from ._higherorder import (
+ Annotate,
+ MatchesAll,
+ MismatchesAll,
+ )
+from ._impl import Mismatch
+
+
+def ContainsAll(items):
+ """Make a matcher that checks whether a list of things is contained
+ in another thing.
+
+ The matcher effectively checks that the provided sequence is a subset of
+ the matchee.
+ """
+ from ._basic import Contains
+ return MatchesAll(*map(Contains, items), first_only=False)
+
+
+class MatchesListwise(object):
+ """Matches if each matcher matches the corresponding value.
+
+ More easily explained by example than in words:
+
+ >>> from ._basic import Equals
+ >>> MatchesListwise([Equals(1)]).match([1])
+ >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
+ >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
+ Differences: [
+ 1 != 2
+ 2 != 1
+ ]
+ >>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True)
+ >>> print (matcher.match([3, 4]).describe())
+ 1 != 3
+ """
+
+ def __init__(self, matchers, first_only=False):
+ """Construct a MatchesListwise matcher.
+
+        :param matchers: A list of matchers that the matched values must match.
+ :param first_only: If True, then only report the first mismatch,
+ otherwise report all of them. Defaults to False.
+ """
+ self.matchers = matchers
+ self.first_only = first_only
+
+ def match(self, values):
+ from ._basic import Equals
+ mismatches = []
+ length_mismatch = Annotate(
+ "Length mismatch", Equals(len(self.matchers))).match(len(values))
+ if length_mismatch:
+ mismatches.append(length_mismatch)
+ for matcher, value in zip(self.matchers, values):
+ mismatch = matcher.match(value)
+ if mismatch:
+ if self.first_only:
+ return mismatch
+ mismatches.append(mismatch)
+ if mismatches:
+ return MismatchesAll(mismatches)
+
+
+class MatchesStructure(object):
+ """Matcher that matches an object structurally.
+
+ 'Structurally' here means that attributes of the object being matched are
+ compared against given matchers.
+
+ `fromExample` allows the creation of a matcher from a prototype object and
+ then modified versions can be created with `update`.
+
+ `byEquality` creates a matcher in much the same way as the constructor,
+ except that the matcher for each of the attributes is assumed to be
+ `Equals`.
+
+ `byMatcher` creates a similar matcher to `byEquality`, but you get to pick
+ the matcher, rather than just using `Equals`.
+ """
+
+ def __init__(self, **kwargs):
+ """Construct a `MatchesStructure`.
+
+ :param kwargs: A mapping of attributes to matchers.
+ """
+ self.kws = kwargs
+
+ @classmethod
+ def byEquality(cls, **kwargs):
+ """Matches an object where the attributes equal the keyword values.
+
+ Similar to the constructor, except that the matcher is assumed to be
+ Equals.
+ """
+ from ._basic import Equals
+ return cls.byMatcher(Equals, **kwargs)
+
+ @classmethod
+ def byMatcher(cls, matcher, **kwargs):
+ """Matches an object where the attributes match the keyword values.
+
+ Similar to the constructor, except that the provided matcher is used
+ to match all of the values.
+ """
+ return cls(**map_values(matcher, kwargs))
+
+ @classmethod
+ def fromExample(cls, example, *attributes):
+ from ._basic import Equals
+ kwargs = {}
+ for attr in attributes:
+ kwargs[attr] = Equals(getattr(example, attr))
+ return cls(**kwargs)
+
+ def update(self, **kws):
+ new_kws = self.kws.copy()
+ for attr, matcher in kws.items():
+ if matcher is None:
+ new_kws.pop(attr, None)
+ else:
+ new_kws[attr] = matcher
+ return type(self)(**new_kws)
+
+ def __str__(self):
+ kws = []
+ for attr, matcher in sorted(self.kws.items()):
+ kws.append("%s=%s" % (attr, matcher))
+ return "%s(%s)" % (self.__class__.__name__, ', '.join(kws))
+
+ def match(self, value):
+ matchers = []
+ values = []
+ for attr, matcher in sorted(self.kws.items()):
+ matchers.append(Annotate(attr, matcher))
+ values.append(getattr(value, attr))
+ return MatchesListwise(matchers).match(values)
+
+
+class MatchesSetwise(object):
+ """Matches if all the matchers match elements of the value being matched.
+
+ That is, each element in the 'observed' set must match exactly one matcher
+ from the set of matchers, with no matchers left over.
+
+ The difference compared to `MatchesListwise` is that the order of the
+ matchings does not matter.
+ """
+
+ def __init__(self, *matchers):
+ self.matchers = matchers
+
+ def match(self, observed):
+ remaining_matchers = set(self.matchers)
+ not_matched = []
+ for value in observed:
+ for matcher in remaining_matchers:
+ if matcher.match(value) is None:
+ remaining_matchers.remove(matcher)
+ break
+ else:
+ not_matched.append(value)
+ if not_matched or remaining_matchers:
+ remaining_matchers = list(remaining_matchers)
+ # There are various cases that all should be reported somewhat
+ # differently.
+
+ # There are two trivial cases:
+ # 1) There are just some matchers left over.
+ # 2) There are just some values left over.
+
+ # Then there are three more interesting cases:
+ # 3) There are the same number of matchers and values left over.
+ # 4) There are more matchers left over than values.
+ # 5) There are more values left over than matchers.
+
+ if len(not_matched) == 0:
+ if len(remaining_matchers) > 1:
+ msg = "There were %s matchers left over: " % (
+ len(remaining_matchers),)
+ else:
+ msg = "There was 1 matcher left over: "
+ msg += ', '.join(map(str, remaining_matchers))
+ return Mismatch(msg)
+ elif len(remaining_matchers) == 0:
+ if len(not_matched) > 1:
+ return Mismatch(
+ "There were %s values left over: %s" % (
+ len(not_matched), not_matched))
+ else:
+ return Mismatch(
+ "There was 1 value left over: %s" % (
+ not_matched, ))
+ else:
+ common_length = min(len(remaining_matchers), len(not_matched))
+ if common_length == 0:
+ raise AssertionError("common_length can't be 0 here")
+ if common_length > 1:
+ msg = "There were %s mismatches" % (common_length,)
+ else:
+ msg = "There was 1 mismatch"
+ if len(remaining_matchers) > len(not_matched):
+ extra_matchers = remaining_matchers[common_length:]
+ msg += " and %s extra matcher" % (len(extra_matchers), )
+ if len(extra_matchers) > 1:
+ msg += "s"
+ msg += ': ' + ', '.join(map(str, extra_matchers))
+ elif len(not_matched) > len(remaining_matchers):
+ extra_values = not_matched[common_length:]
+ msg += " and %s extra value" % (len(extra_values), )
+ if len(extra_values) > 1:
+ msg += "s"
+ msg += ': ' + str(extra_values)
+ return Annotate(
+ msg, MatchesListwise(remaining_matchers[:common_length])
+ ).match(not_matched[:common_length])
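
A hedged sketch of ``MatchesStructure.byEquality`` and ``MatchesSetwise`` in
action, using a throwaway ``Point`` class purely for illustration::

    from testtools.matchers import Equals, MatchesSetwise, MatchesStructure


    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y


    # Attribute-by-attribute comparison; None means everything matched.
    print(MatchesStructure.byEquality(x=1, y=2).match(Point(1, 2)))  # None
    print(MatchesStructure(x=Equals(1)).match(Point(5, 2)).describe())

    # Order-insensitive: each observed value must satisfy exactly one matcher.
    print(MatchesSetwise(Equals(2), Equals(1)).match([1, 2]))        # None
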
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py
new file mode 100644
index 00000000000..b1ec9151b24
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_dict.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'KeysEqual',
+ ]
+
+from ..helpers import (
+ dict_subtract,
+ filter_values,
+ map_values,
+ )
+from ._higherorder import (
+ AnnotatedMismatch,
+ PrefixedMismatch,
+ MismatchesAll,
+ )
+from ._impl import Matcher, Mismatch
+
+
+def LabelledMismatches(mismatches, details=None):
+ """A collection of mismatches, each labelled."""
+ return MismatchesAll(
+ (PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())),
+ wrap=False)
+
+
+class MatchesAllDict(Matcher):
+ """Matches if all of the matchers it is created with match.
+
+ A lot like ``MatchesAll``, but takes a dict of Matchers and labels any
+ mismatches with the key of the dictionary.
+ """
+
+ def __init__(self, matchers):
+ super(MatchesAllDict, self).__init__()
+ self.matchers = matchers
+
+ def __str__(self):
+ return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),)
+
+ def match(self, observed):
+ mismatches = {}
+ for label in self.matchers:
+ mismatches[label] = self.matchers[label].match(observed)
+ return _dict_to_mismatch(
+ mismatches, result_mismatch=LabelledMismatches)
+
+
+class DictMismatches(Mismatch):
+ """A mismatch with a dict of child mismatches."""
+
+ def __init__(self, mismatches, details=None):
+ super(DictMismatches, self).__init__(None, details=details)
+ self.mismatches = mismatches
+
+ def describe(self):
+ lines = ['{']
+ lines.extend(
+ [' %r: %s,' % (key, mismatch.describe())
+ for (key, mismatch) in sorted(self.mismatches.items())])
+ lines.append('}')
+ return '\n'.join(lines)
+
+
+def _dict_to_mismatch(data, to_mismatch=None,
+ result_mismatch=DictMismatches):
+ if to_mismatch:
+ data = map_values(to_mismatch, data)
+ mismatches = filter_values(bool, data)
+ if mismatches:
+ return result_mismatch(mismatches)
+
+
+class _MatchCommonKeys(Matcher):
+ """Match on keys in a dictionary.
+
+ Given a dictionary where the values are matchers, this will look for
+ common keys in the matched dictionary and match if and only if all common
+ keys match the given matchers.
+
+ Thus::
+
+ >>> structure = {'a': Equals('x'), 'b': Equals('y')}
+ >>> _MatchCommonKeys(structure).match({'a': 'x', 'c': 'z'})
+ None
+ """
+
+ def __init__(self, dict_of_matchers):
+ super(_MatchCommonKeys, self).__init__()
+ self._matchers = dict_of_matchers
+
+ def _compare_dicts(self, expected, observed):
+ common_keys = set(expected.keys()) & set(observed.keys())
+ mismatches = {}
+ for key in common_keys:
+ mismatch = expected[key].match(observed[key])
+ if mismatch:
+ mismatches[key] = mismatch
+ return mismatches
+
+ def match(self, observed):
+ mismatches = self._compare_dicts(self._matchers, observed)
+ if mismatches:
+ return DictMismatches(mismatches)
+
+
+class _SubDictOf(Matcher):
+ """Matches if the matched dict only has keys that are in given dict."""
+
+ def __init__(self, super_dict, format_value=repr):
+ super(_SubDictOf, self).__init__()
+ self.super_dict = super_dict
+ self.format_value = format_value
+
+ def match(self, observed):
+ excess = dict_subtract(observed, self.super_dict)
+ return _dict_to_mismatch(
+ excess, lambda v: Mismatch(self.format_value(v)))
+
+
+class _SuperDictOf(Matcher):
+ """Matches if all of the keys in the given dict are in the matched dict.
+ """
+
+ def __init__(self, sub_dict, format_value=repr):
+ super(_SuperDictOf, self).__init__()
+ self.sub_dict = sub_dict
+ self.format_value = format_value
+
+ def match(self, super_dict):
+ return _SubDictOf(super_dict, self.format_value).match(self.sub_dict)
+
+
+def _format_matcher_dict(matchers):
+ return '{%s}' % (
+ ', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items())))
+
+
+class _CombinedMatcher(Matcher):
+ """Many matchers labelled and combined into one uber-matcher.
+
+ Subclass this and then specify a dict of matcher factories that take a
+ single 'expected' value and return a matcher. The subclass will match
+ only if all of the matchers made from factories match.
+
+ Not **entirely** dissimilar from ``MatchesAll``.
+ """
+
+ matcher_factories = {}
+
+ def __init__(self, expected):
+ super(_CombinedMatcher, self).__init__()
+ self._expected = expected
+
+ def format_expected(self, expected):
+ return repr(expected)
+
+ def __str__(self):
+ return '%s(%s)' % (
+ self.__class__.__name__, self.format_expected(self._expected))
+
+ def match(self, observed):
+ matchers = dict(
+ (k, v(self._expected)) for k, v in self.matcher_factories.items())
+ return MatchesAllDict(matchers).match(observed)
+
+
+class MatchesDict(_CombinedMatcher):
+ """Match a dictionary exactly, by its keys.
+
+ Specify a dictionary mapping keys (often strings) to matchers. This is
+ the 'expected' dict. Any dictionary that matches this must have exactly
+ the same keys, and the values must match the corresponding matchers in the
+ expected dict.
+ """
+
+ matcher_factories = {
+ 'Extra': _SubDictOf,
+ 'Missing': lambda m: _SuperDictOf(m, format_value=str),
+ 'Differences': _MatchCommonKeys,
+ }
+
+ format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class ContainsDict(_CombinedMatcher):
+ """Match a dictionary for that contains a specified sub-dictionary.
+
+ Specify a dictionary mapping keys (often strings) to matchers. This is
+ the 'expected' dict. Any dictionary that matches this must have **at
+ least** these keys, and the values must match the corresponding matchers
+ in the expected dict. Dictionaries that have more keys will also match.
+
+ In other words, any matching dictionary must contain the dictionary given
+ to the constructor.
+
+ Does not check for strict sub-dictionary. That is, equal dictionaries
+ match.
+ """
+
+ matcher_factories = {
+ 'Missing': lambda m: _SuperDictOf(m, format_value=str),
+ 'Differences': _MatchCommonKeys,
+ }
+
+ format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class ContainedByDict(_CombinedMatcher):
+ """Match a dictionary for which this is a super-dictionary.
+
+ Specify a dictionary mapping keys (often strings) to matchers. This is
+ the 'expected' dict. Any dictionary that matches this must have **only**
+ these keys, and the values must match the corresponding matchers in the
+ expected dict. Dictionaries that have fewer keys can also match.
+
+ In other words, any matching dictionary must be contained by the
+ dictionary given to the constructor.
+
+ Does not check for strict super-dictionary. That is, equal dictionaries
+ match.
+ """
+
+ matcher_factories = {
+ 'Extra': _SubDictOf,
+ 'Differences': _MatchCommonKeys,
+ }
+
+ format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class KeysEqual(Matcher):
+ """Checks whether a dict has particular keys."""
+
+ def __init__(self, *expected):
+ """Create a `KeysEqual` Matcher.
+
+ :param expected: The keys the dict is expected to have. If a dict,
+ then we use the keys of that dict; if a collection, we assume it
+ is a collection of expected keys.
+ """
+ super(KeysEqual, self).__init__()
+ try:
+ self.expected = expected[0].keys()
+ except AttributeError:
+ self.expected = list(expected)
+
+ def __str__(self):
+ return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
+
+ def match(self, matchee):
+ from ._basic import _BinaryMismatch, Equals
+ expected = sorted(self.expected)
+ matched = Equals(expected).match(sorted(matchee.keys()))
+ if matched:
+ return AnnotatedMismatch(
+ 'Keys not equal',
+ _BinaryMismatch(expected, 'does not match', matchee))
+ return None
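A minimal usage sketch of the dict matchers added above (assuming the usual re-exports from ``testtools.matchers``, as in upstream testtools; ``match()`` returns None on success and a ``Mismatch`` otherwise)::

    from testtools.matchers import ContainsDict, Equals, KeysEqual, MatchesDict

    spec = {'a': Equals(1), 'b': Equals(2)}
    MatchesDict(spec).match({'a': 1, 'b': 2})           # None: exact key set, values match
    MatchesDict(spec).match({'a': 1})                    # mismatch: 'b' reported under 'Missing'
    ContainsDict(spec).match({'a': 1, 'b': 2, 'c': 3})   # None: extra keys are allowed
    KeysEqual('a', 'b').match({'b': 2, 'a': 1})          # None: only the key set is compared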
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py
new file mode 100644
index 00000000000..41f3c003e53
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_doctest.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'DocTestMatches',
+ ]
+
+import doctest
+import re
+
+from ..compat import str_is_unicode
+from ._impl import Mismatch
+
+
+class _NonManglingOutputChecker(doctest.OutputChecker):
+ """Doctest checker that works with unicode rather than mangling strings
+
+ This is needed because current Python versions have tried to fix string
+ encoding related problems, but regressed the default behaviour with
+ unicode inputs in the process.
+
+ In Python 2.6 and 2.7 ``OutputChecker.output_difference`` was changed
+ to return a bytestring encoded as per ``sys.stdout.encoding``, or utf-8 if
+ that can't be determined. Worse, that encoding process happens in the
+ innocent looking `_indent` global function. Because the
+ `DocTestMismatch.describe` result may well not be destined for printing to
+ stdout, this is no good for us. To get a unicode return as before, the
+ method is monkey patched if ``doctest._encoding`` exists.
+
+ Python 3 has a different problem. For some reason both inputs are encoded
+ to ascii with 'backslashreplace', making an escaped string match its
+ unescaped form. Overriding the offending ``OutputChecker._toAscii`` method
+ is sufficient to revert this.
+ """
+
+ def _toAscii(self, s):
+ """Return ``s`` unchanged rather than mangling it to ascii"""
+ return s
+
+ # Only do this overriding hackery if doctest has a broken _input function
+ if getattr(doctest, "_encoding", None) is not None:
+ from types import FunctionType as __F
+ __f = doctest.OutputChecker.output_difference.im_func
+ __g = dict(__f.func_globals)
+ def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)):
+ """Prepend non-empty lines in ``s`` with ``indent`` number of spaces"""
+ return _pattern.sub(indent*" ", s)
+ __g["_indent"] = _indent
+ output_difference = __F(__f.func_code, __g, "output_difference")
+ del __F, __f, __g, _indent
+
+
+class DocTestMatches(object):
+ """See if a string matches a doctest example."""
+
+ def __init__(self, example, flags=0):
+ """Create a DocTestMatches to match example.
+
+ :param example: The example to match e.g. 'foo bar baz'
+ :param flags: doctest comparison flags to match on. e.g.
+ doctest.ELLIPSIS.
+ """
+ if not example.endswith('\n'):
+ example += '\n'
+ self.want = example # variable name required by doctest.
+ self.flags = flags
+ self._checker = _NonManglingOutputChecker()
+
+ def __str__(self):
+ if self.flags:
+ flagstr = ", flags=%d" % self.flags
+ else:
+ flagstr = ""
+ return 'DocTestMatches(%r%s)' % (self.want, flagstr)
+
+ def _with_nl(self, actual):
+ result = self.want.__class__(actual)
+ if not result.endswith('\n'):
+ result += '\n'
+ return result
+
+ def match(self, actual):
+ with_nl = self._with_nl(actual)
+ if self._checker.check_output(self.want, with_nl, self.flags):
+ return None
+ return DocTestMismatch(self, with_nl)
+
+ def _describe_difference(self, with_nl):
+ return self._checker.output_difference(self, with_nl, self.flags)
+
+
+class DocTestMismatch(Mismatch):
+ """Mismatch object for DocTestMatches."""
+
+ def __init__(self, matcher, with_nl):
+ self.matcher = matcher
+ self.with_nl = with_nl
+
+ def describe(self):
+ s = self.matcher._describe_difference(self.with_nl)
+ if str_is_unicode or isinstance(s, unicode):
+ return s
+ # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
+ # be escaped, in addition to non-ascii bytes.
+ return s.decode("latin1").encode("ascii", "backslashreplace")
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py
new file mode 100644
index 00000000000..1938f152b78
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'MatchesException',
+ 'Raises',
+ 'raises',
+ ]
+
+import sys
+
+from testtools.compat import (
+ classtypes,
+ _error_repr,
+ isbaseexception,
+ istext,
+ )
+from ._basic import MatchesRegex
+from ._higherorder import AfterPreproccessing
+from ._impl import (
+ Matcher,
+ Mismatch,
+ )
+
+
+class MatchesException(Matcher):
+ """Match an exc_info tuple against an exception instance or type."""
+
+ def __init__(self, exception, value_re=None):
+ """Create a MatchesException that will match exc_info's for exception.
+
+ :param exception: Either an exception instance or type.
+ If an instance is given, the type and arguments of the exception
+ are checked. If a type is given only the type of the exception is
+ checked. If a tuple is given, then as with isinstance, any of the
+ types in the tuple matching is sufficient to match.
+ :param value_re: If 'exception' is a type, and the matchee exception
+ is of the right type, then match against this. If value_re is a
+ string, then assume value_re is a regular expression and match
+ the str() of the exception against it. Otherwise, assume value_re
+ is a matcher, and match the exception against it.
+ """
+ Matcher.__init__(self)
+ self.expected = exception
+ if istext(value_re):
+ value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
+ self.value_re = value_re
+ expected_type = type(self.expected)
+ self._is_instance = not any(issubclass(expected_type, class_type)
+ for class_type in classtypes() + (tuple,))
+
+ def match(self, other):
+ if type(other) != tuple:
+ return Mismatch('%r is not an exc_info tuple' % other)
+ expected_class = self.expected
+ if self._is_instance:
+ expected_class = expected_class.__class__
+ if not issubclass(other[0], expected_class):
+ return Mismatch('%r is not a %r' % (other[0], expected_class))
+ if self._is_instance:
+ if other[1].args != self.expected.args:
+ return Mismatch('%s has different arguments to %s.' % (
+ _error_repr(other[1]), _error_repr(self.expected)))
+ elif self.value_re is not None:
+ return self.value_re.match(other[1])
+
+ def __str__(self):
+ if self._is_instance:
+ return "MatchesException(%s)" % _error_repr(self.expected)
+ return "MatchesException(%s)" % repr(self.expected)
+
+
+class Raises(Matcher):
+ """Match if the matchee raises an exception when called.
+
+ Exceptions which are not subclasses of Exception propagate out of the
+ Raises.match call unless they are explicitly matched.
+ """
+
+ def __init__(self, exception_matcher=None):
+ """Create a Raises matcher.
+
+ :param exception_matcher: Optional validator for the exception raised
+ by matchee. If supplied the exc_info tuple for the exception raised
+ is passed into that matcher. If no exception_matcher is supplied
+ then the simple fact of raising an exception is considered enough
+ to match on.
+ """
+ self.exception_matcher = exception_matcher
+
+ def match(self, matchee):
+ try:
+ result = matchee()
+ return Mismatch('%r returned %r' % (matchee, result))
+ # Catch all exceptions: Raises() should be able to match a
+ # KeyboardInterrupt or SystemExit.
+ except:
+ exc_info = sys.exc_info()
+ if self.exception_matcher:
+ mismatch = self.exception_matcher.match(exc_info)
+ if not mismatch:
+ del exc_info
+ return
+ else:
+ mismatch = None
+ # The exception did not match, or no explicit matching logic was
+ # performed. If the exception is a non-user exception (that is, not
+ # a subclass of Exception on Python 2.5+) then propagate it.
+ if isbaseexception(exc_info[1]):
+ del exc_info
+ raise
+ return mismatch
+
+ def __str__(self):
+ return 'Raises()'
+
+
+def raises(exception):
+ """Make a matcher that checks that a callable raises an exception.
+
+ This is a convenience function, exactly equivalent to::
+
+ return Raises(MatchesException(exception))
+
+ See `Raises` and `MatchesException` for more information.
+ """
+ return Raises(MatchesException(exception))
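A short sketch of ``Raises``/``MatchesException`` and the ``raises`` shorthand (names assumed re-exported from ``testtools.matchers``)::

    from testtools.matchers import MatchesException, Raises, raises

    def boom():
        raise ValueError('bad value')

    Raises(MatchesException(ValueError)).match(boom)            # None: expected type raised
    Raises(MatchesException(ValueError, 'bad .*')).match(boom)  # None: str() matches the regex
    raises(KeyError).match(boom)                                # mismatch: the raised ValueError is not a KeyError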
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py
new file mode 100644
index 00000000000..54f749b1359
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_filesystem.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers for things related to the filesystem."""
+
+__all__ = [
+ 'FileContains',
+ 'DirExists',
+ 'FileExists',
+ 'HasPermissions',
+ 'PathExists',
+ 'SamePath',
+ 'TarballContains',
+ ]
+
+import os
+import tarfile
+
+from ._basic import Equals
+from ._higherorder import (
+ MatchesAll,
+ MatchesPredicate,
+ )
+from ._impl import (
+ Matcher,
+ )
+
+
+def PathExists():
+ """Matches if the given path exists.
+
+ Use like this::
+
+ assertThat('/some/path', PathExists())
+ """
+ return MatchesPredicate(os.path.exists, "%s does not exist.")
+
+
+def DirExists():
+ """Matches if the path exists and is a directory."""
+ return MatchesAll(
+ PathExists(),
+ MatchesPredicate(os.path.isdir, "%s is not a directory."),
+ first_only=True)
+
+
+def FileExists():
+ """Matches if the given path exists and is a file."""
+ return MatchesAll(
+ PathExists(),
+ MatchesPredicate(os.path.isfile, "%s is not a file."),
+ first_only=True)
+
+
+class DirContains(Matcher):
+ """Matches if the given directory contains files with the given names.
+
+ That is, is the directory listing exactly equal to the given files?
+ """
+
+ def __init__(self, filenames=None, matcher=None):
+ """Construct a ``DirContains`` matcher.
+
+ Can be used in a basic mode where the whole directory listing is
+ matched against an expected directory listing (by passing
+ ``filenames``). Can also be used in a more advanced way where the
+ whole directory listing is matched against an arbitrary matcher (by
+ passing ``matcher`` instead).
+
+ :param filenames: If specified, match the sorted directory listing
+ against this list of filenames, sorted.
+ :param matcher: If specified, match the sorted directory listing
+ against this matcher.
+ """
+ if filenames == matcher == None:
+ raise AssertionError(
+ "Must provide one of `filenames` or `matcher`.")
+ if None not in (filenames, matcher):
+ raise AssertionError(
+ "Must provide either `filenames` or `matcher`, not both.")
+ if filenames is None:
+ self.matcher = matcher
+ else:
+ self.matcher = Equals(sorted(filenames))
+
+ def match(self, path):
+ mismatch = DirExists().match(path)
+ if mismatch is not None:
+ return mismatch
+ return self.matcher.match(sorted(os.listdir(path)))
+
+
+class FileContains(Matcher):
+ """Matches if the given file has the specified contents."""
+
+ def __init__(self, contents=None, matcher=None):
+ """Construct a ``FileContains`` matcher.
+
+ Can be used in a basic mode where the file contents are compared for
+ equality against the expected file contents (by passing ``contents``).
+ Can also be used in a more advanced way where the file contents are
+ matched against an arbitrary matcher (by passing ``matcher`` instead).
+
+ :param contents: If specified, match the contents of the file with
+ these contents.
+ :param matcher: If specified, match the contents of the file against
+ this matcher.
+ """
+ if contents == matcher == None:
+ raise AssertionError(
+ "Must provide one of `contents` or `matcher`.")
+ if None not in (contents, matcher):
+ raise AssertionError(
+ "Must provide either `contents` or `matcher`, not both.")
+ if matcher is None:
+ self.matcher = Equals(contents)
+ else:
+ self.matcher = matcher
+
+ def match(self, path):
+ mismatch = PathExists().match(path)
+ if mismatch is not None:
+ return mismatch
+ f = open(path)
+ try:
+ actual_contents = f.read()
+ return self.matcher.match(actual_contents)
+ finally:
+ f.close()
+
+ def __str__(self):
+ return "File at path exists and contains %s" % self.contents
+
+
+class HasPermissions(Matcher):
+ """Matches if a file has the given permissions.
+
+ Permissions are specified and matched as a four-digit octal string.
+ """
+
+ def __init__(self, octal_permissions):
+ """Construct a HasPermissions matcher.
+
+ :param octal_permissions: A four digit octal string, representing the
+ intended access permissions. e.g. '0775' for rwxrwxr-x.
+ """
+ super(HasPermissions, self).__init__()
+ self.octal_permissions = octal_permissions
+
+ def match(self, filename):
+ permissions = oct(os.stat(filename).st_mode)[-4:]
+ return Equals(self.octal_permissions).match(permissions)
+
+
+class SamePath(Matcher):
+ """Matches if two paths are the same.
+
+ That is, the paths are equal, or they point to the same file but in
+ different ways. The paths do not have to exist.
+ """
+
+ def __init__(self, path):
+ super(SamePath, self).__init__()
+ self.path = path
+
+ def match(self, other_path):
+ f = lambda x: os.path.abspath(os.path.realpath(x))
+ return Equals(f(self.path)).match(f(other_path))
+
+
+class TarballContains(Matcher):
+ """Matches if the given tarball contains the given paths.
+
+ Uses TarFile.getnames() to get the paths out of the tarball.
+ """
+
+ def __init__(self, paths):
+ super(TarballContains, self).__init__()
+ self.paths = paths
+ self.path_matcher = Equals(sorted(self.paths))
+
+ def match(self, tarball_path):
+ # Open underlying file first to ensure it's always closed:
+ # <http://bugs.python.org/issue10233>
+ f = open(tarball_path, "rb")
+ try:
+ tarball = tarfile.open(tarball_path, fileobj=f)
+ try:
+ return self.path_matcher.match(sorted(tarball.getnames()))
+ finally:
+ tarball.close()
+ finally:
+ f.close()
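A quick sketch of the filesystem matchers above (paths are illustrative; ``match()`` reports a ``Mismatch`` rather than raising)::

    from testtools.matchers import DirExists, FileContains, PathExists

    PathExists().match('/tmp')                          # None if the path exists
    DirExists().match('/tmp')                           # additionally checks os.path.isdir
    FileContains(contents='hello\n').match('/tmp/x')    # reads and compares the file,
                                                        # or reports that the path is missing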
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py
new file mode 100644
index 00000000000..3570f573747
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_higherorder.py
@@ -0,0 +1,368 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+ 'AfterPreprocessing',
+ 'AllMatch',
+ 'Annotate',
+ 'AnyMatch',
+ 'MatchesAny',
+ 'MatchesAll',
+ 'Not',
+ ]
+
+import types
+
+from ._impl import (
+ Matcher,
+ Mismatch,
+ MismatchDecorator,
+ )
+
+
+class MatchesAny(object):
+ """Matches if any of the matchers it is created with match."""
+
+ def __init__(self, *matchers):
+ self.matchers = matchers
+
+ def match(self, matchee):
+ results = []
+ for matcher in self.matchers:
+ mismatch = matcher.match(matchee)
+ if mismatch is None:
+ return None
+ results.append(mismatch)
+ return MismatchesAll(results)
+
+ def __str__(self):
+ return "MatchesAny(%s)" % ', '.join([
+ str(matcher) for matcher in self.matchers])
+
+
+class MatchesAll(object):
+ """Matches if all of the matchers it is created with match."""
+
+ def __init__(self, *matchers, **options):
+ """Construct a MatchesAll matcher.
+
+ Just list the component matchers as arguments in the ``*args``
+ style. If you want only the first mismatch to be reported, pass in
+ first_only=True as a keyword argument. By default, all mismatches are
+ reported.
+ """
+ self.matchers = matchers
+ self.first_only = options.get('first_only', False)
+
+ def __str__(self):
+ return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
+
+ def match(self, matchee):
+ results = []
+ for matcher in self.matchers:
+ mismatch = matcher.match(matchee)
+ if mismatch is not None:
+ if self.first_only:
+ return mismatch
+ results.append(mismatch)
+ if results:
+ return MismatchesAll(results)
+ else:
+ return None
+
+
+class MismatchesAll(Mismatch):
+ """A mismatch with many child mismatches."""
+
+ def __init__(self, mismatches, wrap=True):
+ self.mismatches = mismatches
+ self._wrap = wrap
+
+ def describe(self):
+ descriptions = []
+ if self._wrap:
+ descriptions = ["Differences: ["]
+ for mismatch in self.mismatches:
+ descriptions.append(mismatch.describe())
+ if self._wrap:
+ descriptions.append("]")
+ return '\n'.join(descriptions)
+
+
+class Not(object):
+ """Inverts a matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'Not(%s)' % (self.matcher,)
+
+ def match(self, other):
+ mismatch = self.matcher.match(other)
+ if mismatch is None:
+ return MatchedUnexpectedly(self.matcher, other)
+ else:
+ return None
+
+
+class MatchedUnexpectedly(Mismatch):
+ """A thing matched when it wasn't supposed to."""
+
+ def __init__(self, matcher, other):
+ self.matcher = matcher
+ self.other = other
+
+ def describe(self):
+ return "%r matches %s" % (self.other, self.matcher)
+
+
+class Annotate(object):
+ """Annotates a matcher with a descriptive string.
+
+ Mismatches are then described as '<mismatch>: <annotation>'.
+ """
+
+ def __init__(self, annotation, matcher):
+ self.annotation = annotation
+ self.matcher = matcher
+
+ @classmethod
+ def if_message(cls, annotation, matcher):
+ """Annotate ``matcher`` only if ``annotation`` is non-empty."""
+ if not annotation:
+ return matcher
+ return cls(annotation, matcher)
+
+ def __str__(self):
+ return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
+
+ def match(self, other):
+ mismatch = self.matcher.match(other)
+ if mismatch is not None:
+ return AnnotatedMismatch(self.annotation, mismatch)
+
+
+class PostfixedMismatch(MismatchDecorator):
+ """A mismatch annotated with a descriptive string."""
+
+ def __init__(self, annotation, mismatch):
+ super(PostfixedMismatch, self).__init__(mismatch)
+ self.annotation = annotation
+ self.mismatch = mismatch
+
+ def describe(self):
+ return '%s: %s' % (self.original.describe(), self.annotation)
+
+
+AnnotatedMismatch = PostfixedMismatch
+
+
+class PrefixedMismatch(MismatchDecorator):
+
+ def __init__(self, prefix, mismatch):
+ super(PrefixedMismatch, self).__init__(mismatch)
+ self.prefix = prefix
+
+ def describe(self):
+ return '%s: %s' % (self.prefix, self.original.describe())
+
+
+class AfterPreprocessing(object):
+ """Matches if the value matches after passing through a function.
+
+ This can be used to aid in creating trivial matchers as functions, for
+ example::
+
+ def PathHasFileContent(content):
+ def _read(path):
+ return open(path).read()
+ return AfterPreprocessing(_read, Equals(content))
+ """
+
+ def __init__(self, preprocessor, matcher, annotate=True):
+ """Create an AfterPreprocessing matcher.
+
+ :param preprocessor: A function called with the matchee before
+ matching.
+ :param matcher: What to match the preprocessed matchee against.
+ :param annotate: Whether or not to annotate the matcher with
+ something explaining how we transformed the matchee. Defaults
+ to True.
+ """
+ self.preprocessor = preprocessor
+ self.matcher = matcher
+ self.annotate = annotate
+
+ def _str_preprocessor(self):
+ if isinstance(self.preprocessor, types.FunctionType):
+ return '<function %s>' % self.preprocessor.__name__
+ return str(self.preprocessor)
+
+ def __str__(self):
+ return "AfterPreprocessing(%s, %s)" % (
+ self._str_preprocessor(), self.matcher)
+
+ def match(self, value):
+ after = self.preprocessor(value)
+ if self.annotate:
+ matcher = Annotate(
+ "after %s on %r" % (self._str_preprocessor(), value),
+ self.matcher)
+ else:
+ matcher = self.matcher
+ return matcher.match(after)
+
+
+# This is the old, deprecated spelling of the name, kept for backwards
+# compatibility.
+AfterPreproccessing = AfterPreprocessing
+
+
+class AllMatch(object):
+ """Matches if all provided values match the given matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'AllMatch(%s)' % (self.matcher,)
+
+ def match(self, values):
+ mismatches = []
+ for value in values:
+ mismatch = self.matcher.match(value)
+ if mismatch:
+ mismatches.append(mismatch)
+ if mismatches:
+ return MismatchesAll(mismatches)
+
+
+class AnyMatch(object):
+ """Matches if any of the provided values match the given matcher."""
+
+ def __init__(self, matcher):
+ self.matcher = matcher
+
+ def __str__(self):
+ return 'AnyMatch(%s)' % (self.matcher,)
+
+ def match(self, values):
+ mismatches = []
+ for value in values:
+ mismatch = self.matcher.match(value)
+ if mismatch:
+ mismatches.append(mismatch)
+ else:
+ return None
+ return MismatchesAll(mismatches)
+
+
+class MatchesPredicate(Matcher):
+ """Match if a given function returns True.
+
+ It is reasonably common to want to make a very simple matcher based on a
+ function that you already have that returns True or False given a single
+ argument (i.e. a predicate function). This matcher makes it very easy to
+ do so. e.g.::
+
+ IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even')
+ self.assertThat(4, IsEven)
+ """
+
+ def __init__(self, predicate, message):
+ """Create a ``MatchesPredicate`` matcher.
+
+ :param predicate: A function that takes a single argument and returns
+ a value that will be interpreted as a boolean.
+ :param message: A message to describe a mismatch. It will be formatted
+ with '%' and be given whatever was passed to ``match()``. Thus, it
+ needs to contain exactly one thing like '%s', '%d' or '%f'.
+ """
+ self.predicate = predicate
+ self.message = message
+
+ def __str__(self):
+ return '%s(%r, %r)' % (
+ self.__class__.__name__, self.predicate, self.message)
+
+ def match(self, x):
+ if not self.predicate(x):
+ return Mismatch(self.message % x)
+
+
+def MatchesPredicateWithParams(predicate, message, name=None):
+ """Match if a given parameterised function returns True.
+
+ It is reasonably common to want to make a very simple matcher based on a
+ function that you already have that returns True or False given some
+ arguments. This matcher makes it very easy to do so. e.g.::
+
+ HasLength = MatchesPredicateWithParams(
+ lambda x, y: len(x) == y, 'len({0}) is not {1}')
+ # This assertion will fail, as 'len([1, 2]) == 3' is False.
+ self.assertThat([1, 2], HasLength(3))
+
+ Note that unlike MatchesPredicate, MatchesPredicateWithParams returns a
+ factory: you build the actual matcher from it by supplying the extra
+ parameters.
+
+ The predicate function should take the object to match as its first
+ parameter. Any additional parameters supplied when constructing a matcher
+ are supplied to the predicate as additional parameters when checking for a
+ match.
+
+ :param predicate: The predicate function.
+ :param message: A format string for describing mis-matches.
+ :param name: Optional replacement name for the matcher.
+ """
+ def construct_matcher(*args, **kwargs):
+ return _MatchesPredicateWithParams(
+ predicate, message, name, *args, **kwargs)
+ return construct_matcher
+
+
+class _MatchesPredicateWithParams(Matcher):
+
+ def __init__(self, predicate, message, name, *args, **kwargs):
+ """Create a ``MatchesPredicateWithParams`` matcher.
+
+ :param predicate: A function that takes an object to match and
+ additional params as given in ``*args`` and ``**kwargs``. The
+ result of the function will be interpreted as a boolean to
+ determine a match.
+ :param message: A message to describe a mismatch. It will be formatted
+ with .format(), given the object passed to ``match()`` followed by
+ ``*args`` as its positional arguments, and whatever was passed in
+ ``**kwargs`` as its keyword arguments.
+
+ For instance, to format a single parameter::
+
+ "{0} is not a {1}"
+
+ To format a keyword arg::
+
+ "{0} is not a {type_to_check}"
+ :param name: What name to use for the matcher class. Pass None to use
+ the default.
+ """
+ self.predicate = predicate
+ self.message = message
+ self.name = name
+ self.args = args
+ self.kwargs = kwargs
+
+ def __str__(self):
+ args = [str(arg) for arg in self.args]
+ kwargs = ["%s=%s" % item for item in self.kwargs.items()]
+ args = ", ".join(args + kwargs)
+ if self.name is None:
+ name = 'MatchesPredicateWithParams(%r, %r)' % (
+ self.predicate, self.message)
+ else:
+ name = self.name
+ return '%s(%s)' % (name, args)
+
+ def match(self, x):
+ if not self.predicate(x, *self.args, **self.kwargs):
+ return Mismatch(
+ self.message.format(*((x,) + self.args), **self.kwargs))
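For orientation, a few of the higher-order matchers above in use (``MatchesPredicateWithParams`` is assumed to be re-exported from ``testtools.matchers``)::

    from testtools.matchers import (
        Annotate, Equals, MatchesAny, MatchesPredicateWithParams, Not)

    Not(Equals(2)).match(3)                        # None: 3 does not equal 2
    MatchesAny(Equals(1), Equals(2)).match(2)      # None: the second matcher succeeds
    Annotate('wrong answer', Equals(42)).match(7)  # mismatch described with ': wrong answer' appended

    # MatchesPredicateWithParams returns a factory; build a matcher, then match:
    HasLength = MatchesPredicateWithParams(
        lambda x, y: len(x) == y, 'len({0}) is not {1}')
    HasLength(3).match([1, 2])                     # Mismatch('len([1, 2]) is not 3')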
diff --git a/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py b/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py
new file mode 100644
index 00000000000..36e5ee02218
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/matchers/_impl.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
+"""
+
+__all__ = [
+ 'Matcher',
+ 'Mismatch',
+ 'MismatchDecorator',
+ 'MismatchError',
+ ]
+
+from testtools.compat import (
+ _isbytes,
+ istext,
+ str_is_unicode,
+ text_repr
+ )
+
+
+class Matcher(object):
+ """A pattern matcher.
+
+ A Matcher must implement match and __str__ to be used by
+ testtools.TestCase.assertThat. Matcher.match(thing) returns None when
+ thing is completely matched, and a Mismatch object otherwise.
+
+ Matchers can be useful outside of test cases, as they are simply a
+ pattern matching language expressed as objects.
+
+ testtools.matchers is inspired by hamcrest, but is pythonic rather than
+ a Java transcription.
+ """
+
+ def match(self, something):
+ """Return None if this matcher matches something, a Mismatch otherwise.
+ """
+ raise NotImplementedError(self.match)
+
+ def __str__(self):
+ """Get a sensible human representation of the matcher.
+
+ This should include the parameters given to the matcher and any
+ state that would affect the matches operation.
+ """
+ raise NotImplementedError(self.__str__)
+
+
+class Mismatch(object):
+ """An object describing a mismatch detected by a Matcher."""
+
+ def __init__(self, description=None, details=None):
+ """Construct a `Mismatch`.
+
+ :param description: A description to use. If not provided,
+ `Mismatch.describe` must be implemented.
+ :param details: Extra details about the mismatch. Defaults
+ to the empty dict.
+ """
+ if description:
+ self._description = description
+ if details is None:
+ details = {}
+ self._details = details
+
+ def describe(self):
+ """Describe the mismatch.
+
+ This should be either a human-readable string or castable to a string.
+ In particular, it should either be plain ascii or unicode on Python 2,
+ and care should be taken to escape control characters.
+ """
+ try:
+ return self._description
+ except AttributeError:
+ raise NotImplementedError(self.describe)
+
+ def get_details(self):
+ """Get extra details about the mismatch.
+
+ This allows the mismatch to provide extra information beyond the basic
+ description, including large text or binary files, or debugging internals
+ without having to force it to fit in the output of 'describe'.
+
+ The testtools assertion assertThat will query get_details and attach
+ all its values to the test, permitting them to be reported in whatever
+ manner the test environment chooses.
+
+ :return: a dict mapping names to Content objects. name is a string to
+ name the detail, and the Content object is the detail to add
+ to the result. For more information see the API to which items from
+ this dict are passed, testtools.TestCase.addDetail.
+ """
+ return getattr(self, '_details', {})
+
+ def __repr__(self):
+ return "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
+ id(self), self.__dict__)
+
+
+class MismatchError(AssertionError):
+ """Raised when a mismatch occurs."""
+
+ # This class exists to work around
+ # <https://bugs.launchpad.net/testtools/+bug/804127>. It provides a
+ # guaranteed way of getting a readable exception, no matter what crazy
+ # characters are in the matchee, matcher or mismatch.
+
+ def __init__(self, matchee, matcher, mismatch, verbose=False):
+ # Have to use old-style upcalling for Python 2.4 and 2.5
+ # compatibility.
+ AssertionError.__init__(self)
+ self.matchee = matchee
+ self.matcher = matcher
+ self.mismatch = mismatch
+ self.verbose = verbose
+
+ def __str__(self):
+ difference = self.mismatch.describe()
+ if self.verbose:
+ # GZ 2011-08-24: Smelly API? Better to take any object and special
+ # case text inside?
+ if istext(self.matchee) or _isbytes(self.matchee):
+ matchee = text_repr(self.matchee, multiline=False)
+ else:
+ matchee = repr(self.matchee)
+ return (
+ 'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
+ % (matchee, self.matcher, difference))
+ else:
+ return difference
+
+ if not str_is_unicode:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return self.__unicode__().encode("ascii", "backslashreplace")
+
+
+class MismatchDecorator(object):
+ """Decorate a ``Mismatch``.
+
+ Forwards all messages to the original mismatch object. Probably the best
+ way to use this is inherit from this class and then provide your own
+ custom decoration logic.
+ """
+
+ def __init__(self, original):
+ """Construct a `MismatchDecorator`.
+
+ :param original: A `Mismatch` object to decorate.
+ """
+ self.original = original
+
+ def __repr__(self):
+ return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,)
+
+ def describe(self):
+ return self.original.describe()
+
+ def get_details(self):
+ return self.original.get_details()
+
+
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
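The ``Matcher``/``Mismatch`` protocol above is all a custom matcher needs; a hypothetical example (``IsPositive`` is illustrative, not part of testtools)::

    from testtools.matchers import Matcher, Mismatch

    class IsPositive(Matcher):
        # Illustrative matcher: match() returns None on success, a Mismatch otherwise.
        def __str__(self):
            return 'IsPositive()'

        def match(self, actual):
            if actual > 0:
                return None
            return Mismatch('%r is not positive' % (actual,))

    IsPositive().match(5)    # None
    IsPositive().match(-1)   # Mismatch('-1 is not positive')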
diff --git a/test/3rdparty/testtools-0.9.12/testtools/monkey.py b/test/3rdparty/testtools-0.9.34/testtools/monkey.py
index ba0ac8fd8bf..ba0ac8fd8bf 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/monkey.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/monkey.py
diff --git a/test/3rdparty/testtools-0.9.12/testtools/run.py b/test/3rdparty/testtools-0.9.34/testtools/run.py
index 72011c74cab..466da76a7d4 100755
--- a/test/3rdparty/testtools-0.9.12/testtools/run.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/run.py
@@ -8,13 +8,16 @@ For instance, to run the testtools test suite.
$ python -m testtools.run testtools.tests.test_suite
"""
+from functools import partial
import os
import unittest
import sys
+from extras import safe_hasattr
+
from testtools import TextTestResult
from testtools.compat import classtypes, istext, unicode_output_stream
-from testtools.testsuite import iterate_tests
+from testtools.testsuite import filter_by_ids, iterate_tests, sorted_tests
defaultTestLoader = unittest.defaultTestLoader
@@ -32,15 +35,61 @@ else:
have_discover = True
+def list_test(test):
+ """Return the test ids that would be run if test() was run.
+
+ When things fail to import they can be represented as well, though
+ we use an ugly hack (see http://bugs.python.org/issue19746 for details)
+ to determine that. The difference matters because if a user is
+ filtering tests to run on the returned ids, a failed import can reduce
+ the visible tests but it can be impossible to tell that the selected
+ test would have been one of the imported ones.
+
+ :return: A tuple of test ids that would run and error strings
+ describing things that failed to import.
+ """
+ unittest_import_str = 'unittest.loader.ModuleImportFailure.'
+ test_ids = []
+ errors = []
+ for test in iterate_tests(test):
+ # The ugly hack from the docstring: failed imports appear as ModuleImportFailure tests.
+ if test.id().startswith(unittest_import_str):
+ errors.append(test.id()[len(unittest_import_str):])
+ else:
+ test_ids.append(test.id())
+ return test_ids, errors
+
+
class TestToolsTestRunner(object):
""" A thunk object to support unittest.TestProgram."""
- def __init__(self, stdout):
+ def __init__(self, verbosity=None, failfast=None, buffer=None,
+ stdout=None):
+ """Create a TestToolsTestRunner.
+
+ :param verbosity: Ignored.
+ :param failfast: Stop running tests at the first failure.
+ :param buffer: Ignored.
+ :param stdout: Stream to use for stdout.
+ """
+ self.failfast = failfast
self.stdout = stdout
+ def list(self, test):
+ """List the tests that would be run if test() was run."""
+ test_ids, errors = list_test(test)
+ for test_id in test_ids:
+ self.stdout.write('%s\n' % test_id)
+ if errors:
+ self.stdout.write('Failed to import\n')
+ for test_id in errors:
+ self.stdout.write('%s\n' % test_id)
+ sys.exit(2)
+
def run(self, test):
"Run the given test case or test suite."
- result = TextTestResult(unicode_output_stream(self.stdout))
+ result = TextTestResult(
+ unicode_output_stream(sys.stdout), failfast=self.failfast)
result.startTestRun()
try:
return test.run(result)
@@ -68,6 +117,8 @@ class TestToolsTestRunner(object):
# - --load-list has been added which can reduce the tests used (should be
# upstreamed).
# - The limitation of using getopt is declared to the user.
+# - http://bugs.python.org/issue16709 is worked around, by sorting tests when
+# discover is used.
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
@@ -164,16 +215,16 @@ class TestProgram(object):
finally:
source.close()
test_ids = set(line.strip().decode('utf-8') for line in lines)
- filtered = unittest.TestSuite()
- for test in iterate_tests(self.test):
- if test.id() in test_ids:
- filtered.addTest(test)
- self.test = filtered
+ self.test = filter_by_ids(self.test, test_ids)
if not self.listtests:
self.runTests()
else:
- for test in iterate_tests(self.test):
- stdout.write('%s\n' % test.id())
+ runner = self._get_runner()
+ if safe_hasattr(runner, 'list'):
+ runner.list(self.test)
+ else:
+ for test in iterate_tests(self.test):
+ stdout.write('%s\n' % test.id())
def usageExit(self, msg=None):
if msg:
@@ -269,7 +320,7 @@ class TestProgram(object):
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
- parser.add_option('-l', '--list', dest='listtests', default=False,
+ parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
help='List tests rather than running them.')
parser.add_option('--load-list', dest='load_list', default=None,
help='Specify a filename containing the test ids to use.')
@@ -300,33 +351,49 @@ class TestProgram(object):
top_level_dir = options.top
loader = Loader()
- self.test = loader.discover(start_dir, pattern, top_level_dir)
+ # See http://bugs.python.org/issue16709
+ # While sorting here is intrusive, it's better than being random.
+ # Rules for the sort:
+ # - standard suites are flattened, and the resulting tests sorted by
+ # id.
+ # - non-standard suites are preserved as-is, and sorted into position
+ # by the first test found by iterating the suite.
+ # We do this by a DSU process: flatten and grab a key, sort, strip the
+ # keys.
+ loaded = loader.discover(start_dir, pattern, top_level_dir)
+ self.test = sorted_tests(loaded)
def runTests(self):
if (self.catchbreak
and getattr(unittest, 'installHandler', None) is not None):
unittest.installHandler()
- if self.testRunner is None:
- self.testRunner = runner.TextTestRunner
- if isinstance(self.testRunner, classtypes()):
- try:
- testRunner = self.testRunner(verbosity=self.verbosity,
- failfast=self.failfast,
- buffer=self.buffer)
- except TypeError:
- # didn't accept the verbosity, buffer or failfast arguments
- testRunner = self.testRunner()
- else:
- # it is assumed to be a TestRunner instance
- testRunner = self.testRunner
+ testRunner = self._get_runner()
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
+
+ def _get_runner(self):
+ if self.testRunner is None:
+ self.testRunner = TestToolsTestRunner
+ try:
+ testRunner = self.testRunner(verbosity=self.verbosity,
+ failfast=self.failfast,
+ buffer=self.buffer)
+ except TypeError:
+ # didn't accept the verbosity, buffer or failfast arguments
+ try:
+ testRunner = self.testRunner()
+ except TypeError:
+ # it is assumed to be a TestRunner instance
+ testRunner = self.testRunner
+ return testRunner
+
+
################
def main(argv, stdout):
- runner = TestToolsTestRunner(stdout)
- program = TestProgram(argv=argv, testRunner=runner, stdout=stdout)
+ program = TestProgram(argv=argv, testRunner=partial(TestToolsTestRunner, stdout=stdout),
+ stdout=stdout)
if __name__ == '__main__':
main(sys.argv, sys.stdout)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/runtest.py b/test/3rdparty/testtools-0.9.34/testtools/runtest.py
index 507ad87c276..26ae387211b 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/runtest.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/runtest.py
@@ -135,6 +135,9 @@ class RunTest(object):
self._run_cleanups, self.result):
failed = True
finally:
+ if getattr(self.case, 'force_failure', None):
+ self._run_user(_raise_force_fail_error)
+ failed = True
if not failed:
self.result.addSuccess(self.case,
details=self.case.getDetails())
@@ -200,6 +203,10 @@ class RunTest(object):
raise e
+def _raise_force_fail_error():
+ raise AssertionError("Forced Test Failure")
+
+
# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks.
__unittest = True
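A hypothetical sketch of the ``force_failure`` hook wired up above (the attribute is read by RunTest after the test body completes)::

    from testtools import TestCase

    class ExampleTest(TestCase):        # illustrative test case
        def test_forced(self):
            self.force_failure = True   # the body passes, but the run is recorded as a
                                        # failure via AssertionError('Forced Test Failure')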
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tags.py b/test/3rdparty/testtools-0.9.34/testtools/tags.py
new file mode 100644
index 00000000000..b55bd38667b
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tags.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Tag support."""
+
+
+class TagContext(object):
+ """A tag context."""
+
+ def __init__(self, parent=None):
+ """Create a new TagContext.
+
+ :param parent: If provided, uses this as the parent context. Any tags
+ that are current on the parent at the time of construction are
+ current in this context.
+ """
+ self.parent = parent
+ self._tags = set()
+ if parent:
+ self._tags.update(parent.get_current_tags())
+
+ def get_current_tags(self):
+ """Return any current tags."""
+ return set(self._tags)
+
+ def change_tags(self, new_tags, gone_tags):
+ """Change the tags on this context.
+
+ :param new_tags: A set of tags to add to this context.
+ :param gone_tags: A set of tags to remove from this context.
+ :return: The tags now current on this context.
+ """
+ self._tags.update(new_tags)
+ self._tags.difference_update(gone_tags)
+ return self.get_current_tags()
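``TagContext`` in use, as a small sketch::

    from testtools.tags import TagContext

    parent = TagContext()
    parent.change_tags(set(['slow']), set())         # -> set(['slow'])
    child = TagContext(parent)                       # starts with the parent's current tags
    child.change_tags(set(['net']), set(['slow']))   # -> set(['net'])
    parent.get_current_tags()                        # still set(['slow']); the child's changes are local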
diff --git a/test/3rdparty/testtools-0.9.12/testtools/testcase.py b/test/3rdparty/testtools-0.9.34/testtools/testcase.py
index ee5e296cd46..59ea2052a9a 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/testcase.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/testcase.py
@@ -4,6 +4,7 @@
__metaclass__ = type
__all__ = [
+ 'attr',
'clone_test_with_new_id',
'ExpectedException',
'gather_details',
@@ -20,9 +21,13 @@ import sys
import types
import unittest
+from extras import (
+ safe_hasattr,
+ try_import,
+ )
+
from testtools import (
content,
- try_import,
)
from testtools.compat import (
advance_iterator,
@@ -42,13 +47,16 @@ from testtools.matchers import (
)
from testtools.monkey import patch
from testtools.runtest import RunTest
-from testtools.testresult import TestResult
+from testtools.testresult import (
+ ExtendedToOriginalDecorator,
+ TestResult,
+ )
wraps = try_import('functools.wraps')
class TestSkipped(Exception):
"""Raised within TestCase.run() when a test is skipped."""
-testSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
+TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
@@ -113,13 +121,13 @@ def run_test_with(test_runner, **kwargs):
def _copy_content(content_object):
"""Make a copy of the given content object.
- The content within `content_object` is iterated and saved. This is useful
- when the source of the content is volatile, a log file in a temporary
- directory for example.
+ The content within ``content_object`` is iterated and saved. This is
+ useful when the source of the content is volatile, a log file in a
+ temporary directory for example.
:param content_object: A `content.Content` instance.
:return: A `content.Content` instance with the same mime-type as
- `content_object` and a non-volatile copy of its content.
+ ``content_object`` and a non-volatile copy of its content.
"""
content_bytes = list(content_object.iter_bytes())
content_callback = lambda: content_bytes
@@ -127,7 +135,7 @@ def _copy_content(content_object):
def gather_details(source_dict, target_dict):
- """Merge the details from `source_dict` into `target_dict`.
+ """Merge the details from ``source_dict`` into ``target_dict``.
:param source_dict: A dictionary of details will be gathered.
:param target_dict: A dictionary into which details will be gathered.
@@ -147,6 +155,8 @@ class TestCase(unittest.TestCase):
:ivar exception_handlers: Exceptions to catch from setUp, runTest and
tearDown. This list is able to be modified at any time and consists of
(exception_class, handler(case, result, exception_value)) pairs.
+ :ivar force_failure: Force testtools.RunTest to fail the test after the
+ test has completed.
:cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
Defaults to ``RunTest``. The factory is expected to take a test case
and an optional list of exception handlers.
@@ -301,9 +311,7 @@ class TestCase(unittest.TestCase):
self.__exception_handlers.append(handler)
def _add_reason(self, reason):
- self.addDetail('reason', content.Content(
- content.ContentType('text', 'plain'),
- lambda: [reason.encode('utf8')]))
+ self.addDetail('reason', content.text_content(reason))
def assertEqual(self, expected, observed, message=''):
"""Assert that 'expected' is equal to 'observed'.
@@ -384,8 +392,8 @@ class TestCase(unittest.TestCase):
capture = CaptureMatchee()
matcher = Raises(MatchesAll(ReRaiseOtherTypes(),
MatchesException(excClass), capture))
-
- self.assertThat(lambda: callableObj(*args, **kwargs), matcher)
+ our_callable = Nullary(callableObj, *args, **kwargs)
+ self.assertThat(our_callable, matcher)
return capture.matchee
failUnlessRaises = assertRaises
@@ -402,14 +410,30 @@ class TestCase(unittest.TestCase):
return
existing_details = self.getDetails()
for (name, content) in mismatch.get_details().items():
- full_name = name
- suffix = 1
- while full_name in existing_details:
- full_name = "%s-%d" % (name, suffix)
- suffix += 1
- self.addDetail(full_name, content)
+ self.addDetailUniqueName(name, content)
raise MismatchError(matchee, matcher, mismatch, verbose)
+ def addDetailUniqueName(self, name, content_object):
+ """Add a detail to the test, but ensure it's name is unique.
+
+ This method checks whether ``name`` conflicts with a detail that has
+ already been added to the test. If it does, it will modify ``name`` to
+ avoid the conflict.
+
+ For more details see pydoc testtools.TestResult.
+
+ :param name: The name to give this detail.
+ :param content_object: The content object for this detail. See
+ testtools.content for more detail.
+ """
+ existing_details = self.getDetails()
+ full_name = name
+ suffix = 1
+ while full_name in existing_details:
+ full_name = "%s-%d" % (name, suffix)
+ suffix += 1
+ self.addDetail(full_name, content_object)
+
def defaultTestResult(self):
return TestResult()
@@ -504,9 +528,12 @@ class TestCase(unittest.TestCase):
def _report_traceback(self, exc_info, tb_label='traceback'):
id_gen = self._traceback_id_gens.setdefault(
tb_label, itertools.count(0))
- tb_id = advance_iterator(id_gen)
- if tb_id:
- tb_label = '%s-%d' % (tb_label, tb_id)
+ while True:
+ tb_id = advance_iterator(id_gen)
+ if tb_id:
+ tb_label = '%s-%d' % (tb_label, tb_id)
+ if tb_label not in self.getDetails():
+ break
self.addDetail(tb_label, content.TracebackContent(exc_info, self))
@staticmethod
@@ -526,10 +553,12 @@ class TestCase(unittest.TestCase):
ret = self.setUp()
if not self.__setup_called:
raise ValueError(
+ "In File: %s\n"
"TestCase.setUp was not called. Have you upcalled all the "
"way up the hierarchy from your setUp? e.g. Call "
"super(%s, self).setUp() from your setUp()."
- % self.__class__.__name__)
+ % (sys.modules[self.__class__.__module__].__file__,
+ self.__class__.__name__))
return ret
def _run_teardown(self, result):
@@ -542,10 +571,12 @@ class TestCase(unittest.TestCase):
ret = self.tearDown()
if not self.__teardown_called:
raise ValueError(
+ "In File: %s\n"
"TestCase.tearDown was not called. Have you upcalled all the "
"way up the hierarchy from your tearDown? e.g. Call "
"super(%s, self).tearDown() from your tearDown()."
- % self.__class__.__name__)
+ % (sys.modules[self.__class__.__module__].__file__,
+ self.__class__.__name__))
return ret
def _get_test_method(self):
@@ -602,21 +633,36 @@ class PlaceHolder(object):
particularly suitable for being added to TestResults.
"""
- def __init__(self, test_id, short_description=None):
+ failureException = None
+
+ def __init__(self, test_id, short_description=None, details=None,
+ outcome='addSuccess', error=None, tags=None, timestamps=(None, None)):
"""Construct a `PlaceHolder`.
:param test_id: The id of the placeholder test.
:param short_description: The short description of the place holder
test. If not provided, the id will be used instead.
+ :param details: Outcome details as accepted by addSuccess etc.
+ :param outcome: The outcome to call. Defaults to 'addSuccess'.
+ :param tags: Tags to report for the test.
+ :param timestamps: A two-tuple of timestamps for the test start and
+ finish. Each timestamp may be None to indicate it is not known.
"""
self._test_id = test_id
self._short_description = short_description
+ self._details = details or {}
+ self._outcome = outcome
+ if error is not None:
+ self._details['traceback'] = content.TracebackContent(error, self)
+ tags = tags or frozenset()
+ self._tags = frozenset(tags)
+ self._timestamps = timestamps
def __call__(self, result=None):
return self.run(result=result)
def __repr__(self):
- internal = [self._test_id]
+ internal = [self._outcome, self._test_id, self._details]
if self._short_description is not None:
internal.append(self._short_description)
return "<%s.%s(%s)>" % (
@@ -636,12 +682,24 @@ class PlaceHolder(object):
def id(self):
return self._test_id
- def run(self, result=None):
+ def _result(self, result):
if result is None:
- result = TestResult()
+ return TestResult()
+ else:
+ return ExtendedToOriginalDecorator(result)
+
+ def run(self, result=None):
+ result = self._result(result)
+ if self._timestamps[0] is not None:
+ result.time(self._timestamps[0])
+ result.tags(self._tags, set())
result.startTest(self)
- result.addSuccess(self)
+ if self._timestamps[1] is not None:
+ result.time(self._timestamps[1])
+ outcome = getattr(result, self._outcome)
+ outcome(self, details=self._details)
result.stopTest(self)
+ result.tags(set(), self._tags)
def shortDescription(self):
if self._short_description is None:
@@ -650,42 +708,33 @@ class PlaceHolder(object):
return self._short_description
-class ErrorHolder(PlaceHolder):
- """A placeholder test that will error out when run."""
-
- failureException = None
+def ErrorHolder(test_id, error, short_description=None, details=None):
+ """Construct an `ErrorHolder`.
- def __init__(self, test_id, error, short_description=None):
- """Construct an `ErrorHolder`.
-
- :param test_id: The id of the test.
- :param error: The exc info tuple that will be used as the test's error.
- :param short_description: An optional short description of the test.
- """
- super(ErrorHolder, self).__init__(
- test_id, short_description=short_description)
- self._error = error
+ :param test_id: The id of the test.
+ :param error: The exc info tuple that will be used as the test's error.
+ This is inserted into the details as 'traceback' - any existing key
+ will be overridden.
+ :param short_description: An optional short description of the test.
+ :param details: Outcome details as accepted by addSuccess etc.
+ """
+ return PlaceHolder(test_id, short_description=short_description,
+ details=details, outcome='addError', error=error)
- def __repr__(self):
- internal = [self._test_id, self._error]
- if self._short_description is not None:
- internal.append(self._short_description)
- return "<%s.%s(%s)>" % (
- self.__class__.__module__,
- self.__class__.__name__,
- ", ".join(map(repr, internal)))
- def run(self, result=None):
- if result is None:
- result = TestResult()
- result.startTest(self)
- result.addError(self, self._error)
- result.stopTest(self)
+def _clone_test_id_callback(test, callback):
+ """Copy a `TestCase`, and make it call callback for its id().
+ This is only expected to be used on tests that have been constructed but
+ not executed.
-# Python 2.4 did not know how to copy functions.
-if types.FunctionType not in copy._copy_dispatch:
- copy._copy_dispatch[types.FunctionType] = copy._copy_immutable
+ :param test: A TestCase instance.
+ :param callback: A callable that takes no parameters and returns a string.
+ :return: A copy.copy of the test with id=callback.
+ """
+ newTest = copy.copy(test)
+ newTest.id = callback
+ return newTest
def clone_test_with_new_id(test, new_id):
@@ -694,9 +743,45 @@ def clone_test_with_new_id(test, new_id):
This is only expected to be used on tests that have been constructed but
not executed.
"""
- newTest = copy.copy(test)
- newTest.id = lambda: new_id
- return newTest
+ return _clone_test_id_callback(test, lambda: new_id)
+
+
+def attr(*args):
+ """Decorator for adding attributes to WithAttributes.
+
+ :param args: The name of attributes to add.
+ :return: A callable that when applied to a WithAttributes will
+ alter its id to enumerate the added attributes.
+ """
+ def decorate(fn):
+ if not safe_hasattr(fn, '__testtools_attrs'):
+ fn.__testtools_attrs = set()
+ fn.__testtools_attrs.update(args)
+ return fn
+ return decorate
+
+
+class WithAttributes(object):
+ """A mix-in class for modifying test id by attributes.
+
+ e.g.
+ >>> class MyTest(WithAttributes, TestCase):
+ ... @attr('foo')
+ ... def test_bar(self):
+ ... pass
+ >>> MyTest('test_bar').id()
+ testtools.testcase.MyTest/test_bar[foo]
+ """
+
+ def id(self):
+ orig = super(WithAttributes, self).id()
+ # Depends on testtools.TestCase._get_test_method, be nice to support
+ # plain unittest.
+ fn = self._get_test_method()
+ attributes = getattr(fn, '__testtools_attrs', None)
+ if not attributes:
+ return orig
+ return orig + '[' + ','.join(sorted(attributes)) + ']'
def skip(reason):
@@ -719,7 +804,7 @@ def skip(reason):
def skipIf(condition, reason):
- """Skip a test if the condition is true."""
+ """A decorator to skip a test if the condition is true."""
if condition:
return skip(reason)
def _id(obj):
@@ -728,7 +813,7 @@ def skipIf(condition, reason):
def skipUnless(condition, reason):
- """Skip a test unless the condition is true."""
+ """A decorator to skip a test unless the condition is true."""
if not condition:
return skip(reason)
def _id(obj):
@@ -751,32 +836,107 @@ class ExpectedException:
exception is raised, an AssertionError will be raised.
"""
- def __init__(self, exc_type, value_re=None):
+ def __init__(self, exc_type, value_re=None, msg=None):
"""Construct an `ExpectedException`.
:param exc_type: The type of exception to expect.
:param value_re: A regular expression to match against the
'str()' of the raised exception.
+ :param msg: An optional message explaining the failure.
"""
self.exc_type = exc_type
self.value_re = value_re
+ self.msg = msg
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
- raise AssertionError('%s not raised.' % self.exc_type.__name__)
+ error_msg = '%s not raised.' % self.exc_type.__name__
+ if self.msg:
+ error_msg = error_msg + ' : ' + self.msg
+ raise AssertionError(error_msg)
if exc_type != self.exc_type:
return False
if self.value_re:
matcher = MatchesException(self.exc_type, self.value_re)
+ if self.msg:
+ matcher = Annotate(self.msg, matcher)
mismatch = matcher.match((exc_type, exc_value, traceback))
if mismatch:
raise AssertionError(mismatch.describe())
return True
+class Nullary(object):
+ """Turn a callable into a nullary callable.
+
+ The advantage of this over ``lambda: f(*args, **kwargs)`` is that it
+ preserves the ``repr()`` of ``f``.
+ """
+
+ def __init__(self, callable_object, *args, **kwargs):
+ self._callable_object = callable_object
+ self._args = args
+ self._kwargs = kwargs
+
+ def __call__(self):
+ return self._callable_object(*self._args, **self._kwargs)
+
+ def __repr__(self):
+ return repr(self._callable_object)
+
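+# Editor's note: a minimal sketch of Nullary (not part of the upstream patch),
+# assuming a plain function ``greet``; the point is that the deferred call
+# keeps the repr of the original callable:
+#
+#   >>> def greet(name):
+#   ...     return 'hi ' + name
+#   >>> call = Nullary(greet, 'there')
+#   >>> call()
+#   'hi there'
+#   >>> repr(call) == repr(greet)
+#   True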
+
+class DecorateTestCaseResult(object):
+ """Decorate a TestCase and permit customisation of the result for runs."""
+
+ def __init__(self, case, callout, before_run=None, after_run=None):
+ """Construct a DecorateTestCaseResult.
+
+ :param case: The case to decorate.
+ :param callout: A callback to call when run/__call__/debug is called.
+ Must take a result parameter and return a result object to be used.
+ For instance: lambda result: result.
+ :param before_run: If set, call this with the decorated result before
+ calling into the decorated run/__call__ method.
+ :param after_run: If set, call this with the decorated result after
+ calling into the decorated run/__call__ method.
+ """
+ self.decorated = case
+ self.callout = callout
+ self.before_run = before_run
+ self.after_run = after_run
+
+ def _run(self, result, run_method):
+ result = self.callout(result)
+ if self.before_run:
+ self.before_run(result)
+ try:
+ return run_method(result)
+ finally:
+ if self.after_run:
+ self.after_run(result)
+
+ def run(self, result=None):
+ self._run(result, self.decorated.run)
+
+ def __call__(self, result=None):
+ self._run(result, self.decorated)
+
+ def __getattr__(self, name):
+ return getattr(self.decorated, name)
+
+ def __delattr__(self, name):
+ delattr(self.decorated, name)
+
+ def __setattr__(self, name, value):
+ if name in ('decorated', 'callout', 'before_run', 'after_run'):
+ self.__dict__[name] = value
+ return
+ setattr(self.decorated, name, value)
+
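+# Editor's note: an illustrative sketch of DecorateTestCaseResult (not part of
+# the upstream patch). ``MyTest`` is a hypothetical TestCase; the callout
+# supplies a default result and the hooks bracket the run:
+#
+#   >>> from testtools import TestResult
+#   >>> case = MyTest('test_bar')
+#   >>> decorated = DecorateTestCaseResult(
+#   ...     case, lambda result: result or TestResult(),
+#   ...     before_run=lambda result: result.startTestRun(),
+#   ...     after_run=lambda result: result.stopTestRun())
+#   >>> decorated.run()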
+
# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks.
__unittest = True
diff --git a/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py b/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py
new file mode 100644
index 00000000000..5bf8f9c673c
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/testresult/__init__.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Test result objects."""
+
+__all__ = [
+ 'CopyStreamResult',
+ 'ExtendedToOriginalDecorator',
+ 'ExtendedToStreamDecorator',
+ 'MultiTestResult',
+ 'StreamFailFast',
+ 'StreamResult',
+ 'StreamResultRouter',
+ 'StreamSummary',
+ 'StreamTagger',
+ 'StreamToDict',
+ 'StreamToExtendedDecorator',
+ 'StreamToQueue',
+ 'Tagger',
+ 'TestByTestResult',
+ 'TestControl',
+ 'TestResult',
+ 'TestResultDecorator',
+ 'TextTestResult',
+ 'ThreadsafeForwardingResult',
+ 'TimestampingStreamResult',
+ ]
+
+from testtools.testresult.real import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestByTestResult,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ )
diff --git a/test/3rdparty/testtools-0.9.12/testtools/testresult/doubles.py b/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py
index 9af5b364ffb..d86f7fae2c1 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/testresult/doubles.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/testresult/doubles.py
@@ -6,9 +6,13 @@ __all__ = [
'Python26TestResult',
'Python27TestResult',
'ExtendedTestResult',
+ 'StreamResult',
]
+from testtools.tags import TagContext
+
+
class LoggingBase(object):
"""Basic support for logging of results."""
@@ -16,6 +20,7 @@ class LoggingBase(object):
self._events = []
self.shouldStop = False
self._was_successful = True
+ self.testsRun = 0
class Python26TestResult(LoggingBase):
@@ -34,6 +39,7 @@ class Python26TestResult(LoggingBase):
def startTest(self, test):
self._events.append(('startTest', test))
+ self.testsRun += 1
def stop(self):
self.shouldStop = True
@@ -48,6 +54,20 @@ class Python26TestResult(LoggingBase):
class Python27TestResult(Python26TestResult):
"""A precisely python 2.7 like test result, that logs."""
+ def __init__(self):
+ super(Python27TestResult, self).__init__()
+ self.failfast = False
+
+ def addError(self, test, err):
+ super(Python27TestResult, self).addError(test, err)
+ if self.failfast:
+ self.stop()
+
+ def addFailure(self, test, err):
+ super(Python27TestResult, self).addFailure(test, err)
+ if self.failfast:
+ self.stop()
+
def addExpectedFailure(self, test, err):
self._events.append(('addExpectedFailure', test, err))
@@ -56,6 +76,8 @@ class Python27TestResult(Python26TestResult):
def addUnexpectedSuccess(self, test):
self._events.append(('addUnexpectedSuccess', test))
+ if self.failfast:
+ self.stop()
def startTestRun(self):
self._events.append(('startTestRun',))
@@ -67,6 +89,10 @@ class Python27TestResult(Python26TestResult):
class ExtendedTestResult(Python27TestResult):
"""A test result like the proposed extended unittest result API."""
+ def __init__(self):
+ super(ExtendedTestResult, self).__init__()
+ self._tags = TagContext()
+
def addError(self, test, err=None, details=None):
self._was_successful = False
self._events.append(('addError', test, err or details))
@@ -100,8 +126,22 @@ class ExtendedTestResult(Python27TestResult):
def startTestRun(self):
super(ExtendedTestResult, self).startTestRun()
self._was_successful = True
+ self._tags = TagContext()
+
+ def startTest(self, test):
+ super(ExtendedTestResult, self).startTest(test)
+ self._tags = TagContext(self._tags)
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+ super(ExtendedTestResult, self).stopTest(test)
+
+ @property
+ def current_tags(self):
+ return self._tags.get_current_tags()
def tags(self, new_tags, gone_tags):
+ self._tags.change_tags(new_tags, gone_tags)
self._events.append(('tags', new_tags, gone_tags))
def time(self, time):
@@ -109,3 +149,26 @@ class ExtendedTestResult(Python27TestResult):
def wasSuccessful(self):
return self._was_successful
+
+
+class StreamResult(object):
+ """A StreamResult implementation for testing.
+
+ All events are logged to _events.
+ """
+
+ def __init__(self):
+ self._events = []
+
+ def startTestRun(self):
+ self._events.append(('startTestRun',))
+
+ def stopTestRun(self):
+ self._events.append(('stopTestRun',))
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ self._events.append(('status', test_id, test_status, test_tags,
+ runnable, file_name, file_bytes, eof, mime_type, route_code,
+ timestamp))
diff --git a/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py b/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py
new file mode 100644
index 00000000000..e8d70b399d7
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/testresult/real.py
@@ -0,0 +1,1776 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test results and related things."""
+
+__metaclass__ = type
+__all__ = [
+ 'ExtendedToOriginalDecorator',
+ 'ExtendedToStreamDecorator',
+ 'MultiTestResult',
+ 'StreamFailFast',
+ 'StreamResult',
+ 'StreamSummary',
+ 'StreamTagger',
+ 'StreamToDict',
+ 'StreamToExtendedDecorator',
+ 'StreamToQueue',
+ 'Tagger',
+ 'TestControl',
+ 'TestResult',
+ 'TestResultDecorator',
+ 'ThreadsafeForwardingResult',
+ 'TimestampingStreamResult',
+ ]
+
+import datetime
+from operator import methodcaller
+import sys
+import unittest
+
+from extras import safe_hasattr, try_import, try_imports
+parse_mime_type = try_import('mimeparse.parse_mime_type')
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools.compat import all, str_is_unicode, _u, _b
+from testtools.content import (
+ Content,
+ text_content,
+ TracebackContent,
+ )
+from testtools.content_type import ContentType
+from testtools.tags import TagContext
+# circular import
+# from testtools.testcase import PlaceHolder
+PlaceHolder = None
+
+# From http://docs.python.org/library/datetime.html
+_ZERO = datetime.timedelta(0)
+
+# A UTC class.
+
+class UTC(datetime.tzinfo):
+ """UTC"""
+
+ def utcoffset(self, dt):
+ return _ZERO
+
+ def tzname(self, dt):
+ return "UTC"
+
+ def dst(self, dt):
+ return _ZERO
+
+utc = UTC()
+
+
+class TestResult(unittest.TestResult):
+ """Subclass of unittest.TestResult extending the protocol for flexibility.
+
+ This test result supports an experimental protocol for providing additional
+ data in test outcomes. All the outcome methods take an optional dict
+ 'details'. If supplied, any other detail parameters like 'err' or 'reason'
+ should not be provided. The details dict is a mapping from names to
+ MIME content objects (see testtools.content). This permits attaching
+ tracebacks, log files, or even large objects like databases that were
+ part of the test fixture. Until this API is accepted into upstream
+ Python it is considered experimental: it may be replaced at any point
+ by a newer version more in line with upstream Python. Compatibility would
+ be aimed for in this case, but may not be possible.
+
+ :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
+ """
+
+ def __init__(self, failfast=False):
+ # startTestRun resets all attributes, and older clients don't know to
+ # call startTestRun, so it is called once here.
+ # Because subclasses may reasonably not expect this, we call the
+ # specific version we want to run.
+ self.failfast = failfast
+ TestResult.startTestRun(self)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ """Called when a test has failed in an expected manner.
+
+ Like with addSuccess and addError, testStopped should still be called.
+
+ :param test: The test that failed in an expected manner.
+ :param err: The exc_info of the error that was raised.
+ :return: None
+ """
+ # This is the python 2.7 implementation
+ self.expectedFailures.append(
+ (test, self._err_details_to_string(test, err, details)))
+
+ def addError(self, test, err=None, details=None):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ """
+ self.errors.append((test,
+ self._err_details_to_string(test, err, details)))
+ if self.failfast:
+ self.stop()
+
+ def addFailure(self, test, err=None, details=None):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ """
+ self.failures.append((test,
+ self._err_details_to_string(test, err, details)))
+ if self.failfast:
+ self.stop()
+
+ def addSkip(self, test, reason=None, details=None):
+ """Called when a test has been skipped rather than running.
+
+ Like with addSuccess and addError, testStopped should still be called.
+
+ This must be called by the TestCase. 'addError' and 'addFailure' will
+ not call addSkip, since they have no assumptions about the kind of
+ errors that a test can raise.
+
+ :param test: The test that has been skipped.
+ :param reason: The reason for the test being skipped. For instance,
+ u"pyGL is not available".
+ :param details: Alternative way to supply details about the outcome.
+ see the class docstring for more information.
+ :return: None
+ """
+ if reason is None:
+ reason = details.get('reason')
+ if reason is None:
+ reason = 'No reason given'
+ else:
+ reason = reason.as_text()
+ skip_list = self.skip_reasons.setdefault(reason, [])
+ skip_list.append(test)
+
+ def addSuccess(self, test, details=None):
+ """Called when a test succeeded."""
+
+ def addUnexpectedSuccess(self, test, details=None):
+ """Called when a test was expected to fail, but succeeded."""
+ self.unexpectedSuccesses.append(test)
+ if self.failfast:
+ self.stop()
+
+ def wasSuccessful(self):
+ """Has this result been successful so far?
+
+ If there have been any errors, failures or unexpected successes,
+ return False. Otherwise, return True.
+
+ Note: This differs from standard unittest in that we consider
+ unexpected successes to be equivalent to failures, rather than
+ successes.
+ """
+ return not (self.errors or self.failures or self.unexpectedSuccesses)
+
+ def _err_details_to_string(self, test, err=None, details=None):
+ """Convert an error in exc_info form or a contents dict to a string."""
+ if err is not None:
+ return TracebackContent(err, test).as_text()
+ return _details_to_str(details, special='traceback')
+
+ def _exc_info_to_unicode(self, err, test):
+ # Deprecated. Only present because subunit upcalls to it. See
+ # <https://bugs.launchpad.net/testtools/+bug/929063>.
+ return TracebackContent(err, test).as_text()
+
+ def _now(self):
+ """Return the current 'test time'.
+
+ If the time() method has not been called, this is equivalent to
+ datetime.now(), otherwise it's the last supplied datestamp given to the
+ time() method.
+ """
+ if self.__now is None:
+ return datetime.datetime.now(utc)
+ else:
+ return self.__now
+
+ def startTestRun(self):
+ """Called before a test run starts.
+
+ New in Python 2.7. The testtools version resets the result to a
+ pristine condition ready for use in another test run. Note that this
+ is different from Python 2.7's startTestRun, which does nothing.
+ """
+ # failfast is reset by the super __init__, so stash it.
+ failfast = self.failfast
+ super(TestResult, self).__init__()
+ self.skip_reasons = {}
+ self.__now = None
+ self._tags = TagContext()
+ # -- Start: As per python 2.7 --
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+ self.failfast = failfast
+ # -- End: As per python 2.7 --
+
+ def stopTestRun(self):
+ """Called after a test run completes.
+
+ New in Python 2.7.
+ """
+
+ def startTest(self, test):
+ super(TestResult, self).startTest(test)
+ self._tags = TagContext(self._tags)
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+ super(TestResult, self).stopTest(test)
+
+ @property
+ def current_tags(self):
+ """The currently set tags."""
+ return self._tags.get_current_tags()
+
+ def tags(self, new_tags, gone_tags):
+ """Add and remove tags from the test.
+
+ :param new_tags: A set of tags to be added to the stream.
+ :param gone_tags: A set of tags to be removed from the stream.
+ """
+ self._tags.change_tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ """Provide a timestamp to represent the current time.
+
+ This is useful when test activity is time delayed, or happening
+ concurrently and getting the system time between API calls will not
+ accurately represent the duration of tests (or the whole run).
+
+ Calling time() sets the datetime used by the TestResult object.
+ Time is permitted to go backwards when using this call.
+
+ :param a_datetime: A datetime.datetime object with TZ information or
+ None to reset the TestResult to gathering time from the system.
+ """
+ self.__now = a_datetime
+
+ def done(self):
+ """Called when the test runner is done.
+
+ deprecated in favour of stopTestRun.
+ """
+
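+# Editor's note: an illustrative sketch (not part of the upstream patch) of
+# the 'details' protocol described in the TestResult docstring above; ``case``
+# stands for any constructed TestCase:
+#
+#   >>> from testtools.content import text_content
+#   >>> result = TestResult()
+#   >>> result.startTest(case)
+#   >>> result.addFailure(case, details={
+#   ...     'traceback': text_content('Traceback (most recent call last): ...'),
+#   ...     'log': text_content('server returned 500')})
+#   >>> result.stopTest(case)
+#   >>> result.wasSuccessful()
+#   False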
+
+class StreamResult(object):
+ """A test result for reporting the activity of a test run.
+
+ Typical use
+ -----------
+
+ >>> result = StreamResult()
+ >>> result.startTestRun()
+ >>> try:
+ ... case.run(result)
+ ... finally:
+ ... result.stopTestRun()
+
+ The case object will be either a TestCase or a TestSuite, and
+ generally make a sequence of calls like::
+
+ >>> result.status(self.id(), 'inprogress')
+ >>> result.status(self.id(), 'success')
+
+ General concepts
+ ----------------
+
+ StreamResult is built to process events that are emitted by tests during a
+ test run or test enumeration. The test run may be running concurrently, and
+ even be spread out across multiple machines.
+
+ All events are timestamped to prevent network buffering or scheduling
+ latency causing false timing reports. Timestamps are datetime objects in
+ the UTC timezone.
+
+ A route_code is a unicode string that identifies where a particular test
+ was run. This is optional in the API but very useful when multiplexing multiple
+ streams together as it allows identification of interactions between tests
+ that were run on the same hardware or in the same test process. Generally
+ actual tests never need to bother with this - it is added and processed
+ by StreamResults that do multiplexing / run analysis. route_codes are
+ also used to route stdin back to pdb instances.
+
+ The StreamResult base class does no accounting or processing, rather it
+ just provides an empty implementation of every method, suitable for use
+ as a base class regardless of intent.
+ """
+
+ def startTestRun(self):
+ """Start a test run.
+
+ This will prepare the test result to process results (which might imply
+ connecting to a database or remote machine).
+ """
+
+ def stopTestRun(self):
+ """Stop a test run.
+
+ This informs the result that no more test updates will be received. At
+ this point any test ids that have started and not completed can be
+ considered failed-or-hung.
+ """
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ """Inform the result about a test status.
+
+ :param test_id: The test whose status is being reported. None to
+ report status about the test run as a whole.
+ :param test_status: The status for the test. There are two sorts of
+ status - interim and final status events. As many interim events
+ can be generated as desired, but only one final event. After a
+ final status event any further file or status events from the
+ same test_id+route_code may be discarded or associated with a new
+ test by the StreamResult. (But no exception will be thrown).
+
+ Interim states:
+ * None - no particular status is being reported, or status being
+ reported is not associated with a test (e.g. when reporting on
+ stdout / stderr chatter).
+ * inprogress - the test is currently running. Emitted by tests when
+ they start running and at any intermediary point they might
+ choose to indicate their continual operation.
+
+ Final states:
+ * exists - the test exists. This is used when a test is not being
+ executed. Typically this is when querying what tests could be run
+ in a test run (which is useful for selecting tests to run).
+ * xfail - the test failed but that was expected. This is purely
+ informative - the test is not considered to be a failure.
+ * uxsuccess - the test passed but was expected to fail. The test
+ will be considered a failure.
+ * success - the test has finished without error.
+ * fail - the test failed (or errored). The test will be considered
+ a failure.
+ * skip - the test was selected to run but chose to be skipped. E.g.
+ a test dependency was missing. This is purely informative - the
+ test is not considered to be a failure.
+
+ :param test_tags: Optional set of tags to apply to the test. Tags
+ have no intrinsic meaning - that is up to the test author.
+ :param runnable: Allows status reports to mark that they are for
+ tests which are not able to be explicitly run. For instance,
+ subtests will report themselves as non-runnable.
+ :param file_name: The name for the file_bytes. Any unicode string may
+ be used. While there is no semantic value attached to the name
+ of any attachment, the names 'stdout' and 'stderr' and 'traceback'
+ are recommended for use only for output sent to stdout, stderr and
+ tracebacks of exceptions. When file_name is supplied, file_bytes
+ must be a bytes instance.
+ :param file_bytes: A bytes object containing content for the named
+ file. This can just be a single chunk of the file - emitting
+ another file event with more later. Must be None unless a
+ file_name is supplied.
+ :param eof: True if this chunk is the last chunk of the file, any
+ additional chunks with the same name should be treated as an error
+ and discarded. Ignored unless file_name has been supplied.
+ :param mime_type: An optional MIME type for the file. stdout and
+ stderr will generally be "text/plain; charset=utf8". If None,
+ defaults to application/octet-stream. Ignored unless file_name
+ has been supplied.
+ """
+
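+# Editor's note: an illustrative sketch (not part of the upstream patch) of
+# the event sequence a test typically emits through status(), including a
+# file attachment; ``result`` is any StreamResult:
+#
+#   >>> result.status(test_id='test_foo', test_status='inprogress',
+#   ...               timestamp=datetime.datetime.now(utc))
+#   >>> result.status(test_id='test_foo', file_name='stdout',
+#   ...               file_bytes=b'hello\n', eof=True,
+#   ...               mime_type='text/plain; charset=utf8')
+#   >>> result.status(test_id='test_foo', test_status='success',
+#   ...               timestamp=datetime.datetime.now(utc))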
+
+def domap(*args, **kwargs):
+ return list(map(*args, **kwargs))
+
+
+class CopyStreamResult(StreamResult):
+ """Copies all events it receives to multiple results.
+
+ This provides an easy facility for combining multiple StreamResults.
+
+ For TestResult the equivalent class was ``MultiTestResult``.
+ """
+
+ def __init__(self, targets):
+ super(CopyStreamResult, self).__init__()
+ self.targets = targets
+
+ def startTestRun(self):
+ super(CopyStreamResult, self).startTestRun()
+ domap(methodcaller('startTestRun'), self.targets)
+
+ def stopTestRun(self):
+ super(CopyStreamResult, self).stopTestRun()
+ domap(methodcaller('stopTestRun'), self.targets)
+
+ def status(self, *args, **kwargs):
+ super(CopyStreamResult, self).status(*args, **kwargs)
+ domap(methodcaller('status', *args, **kwargs), self.targets)
+
+
+class StreamFailFast(StreamResult):
+ """Call the supplied callback if an error is seen in a stream.
+
+ An example callback::
+
+ def do_something():
+ pass
+ """
+
+ def __init__(self, on_error):
+ self.on_error = on_error
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ if test_status in ('uxsuccess', 'fail'):
+ self.on_error()
+
+
+class StreamResultRouter(StreamResult):
+ """A StreamResult that routes events.
+
+ StreamResultRouter forwards received events to another StreamResult object,
+ selected by a dynamic forwarding policy. Events where no destination is
+ found are forwarded to the fallback StreamResult, or an error is raised.
+
+ Typical use is to construct a router with a fallback and then either
+ create up front mapping rules, or create them as-needed from the fallback
+ handler::
+
+ >>> router = StreamResultRouter()
+ >>> sink = doubles.StreamResult()
+ >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+ ... consume_route=True)
+ >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+ StreamResultRouter has no buffering.
+
+ When adding routes (and for the fallback) whether to call startTestRun and
+ stopTestRun or to not call them is controllable by passing
+ 'do_start_stop_run'. The default is to call them for the fallback only.
+ If a route is added after startTestRun has been called, and
+ do_start_stop_run is True then startTestRun is called immediately on the
+ new route sink.
+
+ There is no a-priori defined lookup order for routes: if they are ambiguous
+ the behaviour is undefined. Only a single route is chosen for any event.
+ """
+
+ _policies = {}
+
+ def __init__(self, fallback=None, do_start_stop_run=True):
+ """Construct a StreamResultRouter with optional fallback.
+
+ :param fallback: A StreamResult to forward events to when no route
+ exists for them.
+ :param do_start_stop_run: If False do not pass startTestRun and
+ stopTestRun onto the fallback.
+ """
+ self.fallback = fallback
+ self._route_code_prefixes = {}
+ self._test_ids = {}
+ # Records sinks that should have do_start_stop_run called on them.
+ self._sinks = []
+ if do_start_stop_run and fallback:
+ self._sinks.append(fallback)
+ self._in_run = False
+
+ def startTestRun(self):
+ super(StreamResultRouter, self).startTestRun()
+ for sink in self._sinks:
+ sink.startTestRun()
+ self._in_run = True
+
+ def stopTestRun(self):
+ super(StreamResultRouter, self).stopTestRun()
+ for sink in self._sinks:
+ sink.stopTestRun()
+ self._in_run = False
+
+ def status(self, **kwargs):
+ route_code = kwargs.get('route_code', None)
+ test_id = kwargs.get('test_id', None)
+ if route_code is not None:
+ prefix = route_code.split('/')[0]
+ else:
+ prefix = route_code
+ if prefix in self._route_code_prefixes:
+ target, consume_route = self._route_code_prefixes[prefix]
+ if route_code is not None and consume_route:
+ route_code = route_code[len(prefix) + 1:]
+ if not route_code:
+ route_code = None
+ kwargs['route_code'] = route_code
+ elif test_id in self._test_ids:
+ target = self._test_ids[test_id]
+ else:
+ target = self.fallback
+ target.status(**kwargs)
+
+ def add_rule(self, sink, policy, do_start_stop_run=False, **policy_args):
+ """Add a rule to route events to sink when they match a given policy.
+
+ :param sink: A StreamResult to receive events.
+ :param policy: A routing policy. Valid policies are
+ 'route_code_prefix' and 'test_id'.
+ :param do_start_stop_run: If True then startTestRun and stopTestRun
+ events will be passed onto this sink.
+
+ :raises: ValueError if the policy is unknown
+ :raises: TypeError if the policy is given arguments it cannot handle.
+
+ ``route_code_prefix`` routes events based on a prefix of the route
+ code in the event. It takes a ``route_prefix`` argument to match on
+ (e.g. '0') and a ``consume_route`` argument, which, if True, removes
+ the prefix from the ``route_code`` when forwarding events.
+
+ ``test_id`` routes events based on the test id. It takes a single
+ argument, ``test_id``. Use ``None`` to select non-test events.
+ """
+ policy_method = StreamResultRouter._policies.get(policy, None)
+ if not policy_method:
+ raise ValueError("bad policy %r" % (policy,))
+ policy_method(self, sink, **policy_args)
+ if do_start_stop_run:
+ self._sinks.append(sink)
+ if self._in_run:
+ sink.startTestRun()
+
+ def _map_route_code_prefix(self, sink, route_prefix, consume_route=False):
+ if '/' in route_prefix:
+ raise TypeError(
+ "%r is more than one route step long" % (route_prefix,))
+ self._route_code_prefixes[route_prefix] = (sink, consume_route)
+ _policies['route_code_prefix'] = _map_route_code_prefix
+
+ def _map_test_id(self, sink, test_id):
+ self._test_ids[test_id] = sink
+ _policies['test_id'] = _map_test_id
+
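+# Editor's note: an illustrative sketch (not part of the upstream patch) of
+# the 'test_id' routing policy; ``fallback_result`` and ``detail_sink`` are
+# hypothetical StreamResult objects:
+#
+#   >>> router = StreamResultRouter(fallback=fallback_result)
+#   >>> router.add_rule(detail_sink, 'test_id', test_id='test_foo')
+#   >>> router.status(test_id='test_foo', test_status='inprogress')
+#   >>> router.status(test_id='test_bar', test_status='fail')
+#
+# The first status call is routed to detail_sink; the second falls back to
+# fallback_result.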
+
+class StreamTagger(CopyStreamResult):
+ """Adds or discards tags from StreamResult events."""
+
+ def __init__(self, targets, add=None, discard=None):
+ """Create a StreamTagger.
+
+ :param targets: A list of targets to forward events onto.
+ :param add: Either None or an iterable of tags to add to each event.
+ :param discard: Either None or an iterable of tags to discard from each
+ event.
+ """
+ super(StreamTagger, self).__init__(targets)
+ self.add = frozenset(add or ())
+ self.discard = frozenset(discard or ())
+
+ def status(self, *args, **kwargs):
+ test_tags = kwargs.get('test_tags') or set()
+ test_tags.update(self.add)
+ test_tags.difference_update(self.discard)
+ kwargs['test_tags'] = test_tags or None
+ super(StreamTagger, self).status(*args, **kwargs)
+
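+# Editor's note: an illustrative sketch (not part of the upstream patch);
+# ``target_result`` is a hypothetical StreamResult that receives every event
+# stamped with a worker tag and with a noisy tag discarded:
+#
+#   >>> tagger = StreamTagger([target_result], add=['worker-0'], discard=['quiet'])
+#   >>> tagger.startTestRun()
+#   >>> tagger.status(test_id='test_foo', test_status='success')
+#   >>> tagger.stopTestRun()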
+
+class StreamToDict(StreamResult):
+ """A specialised StreamResult that emits a callback as tests complete.
+
+ Top level file attachments are simply discarded. Hung tests are detected
+ by stopTestRun and notified there and then.
+
+ The callback is passed a dict with the following keys:
+
+ * id: the test id.
+ * tags: The tags for the test. A set of unicode strings.
+ * details: A dict of file attachments - ``testtools.content.Content``
+ objects.
+ * status: One of the StreamResult status codes (including inprogress) or
+ 'unknown' (used if only file events for a test were received...)
+ * timestamps: A pair of timestamps - the first one received with this
+ test id, and the one in the event that triggered the notification.
+ Hung tests have a None for the second end event. Timestamps are not
+ compared - their ordering is purely the order received in the stream.
+
+ Only the most recent tags observed in the stream are reported.
+ """
+
+ def __init__(self, on_test):
+ """Create a StreamToDict calling on_test on test completions.
+
+ :param on_test: A callback that accepts one parameter - a dict
+ describing a test.
+ """
+ super(StreamToDict, self).__init__()
+ self.on_test = on_test
+ if parse_mime_type is None:
+ raise ImportError("mimeparse module missing.")
+
+ def startTestRun(self):
+ super(StreamToDict, self).startTestRun()
+ self._inprogress = {}
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ super(StreamToDict, self).status(test_id, test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+ key = self._ensure_key(test_id, route_code, timestamp)
+ # update fields
+ if not key:
+ return
+ if test_status is not None:
+ self._inprogress[key]['status'] = test_status
+ self._inprogress[key]['timestamps'][1] = timestamp
+ case = self._inprogress[key]
+ if file_name is not None:
+ if file_name not in case['details']:
+ if mime_type is None:
+ mime_type = 'application/octet-stream'
+ primary, sub, parameters = parse_mime_type(mime_type)
+ if 'charset' in parameters:
+ if ',' in parameters['charset']:
+ # testtools was emitting a bad encoding; work around it,
+ # though this does lose data - probably want to drop
+ # this in a few releases.
+ parameters['charset'] = parameters['charset'][
+ :parameters['charset'].find(',')]
+ content_type = ContentType(primary, sub, parameters)
+ content_bytes = []
+ case['details'][file_name] = Content(
+ content_type, lambda:content_bytes)
+ case['details'][file_name].iter_bytes().append(file_bytes)
+ if test_tags is not None:
+ self._inprogress[key]['tags'] = test_tags
+ # notify completed tests.
+ if test_status not in (None, 'inprogress'):
+ self.on_test(self._inprogress.pop(key))
+
+ def stopTestRun(self):
+ super(StreamToDict, self).stopTestRun()
+ while self._inprogress:
+ case = self._inprogress.popitem()[1]
+ case['timestamps'][1] = None
+ self.on_test(case)
+
+ def _ensure_key(self, test_id, route_code, timestamp):
+ if test_id is None:
+ return
+ key = (test_id, route_code)
+ if key not in self._inprogress:
+ self._inprogress[key] = {
+ 'id': test_id,
+ 'tags': set(),
+ 'details': {},
+ 'status': 'unknown',
+ 'timestamps': [timestamp, None]}
+ return key
+
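+# Editor's note: an illustrative sketch (not part of the upstream patch) of
+# StreamToDict collecting completed tests as dicts (requires the mimeparse
+# module checked for in __init__ above):
+#
+#   >>> collected = []
+#   >>> to_dict = StreamToDict(collected.append)
+#   >>> to_dict.startTestRun()
+#   >>> to_dict.status(test_id='test_foo', test_status='inprogress')
+#   >>> to_dict.status(test_id='test_foo', test_status='success')
+#   >>> to_dict.stopTestRun()
+#   >>> collected[0]['id'], collected[0]['status']
+#   ('test_foo', 'success')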
+
+_status_map = {
+ 'inprogress': 'addFailure',
+ 'unknown': 'addFailure',
+ 'success': 'addSuccess',
+ 'skip': 'addSkip',
+ 'fail': 'addFailure',
+ 'xfail': 'addExpectedFailure',
+ 'uxsuccess': 'addUnexpectedSuccess',
+ }
+
+
+def test_dict_to_case(test_dict):
+ """Convert a test dict into a TestCase object.
+
+ :param test_dict: A test dict as generated by StreamToDict.
+ :return: A PlaceHolder test object.
+ """
+ # Circular import.
+ global PlaceHolder
+ if PlaceHolder is None:
+ from testtools.testcase import PlaceHolder
+ outcome = _status_map[test_dict['status']]
+ return PlaceHolder(test_dict['id'], outcome=outcome,
+ details=test_dict['details'], tags=test_dict['tags'],
+ timestamps=test_dict['timestamps'])
+
+
+class StreamSummary(StreamToDict):
+ """A specialised StreamResult that summarises a stream.
+
+ The summary uses the same representation as the original
+ unittest.TestResult contract, allowing it to be consumed by any test
+ runner.
+ """
+
+ def __init__(self):
+ super(StreamSummary, self).__init__(self._gather_test)
+ self._handle_status = {
+ 'success': self._success,
+ 'skip': self._skip,
+ 'exists': self._exists,
+ 'fail': self._fail,
+ 'xfail': self._xfail,
+ 'uxsuccess': self._uxsuccess,
+ 'unknown': self._incomplete,
+ 'inprogress': self._incomplete,
+ }
+
+ def startTestRun(self):
+ super(StreamSummary, self).startTestRun()
+ self.failures = []
+ self.errors = []
+ self.testsRun = 0
+ self.skipped = []
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+
+ def wasSuccessful(self):
+ """Return False if any failure has occurred.
+
+ Note that incomplete tests can only be detected when stopTestRun is
+ called, so that should be called before checking wasSuccessful.
+ """
+ return (not self.failures and not self.errors)
+
+ def _gather_test(self, test_dict):
+ if test_dict['status'] == 'exists':
+ return
+ self.testsRun += 1
+ case = test_dict_to_case(test_dict)
+ self._handle_status[test_dict['status']](case)
+
+ def _incomplete(self, case):
+ self.errors.append((case, "Test did not complete"))
+
+ def _success(self, case):
+ pass
+
+ def _skip(self, case):
+ if 'reason' not in case._details:
+ reason = "Unknown"
+ else:
+ reason = case._details['reason'].as_text()
+ self.skipped.append((case, reason))
+
+ def _exists(self, case):
+ pass
+
+ def _fail(self, case):
+ message = _details_to_str(case._details, special="traceback")
+ self.errors.append((case, message))
+
+ def _xfail(self, case):
+ message = _details_to_str(case._details, special="traceback")
+ self.expectedFailures.append((case, message))
+
+ def _uxsuccess(self, case):
+ case._outcome = 'addUnexpectedSuccess'
+ self.unexpectedSuccesses.append(case)
+
+
+class TestControl(object):
+ """Controls a running test run, allowing it to be interrupted.
+
+ :ivar shouldStop: If True, tests should not run and should instead
+ return immediately. Similarly a TestSuite should check this between
+ each test and if set stop dispatching any new tests and return.
+ """
+
+ def __init__(self):
+ super(TestControl, self).__init__()
+ self.shouldStop = False
+
+ def stop(self):
+ """Indicate that tests should stop running."""
+ self.shouldStop = True
+
+
+class MultiTestResult(TestResult):
+ """A test result that dispatches to many test results."""
+
+ def __init__(self, *results):
+ # Setup _results first, as the base class __init__ assigns to failfast.
+ self._results = list(map(ExtendedToOriginalDecorator, results))
+ super(MultiTestResult, self).__init__()
+
+ def __repr__(self):
+ return '<%s (%s)>' % (
+ self.__class__.__name__, ', '.join(map(repr, self._results)))
+
+ def _dispatch(self, message, *args, **kwargs):
+ return tuple(
+ getattr(result, message)(*args, **kwargs)
+ for result in self._results)
+
+ def _get_failfast(self):
+ return getattr(self._results[0], 'failfast', False)
+ def _set_failfast(self, value):
+ self._dispatch('__setattr__', 'failfast', value)
+ failfast = property(_get_failfast, _set_failfast)
+
+ def _get_shouldStop(self):
+ return any(self._dispatch('__getattr__', 'shouldStop'))
+ def _set_shouldStop(self, value):
+ # Called because we subclass TestResult. Probably should not do that.
+ pass
+ shouldStop = property(_get_shouldStop, _set_shouldStop)
+
+ def startTest(self, test):
+ super(MultiTestResult, self).startTest(test)
+ return self._dispatch('startTest', test)
+
+ def stop(self):
+ return self._dispatch('stop')
+
+ def stopTest(self, test):
+ super(MultiTestResult, self).stopTest(test)
+ return self._dispatch('stopTest', test)
+
+ def addError(self, test, error=None, details=None):
+ return self._dispatch('addError', test, error, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self._dispatch(
+ 'addExpectedFailure', test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self._dispatch('addFailure', test, err, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self._dispatch('addSkip', test, reason, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self._dispatch('addSuccess', test, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self._dispatch('addUnexpectedSuccess', test, details=details)
+
+ def startTestRun(self):
+ super(MultiTestResult, self).startTestRun()
+ return self._dispatch('startTestRun')
+
+ def stopTestRun(self):
+ return self._dispatch('stopTestRun')
+
+ def tags(self, new_tags, gone_tags):
+ super(MultiTestResult, self).tags(new_tags, gone_tags)
+ return self._dispatch('tags', new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self._dispatch('time', a_datetime)
+
+ def done(self):
+ return self._dispatch('done')
+
+ def wasSuccessful(self):
+ """Was this result successful?
+
+ Only returns True if every constituent result was successful.
+ """
+ return all(self._dispatch('wasSuccessful'))
+
+
+class TextTestResult(TestResult):
+ """A TestResult which outputs activity to a text stream."""
+
+ def __init__(self, stream, failfast=False):
+ """Construct a TextTestResult writing to stream."""
+ super(TextTestResult, self).__init__(failfast=failfast)
+ self.stream = stream
+ self.sep1 = '=' * 70 + '\n'
+ self.sep2 = '-' * 70 + '\n'
+
+ def _delta_to_float(self, a_timedelta):
+ return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
+ a_timedelta.microseconds / 1000000.0)
+
+ def _show_list(self, label, error_list):
+ for test, output in error_list:
+ self.stream.write(self.sep1)
+ self.stream.write("%s: %s\n" % (label, test.id()))
+ self.stream.write(self.sep2)
+ self.stream.write(output)
+
+ def startTestRun(self):
+ super(TextTestResult, self).startTestRun()
+ self.__start = self._now()
+ self.stream.write("Tests running...\n")
+
+ def stopTestRun(self):
+ if self.testsRun != 1:
+ plural = 's'
+ else:
+ plural = ''
+ stop = self._now()
+ self._show_list('ERROR', self.errors)
+ self._show_list('FAIL', self.failures)
+ for test in self.unexpectedSuccesses:
+ self.stream.write(
+ "%sUNEXPECTED SUCCESS: %s\n%s" % (
+ self.sep1, test.id(), self.sep2))
+ self.stream.write("\nRan %d test%s in %.3fs\n" %
+ (self.testsRun, plural,
+ self._delta_to_float(stop - self.__start)))
+ if self.wasSuccessful():
+ self.stream.write("OK\n")
+ else:
+ self.stream.write("FAILED (")
+ details = []
+ details.append("failures=%d" % (
+ sum(map(len, (
+ self.failures, self.errors, self.unexpectedSuccesses)))))
+ self.stream.write(", ".join(details))
+ self.stream.write(")\n")
+ super(TextTestResult, self).stopTestRun()
+
+
+class ThreadsafeForwardingResult(TestResult):
+ """A TestResult which ensures the target does not receive mixed up calls.
+
+ Multiple ``ThreadsafeForwardingResults`` can forward to the same target
+ result, and that target result will only ever receive the complete set of
+ events for one test at a time.
+
+ This is enforced using a semaphore, which further guarantees that tests
+ will be sent atomically even if the ``ThreadsafeForwardingResults`` are in
+ different threads.
+
+ ``ThreadsafeForwardingResult`` is typically used by
+ ``ConcurrentTestSuite``, which creates one ``ThreadsafeForwardingResult``
+ per thread, each of which wraps the TestResult that
+ ``ConcurrentTestSuite.run()`` is called with.
+
+ target.startTestRun() and target.stopTestRun() are called once for each
+ ThreadsafeForwardingResult that forwards to the same target. If the target
+ takes special action on these events, it should take care to accommodate
+ this.
+
+ time() and tags() calls are batched to be adjacent to the test result and
+ in the case of tags() are coerced into test-local scope, avoiding the
+ opportunity for bugs around global state in the target.
+ """
+
+ def __init__(self, target, semaphore):
+ """Create a ThreadsafeForwardingResult forwarding to target.
+
+ :param target: A ``TestResult``.
+ :param semaphore: A ``threading.Semaphore`` with limit 1.
+ """
+ TestResult.__init__(self)
+ self.result = ExtendedToOriginalDecorator(target)
+ self.semaphore = semaphore
+ self._test_start = None
+ self._global_tags = set(), set()
+ self._test_tags = set(), set()
+
+ def __repr__(self):
+ return '<%s %r>' % (self.__class__.__name__, self.result)
+
+ def _any_tags(self, tags):
+ return bool(tags[0] or tags[1])
+
+ def _add_result_with_semaphore(self, method, test, *args, **kwargs):
+ now = self._now()
+ self.semaphore.acquire()
+ try:
+ self.result.time(self._test_start)
+ self.result.startTest(test)
+ self.result.time(now)
+ if self._any_tags(self._global_tags):
+ self.result.tags(*self._global_tags)
+ if self._any_tags(self._test_tags):
+ self.result.tags(*self._test_tags)
+ self._test_tags = set(), set()
+ try:
+ method(test, *args, **kwargs)
+ finally:
+ self.result.stopTest(test)
+ finally:
+ self.semaphore.release()
+ self._test_start = None
+
+ def addError(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addError,
+ test, err, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addExpectedFailure,
+ test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ self._add_result_with_semaphore(self.result.addFailure,
+ test, err, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ self._add_result_with_semaphore(self.result.addSkip,
+ test, reason, details=details)
+
+ def addSuccess(self, test, details=None):
+ self._add_result_with_semaphore(self.result.addSuccess,
+ test, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
+ test, details=details)
+
+ def progress(self, offset, whence):
+ pass
+
+ def startTestRun(self):
+ super(ThreadsafeForwardingResult, self).startTestRun()
+ self.semaphore.acquire()
+ try:
+ self.result.startTestRun()
+ finally:
+ self.semaphore.release()
+
+ def _get_shouldStop(self):
+ self.semaphore.acquire()
+ try:
+ return self.result.shouldStop
+ finally:
+ self.semaphore.release()
+ def _set_shouldStop(self, value):
+ # Another case where we should not subclass TestResult
+ pass
+ shouldStop = property(_get_shouldStop, _set_shouldStop)
+
+ def stop(self):
+ self.semaphore.acquire()
+ try:
+ self.result.stop()
+ finally:
+ self.semaphore.release()
+
+ def stopTestRun(self):
+ self.semaphore.acquire()
+ try:
+ self.result.stopTestRun()
+ finally:
+ self.semaphore.release()
+
+ def done(self):
+ self.semaphore.acquire()
+ try:
+ self.result.done()
+ finally:
+ self.semaphore.release()
+
+ def startTest(self, test):
+ self._test_start = self._now()
+ super(ThreadsafeForwardingResult, self).startTest(test)
+
+ def wasSuccessful(self):
+ return self.result.wasSuccessful()
+
+ def tags(self, new_tags, gone_tags):
+ """See `TestResult`."""
+ super(ThreadsafeForwardingResult, self).tags(new_tags, gone_tags)
+ if self._test_start is not None:
+ self._test_tags = _merge_tags(
+ self._test_tags, (new_tags, gone_tags))
+ else:
+ self._global_tags = _merge_tags(
+ self._global_tags, (new_tags, gone_tags))
+
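+# Editor's note: an illustrative sketch (not part of the upstream patch) of
+# wiring several ThreadsafeForwardingResults to one shared target:
+#
+#   >>> import threading
+#   >>> sem = threading.Semaphore(1)
+#   >>> target = TestResult()
+#   >>> worker_a = ThreadsafeForwardingResult(target, sem)
+#   >>> worker_b = ThreadsafeForwardingResult(target, sem)
+#
+# Each worker thread then drives its own forwarding result (startTest,
+# addSuccess, ...); the semaphore keeps each test's events contiguous in
+# ``target``.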
+
+def _merge_tags(existing, changed):
+ new_tags, gone_tags = changed
+ result_new = set(existing[0])
+ result_gone = set(existing[1])
+ result_new.update(new_tags)
+ result_new.difference_update(gone_tags)
+ result_gone.update(gone_tags)
+ result_gone.difference_update(new_tags)
+ return result_new, result_gone
+
+
+class ExtendedToOriginalDecorator(object):
+ """Permit new TestResult API code to degrade gracefully with old results.
+
+ This decorates an existing TestResult and converts missing outcomes
+ such as addSkip to older outcomes such as addSuccess. It also supports
+ the extended details protocol. In all cases the most recent protocol
+ is attempted first, and fallbacks only occur when the decorated result
+ does not support the newer style of calling.
+ """
+
+ def __init__(self, decorated):
+ self.decorated = decorated
+ self._tags = TagContext()
+ # Only used for old TestResults that do not have failfast.
+ self._failfast = False
+
+ def __repr__(self):
+ return '<%s %r>' % (self.__class__.__name__, self.decorated)
+
+ def __getattr__(self, name):
+ return getattr(self.decorated, name)
+
+ def addError(self, test, err=None, details=None):
+ try:
+ self._check_args(err, details)
+ if details is not None:
+ try:
+ return self.decorated.addError(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return self.decorated.addError(test, err)
+ finally:
+ if self.failfast:
+ self.stop()
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._check_args(err, details)
+ addExpectedFailure = getattr(
+ self.decorated, 'addExpectedFailure', None)
+ if addExpectedFailure is None:
+ return self.addSuccess(test)
+ if details is not None:
+ try:
+ return addExpectedFailure(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return addExpectedFailure(test, err)
+
+ def addFailure(self, test, err=None, details=None):
+ try:
+ self._check_args(err, details)
+ if details is not None:
+ try:
+ return self.decorated.addFailure(test, details=details)
+ except TypeError:
+ # have to convert
+ err = self._details_to_exc_info(details)
+ return self.decorated.addFailure(test, err)
+ finally:
+ if self.failfast:
+ self.stop()
+
+ def addSkip(self, test, reason=None, details=None):
+ self._check_args(reason, details)
+ addSkip = getattr(self.decorated, 'addSkip', None)
+ if addSkip is None:
+ return self.decorated.addSuccess(test)
+ if details is not None:
+ try:
+ return addSkip(test, details=details)
+ except TypeError:
+ # extract the reason if it's available
+ try:
+ reason = details['reason'].as_text()
+ except KeyError:
+ reason = _details_to_str(details)
+ return addSkip(test, reason)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ try:
+ outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
+ if outcome is None:
+ try:
+ test.fail("")
+ except test.failureException:
+ return self.addFailure(test, sys.exc_info())
+ if details is not None:
+ try:
+ return outcome(test, details=details)
+ except TypeError:
+ pass
+ return outcome(test)
+ finally:
+ if self.failfast:
+ self.stop()
+
+ def addSuccess(self, test, details=None):
+ if details is not None:
+ try:
+ return self.decorated.addSuccess(test, details=details)
+ except TypeError:
+ pass
+ return self.decorated.addSuccess(test)
+
+ def _check_args(self, err, details):
+ param_count = 0
+ if err is not None:
+ param_count += 1
+ if details is not None:
+ param_count += 1
+ if param_count != 1:
+ raise ValueError("Must pass only one of err '%s' and details '%s'"
+ % (err, details))
+
+ def _details_to_exc_info(self, details):
+ """Convert a details dict to an exc_info tuple."""
+ return (
+ _StringException,
+ _StringException(_details_to_str(details, special='traceback')),
+ None)
+
+ @property
+ def current_tags(self):
+ return getattr(
+ self.decorated, 'current_tags', self._tags.get_current_tags())
+
+ def done(self):
+ try:
+ return self.decorated.done()
+ except AttributeError:
+ return
+
+ def _get_failfast(self):
+ return getattr(self.decorated, 'failfast', self._failfast)
+ def _set_failfast(self, value):
+ if safe_hasattr(self.decorated, 'failfast'):
+ self.decorated.failfast = value
+ else:
+ self._failfast = value
+ failfast = property(_get_failfast, _set_failfast)
+
+ def progress(self, offset, whence):
+ method = getattr(self.decorated, 'progress', None)
+ if method is None:
+ return
+ return method(offset, whence)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def startTest(self, test):
+ self._tags = TagContext(self._tags)
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ self._tags = TagContext()
+ try:
+ return self.decorated.startTestRun()
+ except AttributeError:
+ return
+
+ def stop(self):
+ return self.decorated.stop()
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ try:
+ return self.decorated.stopTestRun()
+ except AttributeError:
+ return
+
+ def tags(self, new_tags, gone_tags):
+ method = getattr(self.decorated, 'tags', None)
+ if method is not None:
+ return method(new_tags, gone_tags)
+ else:
+ self._tags.change_tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ method = getattr(self.decorated, 'time', None)
+ if method is None:
+ return
+ return method(a_datetime)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+
+class ExtendedToStreamDecorator(CopyStreamResult, StreamSummary, TestControl):
+ """Permit using old TestResult API code with new StreamResult objects.
+
+ This decorates a StreamResult and converts old (Python 2.6 / 2.7 /
+ Extended) TestResult API calls into StreamResult calls.
+
+ It also supports regular StreamResult calls, making it safe to wrap around
+ any StreamResult.
+ """
+
+ def __init__(self, decorated):
+ super(ExtendedToStreamDecorator, self).__init__([decorated])
+ # Deal with mismatched base class constructors.
+ TestControl.__init__(self)
+ self._started = False
+
+ def _get_failfast(self):
+ return len(self.targets) == 2
+ def _set_failfast(self, value):
+ if value:
+ if len(self.targets) == 2:
+ return
+ self.targets.append(StreamFailFast(self.stop))
+ else:
+ del self.targets[1:]
+ failfast = property(_get_failfast, _set_failfast)
+
+ def startTest(self, test):
+ if not self._started:
+ self.startTestRun()
+ self.status(test_id=test.id(), test_status='inprogress', timestamp=self._now())
+ self._tags = TagContext(self._tags)
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+
+ def addError(self, test, err=None, details=None):
+ self._check_args(err, details)
+ self._convert(test, err, details, 'fail')
+ addFailure = addError
+
+ def _convert(self, test, err, details, status, reason=None):
+ if not self._started:
+ self.startTestRun()
+ test_id = test.id()
+ now = self._now()
+ if err is not None:
+ if details is None:
+ details = {}
+ details['traceback'] = TracebackContent(err, test)
+ if details is not None:
+ for name, content in details.items():
+ mime_type = repr(content.content_type)
+ for file_bytes in content.iter_bytes():
+ self.status(file_name=name, file_bytes=file_bytes,
+ mime_type=mime_type, test_id=test_id, timestamp=now)
+ self.status(file_name=name, file_bytes=_b(""), eof=True,
+ mime_type=mime_type, test_id=test_id, timestamp=now)
+ if reason is not None:
+ self.status(file_name='reason', file_bytes=reason.encode('utf8'),
+ eof=True, mime_type="text/plain; charset=utf8",
+ test_id=test_id, timestamp=now)
+ self.status(test_id=test_id, test_status=status,
+ test_tags=self.current_tags, timestamp=now)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ self._check_args(err, details)
+ self._convert(test, err, details, 'xfail')
+
+ def addSkip(self, test, reason=None, details=None):
+ self._convert(test, None, details, 'skip', reason)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ self._convert(test, None, details, 'uxsuccess')
+
+ def addSuccess(self, test, details=None):
+ self._convert(test, None, details, 'success')
+
+ def _check_args(self, err, details):
+ param_count = 0
+ if err is not None:
+ param_count += 1
+ if details is not None:
+ param_count += 1
+ if param_count != 1:
+ raise ValueError("Must pass only one of err '%s' and details '%s'"
+ % (err, details))
+
+ def startTestRun(self):
+ super(ExtendedToStreamDecorator, self).startTestRun()
+ self._tags = TagContext()
+ self.shouldStop = False
+ self.__now = None
+ self._started = True
+
+ def stopTest(self, test):
+ self._tags = self._tags.parent
+
+ @property
+ def current_tags(self):
+ """The currently set tags."""
+ return self._tags.get_current_tags()
+
+ def tags(self, new_tags, gone_tags):
+ """Add and remove tags from the test.
+
+ :param new_tags: A set of tags to be added to the stream.
+ :param gone_tags: A set of tags to be removed from the stream.
+ """
+ self._tags.change_tags(new_tags, gone_tags)
+
+ def _now(self):
+ """Return the current 'test time'.
+
+ If the time() method has not been called, this is equivalent to
+ datetime.now(), otherwise it's the last supplied datestamp given to the
+ time() method.
+ """
+ if self.__now is None:
+ return datetime.datetime.now(utc)
+ else:
+ return self.__now
+
+ def time(self, a_datetime):
+ self.__now = a_datetime
+
+ def wasSuccessful(self):
+ if not self._started:
+ self.startTestRun()
+ return super(ExtendedToStreamDecorator, self).wasSuccessful()
+
+
+class StreamToExtendedDecorator(StreamResult):
+ """Convert StreamResult API calls into ExtendedTestResult calls.
+
+ This will buffer all calls for all concurrently active tests, and
+ then flush each test as they complete.
+
+ Incomplete tests will be flushed as errors when the test run stops.
+
+ Non-test file attachments are accumulated into a test called
+ 'testtools.extradata', flushed at the end of the run.
+ """
+
+ def __init__(self, decorated):
+ # ExtendedToOriginalDecorator takes care of thunking details back to
+ # exceptions/reasons etc.
+ self.decorated = ExtendedToOriginalDecorator(decorated)
+ # StreamToDict buffers and gives us individual tests.
+ self.hook = StreamToDict(self._handle_tests)
+
+ def status(self, test_id=None, test_status=None, *args, **kwargs):
+ if test_status == 'exists':
+ return
+ self.hook.status(
+ test_id=test_id, test_status=test_status, *args, **kwargs)
+
+ def startTestRun(self):
+ self.decorated.startTestRun()
+ self.hook.startTestRun()
+
+ def stopTestRun(self):
+ self.hook.stopTestRun()
+ self.decorated.stopTestRun()
+
+ def _handle_tests(self, test_dict):
+ case = test_dict_to_case(test_dict)
+ case.run(self.decorated)
+
+
+class StreamToQueue(StreamResult):
+ """A StreamResult which enqueues events as a dict to a queue.Queue.
+
+ Events have their route code updated to include the route code
+ StreamToQueue was constructed with before they are submitted. If the event
+ route code is None, it is replaced with the StreamToQueue route code,
+ otherwise it is prefixed with the supplied code + a hyphen.
+
+ startTestRun and stopTestRun are forwarded to the queue. Implementors that
+ dequeue events back into StreamResult calls should take care not to call
+ startTestRun / stopTestRun on other StreamResult objects multiple times
+ (e.g. by filtering startTestRun and stopTestRun).
+
+ ``StreamToQueue`` is typically used by
+ ``ConcurrentStreamTestSuite``, which creates one ``StreamToQueue``
+ per thread, forwards status events to the StreamResult that
+ ``ConcurrentStreamTestSuite.run()`` was called with, and uses the
+ stopTestRun event to trigger calling join() on each thread.
+
+ Unlike ThreadsafeForwardingResult, which this supersedes, no buffering takes
+ place - any event supplied to a StreamToQueue will be inserted into the
+ queue immediately.
+
+ Events are forwarded as a dict with a key ``event`` which is one of
+ ``startTestRun``, ``stopTestRun`` or ``status``. When ``event`` is
+ ``status`` the dict also has keys matching the keyword arguments
+ of ``StreamResult.status``, otherwise it has one other key ``result`` which
+ is the result that invoked ``startTestRun``.
+ """
+
+ def __init__(self, queue, routing_code):
+ """Create a StreamToQueue forwarding to target.
+
+ :param queue: A ``queue.Queue`` to receive events.
+ :param routing_code: The routing code to apply to messages.
+ """
+ super(StreamToQueue, self).__init__()
+ self.queue = queue
+ self.routing_code = routing_code
+
+ def startTestRun(self):
+ self.queue.put(dict(event='startTestRun', result=self))
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ self.queue.put(dict(event='status', test_id=test_id,
+ test_status=test_status, test_tags=test_tags, runnable=runnable,
+ file_name=file_name, file_bytes=file_bytes, eof=eof,
+ mime_type=mime_type, route_code=self.route_code(route_code),
+ timestamp=timestamp))
+
+ def stopTestRun(self):
+ self.queue.put(dict(event='stopTestRun', result=self))
+
+ def route_code(self, route_code):
+ """Adjust route_code on the way through."""
+ if route_code is None:
+ return self.routing_code
+ return self.routing_code + _u("/") + route_code
+
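+# Editor's note: an illustrative sketch (not part of the upstream patch)
+# showing how events land on the queue as dicts, with the route code prefixed:
+#
+#   >>> try:
+#   ...     from queue import Queue
+#   ... except ImportError:
+#   ...     from Queue import Queue
+#   >>> q = Queue()
+#   >>> to_queue = StreamToQueue(q, 'worker-0')
+#   >>> to_queue.status(test_id='test_foo', test_status='success', route_code='1')
+#   >>> event = q.get()
+#   >>> event['event'], event['route_code']
+#   ('status', 'worker-0/1')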
+
+class TestResultDecorator(object):
+ """General pass-through decorator.
+
+ This provides a base that other TestResults can inherit from to
+ gain basic forwarding functionality.
+ """
+
+ def __init__(self, decorated):
+ """Create a TestResultDecorator forwarding to decorated."""
+ self.decorated = decorated
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ return self.decorated.startTestRun()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ return self.decorated.stopTestRun()
+
+ def addError(self, test, err=None, details=None):
+ return self.decorated.addError(test, err, details=details)
+
+ def addFailure(self, test, err=None, details=None):
+ return self.decorated.addFailure(test, err, details=details)
+
+ def addSuccess(self, test, details=None):
+ return self.decorated.addSuccess(test, details=details)
+
+ def addSkip(self, test, reason=None, details=None):
+ return self.decorated.addSkip(test, reason, details=details)
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ return self.decorated.addExpectedFailure(test, err, details=details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ return self.decorated.addUnexpectedSuccess(test, details=details)
+
+ def progress(self, offset, whence):
+ return self.decorated.progress(offset, whence)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+ @property
+ def current_tags(self):
+ return self.decorated.current_tags
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def stop(self):
+ return self.decorated.stop()
+
+ @property
+ def testsRun(self):
+ return self.decorated.testsRun
+
+ def tags(self, new_tags, gone_tags):
+ return self.decorated.tags(new_tags, gone_tags)
+
+ def time(self, a_datetime):
+ return self.decorated.time(a_datetime)
+
+
+class Tagger(TestResultDecorator):
+ """Tag each test individually."""
+
+ def __init__(self, decorated, new_tags, gone_tags):
+ """Wrap 'decorated' such that each test is tagged.
+
+ :param new_tags: Tags to be added for each test.
+ :param gone_tags: Tags to be removed for each test.
+ """
+ super(Tagger, self).__init__(decorated)
+ self._new_tags = set(new_tags)
+ self._gone_tags = set(gone_tags)
+
+ def startTest(self, test):
+ super(Tagger, self).startTest(test)
+ self.tags(self._new_tags, self._gone_tags)
+
+
+class TestByTestResult(TestResult):
+ """Call something every time a test completes."""
+
+ def __init__(self, on_test):
+ """Construct a ``TestByTestResult``.
+
+ :param on_test: A callable that takes a test case, a status (one of
+ "success", "failure", "error", "skip", or "xfail"), a start time
+ (a ``datetime`` with timezone), a stop time, an iterable of tags,
+ and a details dict. It is called at the end of each test (i.e. on
+ ``stopTest``) with the accumulated values for that test.
+ """
+ super(TestByTestResult, self).__init__()
+ self._on_test = on_test
+
+ def startTest(self, test):
+ super(TestByTestResult, self).startTest(test)
+ self._start_time = self._now()
+ # There's no supported (i.e. tested) behaviour that relies on these
+ # being set, but it makes me more comfortable all the same. -- jml
+ self._status = None
+ self._details = None
+ self._stop_time = None
+
+ def stopTest(self, test):
+ self._stop_time = self._now()
+ tags = set(self.current_tags)
+ super(TestByTestResult, self).stopTest(test)
+ self._on_test(
+ test=test,
+ status=self._status,
+ start_time=self._start_time,
+ stop_time=self._stop_time,
+ tags=tags,
+ details=self._details)
+
+ def _err_to_details(self, test, err, details):
+ if details:
+ return details
+ return {'traceback': TracebackContent(err, test)}
+
+ def addSuccess(self, test, details=None):
+ super(TestByTestResult, self).addSuccess(test)
+ self._status = 'success'
+ self._details = details
+
+ def addFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addFailure(test, err, details)
+ self._status = 'failure'
+ self._details = self._err_to_details(test, err, details)
+
+ def addError(self, test, err=None, details=None):
+ super(TestByTestResult, self).addError(test, err, details)
+ self._status = 'error'
+ self._details = self._err_to_details(test, err, details)
+
+ def addSkip(self, test, reason=None, details=None):
+ super(TestByTestResult, self).addSkip(test, reason, details)
+ self._status = 'skip'
+ if details is None:
+ details = {'reason': text_content(reason)}
+ elif reason:
+ # XXX: What if details already has 'reason' key?
+ details['reason'] = text_content(reason)
+ self._details = details
+
+ def addExpectedFailure(self, test, err=None, details=None):
+ super(TestByTestResult, self).addExpectedFailure(test, err, details)
+ self._status = 'xfail'
+ self._details = self._err_to_details(test, err, details)
+
+ def addUnexpectedSuccess(self, test, details=None):
+ super(TestByTestResult, self).addUnexpectedSuccess(test, details)
+ self._status = 'success'
+ self._details = details
+
+
+class TimestampingStreamResult(CopyStreamResult):
+ """A StreamResult decorator that assigns a timestamp when none is present.
+
+ This is convenient for ensuring events are timestamped.
+ """
+
+ def __init__(self, target):
+ super(TimestampingStreamResult, self).__init__([target])
+
+ def status(self, *args, **kwargs):
+ timestamp = kwargs.pop('timestamp', None)
+ if timestamp is None:
+ timestamp = datetime.datetime.now(utc)
+ super(TimestampingStreamResult, self).status(
+ *args, timestamp=timestamp, **kwargs)
+
+
+class _StringException(Exception):
+ """An exception made from an arbitrary string."""
+
+ if not str_is_unicode:
+ def __init__(self, string):
+ if type(string) is not unicode:
+ raise TypeError("_StringException expects unicode, got %r" %
+ (string,))
+ Exception.__init__(self, string)
+
+ def __str__(self):
+ return self.args[0].encode("utf-8")
+
+ def __unicode__(self):
+ return self.args[0]
+ # For 3.0 and above the default __str__ is fine, so we don't define one.
+
+ def __hash__(self):
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return self.args == other.args
+ except AttributeError:
+ return False
+
+
+def _format_text_attachment(name, text):
+ if '\n' in text:
+ return "%s: {{{\n%s\n}}}\n" % (name, text)
+ return "%s: {{{%s}}}" % (name, text)
+
+
+def _details_to_str(details, special=None):
+ """Convert a details dict to a string.
+
+ :param details: A dictionary mapping short names to ``Content`` objects.
+ :param special: If specified, the name of an attachment that should have
+ special attention drawn to it: the primary attachment, normally the
+ traceback that caused the test to fail.
+ :return: A formatted string that can be included in text test results.
+ """
+ empty_attachments = []
+ binary_attachments = []
+ text_attachments = []
+ special_content = None
+ # sorted is for testing; we may want to remove it and use a dict
+ # subclass with a defined item order instead.
+ for key, content in sorted(details.items()):
+ if content.content_type.type != 'text':
+ binary_attachments.append((key, content.content_type))
+ continue
+ text = content.as_text().strip()
+ if not text:
+ empty_attachments.append(key)
+ continue
+ # We want the 'special' attachment to be at the bottom.
+ if key == special:
+ special_content = '%s\n' % (text,)
+ continue
+ text_attachments.append(_format_text_attachment(key, text))
+ if text_attachments and not text_attachments[-1].endswith('\n'):
+ text_attachments.append('')
+ if special_content:
+ text_attachments.append(special_content)
+ lines = []
+ if binary_attachments:
+ lines.append('Binary content:\n')
+ for name, content_type in binary_attachments:
+ lines.append(' %s (%s)\n' % (name, content_type))
+ if empty_attachments:
+ lines.append('Empty attachments:\n')
+ for name in empty_attachments:
+ lines.append(' %s\n' % (name,))
+ if (binary_attachments or empty_attachments) and text_attachments:
+ lines.append('\n')
+ lines.append('\n'.join(text_attachments))
+ return _u('').join(lines)
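
As a rough illustration of the queueing behaviour described in the StreamToQueue docstring above (assuming the class is exported from the top-level testtools package, as in this release), each event arrives on the queue as a plain dict with its route code already prefixed:

    import queue  # Python 3 stdlib; on Python 2 this module is named Queue

    from testtools import StreamToQueue

    q = queue.Queue()
    result = StreamToQueue(q, 'worker-0')
    result.startTestRun()
    result.status(test_id='test_foo', test_status='success')
    result.stopTestRun()

    # Drain the queue: one dict per event. The status event's route_code is
    # 'worker-0' because the original event carried no route code.
    while not q.empty():
        print(q.get())
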
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/__init__.py b/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py
index 1b1aa38a1f9..db215ff12f8 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/__init__.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/__init__.py
@@ -1,12 +1,14 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
"""Tests for testtools itself."""
-# See README for copyright and licensing details.
from unittest import TestSuite
def test_suite():
from testtools.tests import (
+ matchers,
test_compat,
test_content,
test_content_type,
@@ -14,16 +16,17 @@ def test_suite():
test_distutilscmd,
test_fixturesupport,
test_helpers,
- test_matchers,
test_monkey,
test_run,
test_runtest,
test_spinner,
+ test_tags,
test_testcase,
test_testresult,
test_testsuite,
)
modules = [
+ matchers,
test_compat,
test_content,
test_content_type,
@@ -31,11 +34,11 @@ def test_suite():
test_distutilscmd,
test_fixturesupport,
test_helpers,
- test_matchers,
test_monkey,
test_run,
test_runtest,
test_spinner,
+ test_tags,
test_testcase,
test_testresult,
test_testsuite,
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/helpers.py b/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py
index 660cfecb72d..f766da33c9f 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/helpers.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/helpers.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""Helpers for tests."""
@@ -8,14 +8,16 @@ __all__ = [
import sys
+from extras import safe_hasattr
+
from testtools import TestResult
-from testtools.helpers import (
- safe_hasattr,
- try_import,
- )
+from testtools.content import StackLinesContent
from testtools import runtest
+# Importing to preserve compatibility.
+safe_hasattr
+
# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
try:
raise Exception
@@ -35,6 +37,10 @@ class LoggingResult(TestResult):
self._events.append(('startTest', test))
super(LoggingResult, self).startTest(test)
+ def stop(self):
+ self._events.append('stop')
+ super(LoggingResult, self).stop()
+
def stopTest(self, test):
self._events.append(('stopTest', test))
super(LoggingResult, self).stopTest(test)
@@ -67,32 +73,22 @@ class LoggingResult(TestResult):
self._events.append('done')
super(LoggingResult, self).done()
+ def tags(self, new_tags, gone_tags):
+ self._events.append(('tags', new_tags, gone_tags))
+ super(LoggingResult, self).tags(new_tags, gone_tags)
+
def time(self, a_datetime):
self._events.append(('time', a_datetime))
super(LoggingResult, self).time(a_datetime)
def is_stack_hidden():
- return safe_hasattr(runtest, '__unittest')
+ return StackLinesContent.HIDE_INTERNAL_STACK
def hide_testtools_stack(should_hide=True):
- modules = [
- 'testtools.matchers',
- 'testtools.runtest',
- 'testtools.testcase',
- ]
- result = is_stack_hidden()
- for module_name in modules:
- module = try_import(module_name)
- if should_hide:
- setattr(module, '__unittest', True)
- else:
- try:
- delattr(module, '__unittest')
- except AttributeError:
- # Attribute already doesn't exist. Our work here is done.
- pass
+ result = StackLinesContent.HIDE_INTERNAL_STACK
+ StackLinesContent.HIDE_INTERNAL_STACK = should_hide
return result
@@ -104,8 +100,9 @@ def run_with_stack_hidden(should_hide, f, *args, **kwargs):
hide_testtools_stack(old_should_hide)
-
class FullStackRunTest(runtest.RunTest):
def _run_user(self, fn, *args, **kwargs):
- return run_with_stack_hidden(False, fn, *args, **kwargs)
+ return run_with_stack_hidden(
+ False,
+ super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)
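
The change above replaces the per-module ``__unittest`` flag with a single switch on ``StackLinesContent``; a minimal sketch of the save/restore pattern the helper now relies on (import path taken from the hunk above):

    from testtools.content import StackLinesContent

    # Temporarily show testtools' own frames in failure tracebacks.
    previous = StackLinesContent.HIDE_INTERNAL_STACK
    StackLinesContent.HIDE_INTERNAL_STACK = False
    try:
        pass  # run tests here
    finally:
        StackLinesContent.HIDE_INTERNAL_STACK = previous
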
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py
new file mode 100644
index 00000000000..ebab308e77c
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+ from testtools.tests.matchers import (
+ test_basic,
+ test_datastructures,
+ test_dict,
+ test_doctest,
+ test_exception,
+ test_filesystem,
+ test_higherorder,
+ test_impl,
+ )
+ modules = [
+ test_basic,
+ test_datastructures,
+ test_dict,
+ test_doctest,
+ test_exception,
+ test_filesystem,
+ test_higherorder,
+ test_impl,
+ ]
+ suites = map(lambda x: x.test_suite(), modules)
+ return TestSuite(suites)
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py
new file mode 100644
index 00000000000..3ff87278dae
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/helpers.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestMatchersInterface(object):
+
+ run_tests_with = FullStackRunTest
+
+ def test_matches_match(self):
+ matcher = self.matches_matcher
+ matches = self.matches_matches
+ mismatches = self.matches_mismatches
+ for candidate in matches:
+ self.assertEqual(None, matcher.match(candidate))
+ for candidate in mismatches:
+ mismatch = matcher.match(candidate)
+ self.assertNotEqual(None, mismatch)
+ self.assertNotEqual(None, getattr(mismatch, 'describe', None))
+
+ def test__str__(self):
+ # [(expected, object to __str__)].
+ from testtools.matchers._doctest import DocTestMatches
+ examples = self.str_examples
+ for expected, matcher in examples:
+ self.assertThat(matcher, DocTestMatches(expected))
+
+ def test_describe_difference(self):
+ # [(expected, matchee, matcher), ...]
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ self.assertEqual(difference, mismatch.describe())
+
+ def test_mismatch_details(self):
+ # The mismatch object must provide get_details, which must return a
+ # dictionary mapping names to Content objects.
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ details = mismatch.get_details()
+ self.assertEqual(dict(details), details)
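
TestMatchersInterface is a mixin, not a TestCase: the concrete matcher tests combine it with TestCase and supply the class attributes it reads. A sketch mirroring the Equals tests that follow:

    from testtools import TestCase
    from testtools.matchers import Equals
    from testtools.tests.matchers.helpers import TestMatchersInterface

    class TestEqualsExample(TestCase, TestMatchersInterface):
        matches_matcher = Equals(1)                    # the matcher under test
        matches_matches = [1]                          # values it must match
        matches_mismatches = [2]                       # values it must reject
        str_examples = [("Equals(1)", Equals(1))]      # (expected str(), matcher)
        describe_examples = [("1 != 2", 2, Equals(1))] # (description, matchee, matcher)
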
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py
new file mode 100644
index 00000000000..c53bc9e9c42
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_basic.py
@@ -0,0 +1,396 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import re
+
+from testtools import TestCase
+from testtools.compat import (
+ text_repr,
+ _b,
+ _u,
+ )
+from testtools.matchers._basic import (
+ _BinaryMismatch,
+ Contains,
+ DoesNotEndWith,
+ DoesNotStartWith,
+ EndsWith,
+ Equals,
+ Is,
+ IsInstance,
+ LessThan,
+ GreaterThan,
+ HasLength,
+ MatchesRegex,
+ NotEquals,
+ SameMembers,
+ StartsWith,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class Test_BinaryMismatch(TestCase):
+ """Mismatches from binary comparisons need useful describe output"""
+
+ _long_string = "This is a longish multiline non-ascii string\n\xa7"
+ _long_b = _b(_long_string)
+ _long_u = _u(_long_string)
+
+ class CustomRepr(object):
+ def __init__(self, repr_string):
+ self._repr_string = repr_string
+ def __repr__(self):
+ return _u('<object ') + _u(self._repr_string) + _u('>')
+
+ def test_short_objects(self):
+ o1, o2 = self.CustomRepr('a'), self.CustomRepr('b')
+ mismatch = _BinaryMismatch(o1, "!~", o2)
+ self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
+
+ def test_short_mixed_strings(self):
+ b, u = _b("\xa7"), _u("\xa7")
+ mismatch = _BinaryMismatch(b, "!~", u)
+ self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
+
+ def test_long_bytes(self):
+ one_line_b = self._long_b.replace(_b("\n"), _b(" "))
+ mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(one_line_b),
+ text_repr(self._long_b, multiline=True)))
+
+ def test_long_unicode(self):
+ one_line_u = self._long_u.replace("\n", " ")
+ mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(one_line_u),
+ text_repr(self._long_u, multiline=True)))
+
+ def test_long_mixed_strings(self):
+ mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_b, multiline=True),
+ text_repr(self._long_u, multiline=True)))
+
+ def test_long_bytes_and_object(self):
+ obj = object()
+ mismatch = _BinaryMismatch(self._long_b, "!~", obj)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_b, multiline=True),
+ repr(obj)))
+
+ def test_long_unicode_and_object(self):
+ obj = object()
+ mismatch = _BinaryMismatch(self._long_u, "!~", obj)
+ self.assertEqual(mismatch.describe(),
+ "%s:\nreference = %s\nactual = %s\n" % ("!~",
+ text_repr(self._long_u, multiline=True),
+ repr(obj)))
+
+
+class TestEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Equals(1)
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
+
+ describe_examples = [("1 != 2", 2, Equals(1))]
+
+
+class TestNotEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = NotEquals(1)
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
+
+ describe_examples = [("1 == 1", 1, NotEquals(1))]
+
+
+class TestIsInterface(TestCase, TestMatchersInterface):
+
+ foo = object()
+ bar = object()
+
+ matches_matcher = Is(foo)
+ matches_matches = [foo]
+ matches_mismatches = [bar, 1]
+
+ str_examples = [("Is(2)", Is(2))]
+
+ describe_examples = [("1 is not 2", 2, Is(1))]
+
+
+class TestIsInstanceInterface(TestCase, TestMatchersInterface):
+
+ class Foo:pass
+
+ matches_matcher = IsInstance(Foo)
+ matches_matches = [Foo()]
+ matches_mismatches = [object(), 1, Foo]
+
+ str_examples = [
+ ("IsInstance(str)", IsInstance(str)),
+ ("IsInstance(str, int)", IsInstance(str, int)),
+ ]
+
+ describe_examples = [
+ ("'foo' is not an instance of int", 'foo', IsInstance(int)),
+ ("'foo' is not an instance of any of (int, type)", 'foo',
+ IsInstance(int, type)),
+ ]
+
+
+class TestLessThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = LessThan(4)
+ matches_matches = [-5, 3]
+ matches_mismatches = [4, 5, 5000]
+
+ str_examples = [
+ ("LessThan(12)", LessThan(12)),
+ ]
+
+ describe_examples = [
+ ('4 is not > 5', 5, LessThan(4)),
+ ('4 is not > 4', 4, LessThan(4)),
+ ]
+
+
+class TestGreaterThanInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = GreaterThan(4)
+ matches_matches = [5, 8]
+ matches_mismatches = [-2, 0, 4]
+
+ str_examples = [
+ ("GreaterThan(12)", GreaterThan(12)),
+ ]
+
+ describe_examples = [
+ ('5 is not < 4', 4, GreaterThan(5)),
+ ('4 is not < 4', 4, GreaterThan(4)),
+ ]
+
+
+class TestContainsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Contains('foo')
+ matches_matches = ['foo', 'afoo', 'fooa']
+ matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
+
+ str_examples = [
+ ("Contains(1)", Contains(1)),
+ ("Contains('foo')", Contains('foo')),
+ ]
+
+ describe_examples = [("1 not in 2", 2, Contains(1))]
+
+
+class DoesNotStartWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_describe(self):
+ mismatch = DoesNotStartWith("fo", "bo")
+ self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
+
+ def test_describe_non_ascii_unicode(self):
+ string = _u("A\xA7")
+ suffix = _u("B\xA7")
+ mismatch = DoesNotStartWith(string, suffix)
+ self.assertEqual("%s does not start with %s." % (
+ text_repr(string), text_repr(suffix)),
+ mismatch.describe())
+
+ def test_describe_non_ascii_bytes(self):
+ string = _b("A\xA7")
+ suffix = _b("B\xA7")
+ mismatch = DoesNotStartWith(string, suffix)
+ self.assertEqual("%r does not start with %r." % (string, suffix),
+ mismatch.describe())
+
+
+class StartsWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_str(self):
+ matcher = StartsWith("bar")
+ self.assertEqual("StartsWith('bar')", str(matcher))
+
+ def test_str_with_bytes(self):
+ b = _b("\xA7")
+ matcher = StartsWith(b)
+ self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
+
+ def test_str_with_unicode(self):
+ u = _u("\xA7")
+ matcher = StartsWith(u)
+ self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
+
+ def test_match(self):
+ matcher = StartsWith("bar")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_start_with(self):
+ matcher = StartsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = StartsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class DoesNotEndWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_describe(self):
+ mismatch = DoesNotEndWith("fo", "bo")
+ self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
+
+ def test_describe_non_ascii_unicode(self):
+ string = _u("A\xA7")
+ suffix = _u("B\xA7")
+ mismatch = DoesNotEndWith(string, suffix)
+ self.assertEqual("%s does not end with %s." % (
+ text_repr(string), text_repr(suffix)),
+ mismatch.describe())
+
+ def test_describe_non_ascii_bytes(self):
+ string = _b("A\xA7")
+ suffix = _b("B\xA7")
+ mismatch = DoesNotEndWith(string, suffix)
+ self.assertEqual("%r does not end with %r." % (string, suffix),
+ mismatch.describe())
+
+
+class EndsWithTests(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_str(self):
+ matcher = EndsWith("bar")
+ self.assertEqual("EndsWith('bar')", str(matcher))
+
+ def test_str_with_bytes(self):
+ b = _b("\xA7")
+ matcher = EndsWith(b)
+ self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
+
+ def test_str_with_unicode(self):
+ u = _u("\xA7")
+ matcher = EndsWith(u)
+ self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
+
+ def test_match(self):
+ matcher = EndsWith("arf")
+ self.assertIs(None, matcher.match("barf"))
+
+ def test_mismatch_returns_does_not_end_with(self):
+ matcher = EndsWith("bar")
+ self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
+
+ def test_mismatch_sets_matchee(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("foo", mismatch.matchee)
+
+ def test_mismatch_sets_expected(self):
+ matcher = EndsWith("bar")
+ mismatch = matcher.match("foo")
+ self.assertEqual("bar", mismatch.expected)
+
+
+class TestSameMembers(TestCase, TestMatchersInterface):
+
+ matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}])
+ matches_matches = [
+ [1, 1, 2, 3, {'foo': 'bar'}],
+ [3, {'foo': 'bar'}, 1, 2, 1],
+ [3, 2, 1, {'foo': 'bar'}, 1],
+ (2, {'foo': 'bar'}, 3, 1, 1),
+ ]
+ matches_mismatches = [
+ set([1, 2, 3]),
+ [1, 1, 2, 3, 5],
+ [1, 2, 3, {'foo': 'bar'}],
+ 'foo',
+ ]
+
+ describe_examples = [
+ (("elements differ:\n"
+ "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n"
+ "actual = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n"
+ ": \n"
+ "missing: ['watermelon']\n"
+ "extra: ['sparrow']"
+ ),
+ ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',],
+ SameMembers(
+ ['apple', 'orange', 'canteloupe', 'watermelon',
+ 'lemon', 'banana',])),
+ ]
+
+ str_examples = [
+ ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])),
+ ]
+
+
+class TestMatchesRegex(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesRegex('a|b')
+ matches_matches = ['a', 'b']
+ matches_mismatches = ['c']
+
+ str_examples = [
+ ("MatchesRegex('a|b')", MatchesRegex('a|b')),
+ ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
+ ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
+ ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
+ ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
+ ]
+
+ describe_examples = [
+ ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
+ ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
+ ("%r does not match /\\s+\\xa7/" % (_b('c'),),
+ _b('c'), MatchesRegex(_b("\\s+\xA7"))),
+ ("%r does not match /\\s+\\xa7/" % (_u('c'),),
+ _u('c'), MatchesRegex(_u("\\s+\xA7"))),
+ ]
+
+
+class TestHasLength(TestCase, TestMatchersInterface):
+
+ matches_matcher = HasLength(2)
+ matches_matches = [[1, 2]]
+ matches_mismatches = [[], [1], [3, 2, 1]]
+
+ str_examples = [
+ ("HasLength(2)", HasLength(2)),
+ ]
+
+ describe_examples = [
+ ("len([]) != 1", [], HasLength(1)),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
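
The protocol all of these tests exercise is small: ``match()`` returns ``None`` on success and a mismatch object otherwise, and the mismatch can describe itself. A quick sketch using ``Equals``:

    from testtools.matchers import Equals

    print(Equals(1).match(1))           # None: the value matched
    mismatch = Equals(1).match(2)
    print(mismatch.describe())          # '1 != 2', as asserted in TestEqualsInterface above
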
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py
new file mode 100644
index 00000000000..f6d9d8658c8
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_datastructures.py
@@ -0,0 +1,209 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+import re
+import sys
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.matchers import (
+ Annotate,
+ Equals,
+ LessThan,
+ MatchesRegex,
+ NotEquals,
+ )
+from testtools.matchers._datastructures import (
+ ContainsAll,
+ MatchesListwise,
+ MatchesStructure,
+ MatchesSetwise,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def run_doctest(obj, name):
+ p = doctest.DocTestParser()
+ t = p.get_doctest(
+ obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
+ r = doctest.DocTestRunner()
+ output = StringIO()
+ r.run(t, out=output.write)
+ return r.failures, output.getvalue()
+
+
+class TestMatchesListwise(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_docstring(self):
+ failure_count, output = run_doctest(
+ MatchesListwise, "MatchesListwise")
+ if failure_count:
+ self.fail("Doctest failed with %s" % output)
+
+
+class TestMatchesStructure(TestCase, TestMatchersInterface):
+
+ class SimpleClass:
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+
+ matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
+ matches_matches = [SimpleClass(1, 2)]
+ matches_mismatches = [
+ SimpleClass(2, 2),
+ SimpleClass(1, 1),
+ SimpleClass(3, 3),
+ ]
+
+ str_examples = [
+ ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
+ ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
+ ("MatchesStructure(x=Equals(1), y=Equals(2))",
+ MatchesStructure(x=Equals(1), y=Equals(2))),
+ ]
+
+ describe_examples = [
+ ("""\
+Differences: [
+3 != 1: x
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
+ ("""\
+Differences: [
+3 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
+ ("""\
+Differences: [
+0 != 1: x
+0 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
+ ]
+
+ def test_fromExample(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
+
+ def test_byEquality(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.byEquality(x=1))
+
+ def test_withStructure(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure.byMatcher(LessThan, x=2))
+
+ def test_update(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
+
+ def test_update_none(self):
+ self.assertThat(
+ self.SimpleClass(1, 2),
+ MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
+ z=None))
+
+
+class TestMatchesSetwise(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def assertMismatchWithDescriptionMatching(self, value, matcher,
+ description_matcher):
+ mismatch = matcher.match(value)
+ if mismatch is None:
+ self.fail("%s matched %s" % (matcher, value))
+ actual_description = mismatch.describe()
+ self.assertThat(
+ actual_description,
+ Annotate(
+ "%s matching %s" % (matcher, value),
+ description_matcher))
+
+ def test_matches(self):
+ self.assertIs(
+ None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
+
+ def test_mismatches(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex('.*There was 1 mismatch$', re.S))
+
+ def test_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+ Equals('There was 1 matcher left over: Equals(1)'))
+
+ def test_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
+ Equals('There was 1 value left over: [3]'))
+
+ def test_two_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+ MatchesRegex(
+ 'There were 2 matchers left over: Equals\([12]\), '
+ 'Equals\([12]\)'))
+
+ def test_two_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ 'There were 2 values left over: \[[34], [34]\]'))
+
+ def test_mismatch_and_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
+ re.S))
+
+ def test_mismatch_and_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 1 extra value: \[[34]\]',
+ re.S))
+
+ def test_mismatch_and_two_too_many_matchers(self):
+ self.assertMismatchWithDescriptionMatching(
+ [3, 4], MatchesSetwise(
+ Equals(0), Equals(1), Equals(2), Equals(3)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 2 extra matchers: '
+ 'Equals\([012]\), Equals\([012]\)', re.S))
+
+ def test_mismatch_and_two_too_many_values(self):
+ self.assertMismatchWithDescriptionMatching(
+ [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
+ MatchesRegex(
+ '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
+ re.S))
+
+
+class TestContainsAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainsAll(['foo', 'bar'])
+ matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']]
+ matches_mismatches = [['f', 'g'], ['foo', 'baz'], []]
+
+ str_examples = [(
+ "MatchesAll(Contains('foo'), Contains('bar'))",
+ ContainsAll(['foo', 'bar'])),
+ ]
+
+ describe_examples = [("""Differences: [
+'baz' not in 'foo'
+]""",
+ 'foo', ContainsAll(['foo', 'baz']))]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
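
For contrast with the tests above: ``MatchesListwise`` takes a list of matchers and checks them position by position, while ``MatchesSetwise`` takes matchers as separate arguments and ignores order. A small sketch, assuming both are re-exported from ``testtools.matchers`` as in this release:

    from testtools.matchers import Equals, MatchesListwise, MatchesSetwise

    # Position matters for MatchesListwise ...
    print(MatchesListwise([Equals(1), Equals(2)]).match([1, 2]))  # None
    print(MatchesListwise([Equals(1), Equals(2)]).match([2, 1]))  # a mismatch object
    # ... but not for MatchesSetwise.
    print(MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))     # None
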
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py
new file mode 100644
index 00000000000..00368dd6ceb
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_dict.py
@@ -0,0 +1,227 @@
+from testtools import TestCase
+from testtools.matchers import (
+ Equals,
+ NotEquals,
+ Not,
+ )
+from testtools.matchers._dict import (
+ ContainedByDict,
+ ContainsDict,
+ KeysEqual,
+ MatchesAllDict,
+ MatchesDict,
+ _SubDictOf,
+ )
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})",
+ matches_matcher)]
+
+ describe_examples = [
+ ("""a: 1 == 1""", 1, matches_matcher),
+ ]
+
+
+class TestKeysEqualWithList(TestCase, TestMatchersInterface):
+
+ matches_matcher = KeysEqual('foo', 'bar')
+ matches_matches = [
+ {'foo': 0, 'bar': 1},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 0},
+ {'bar': 1},
+ {'foo': 0, 'bar': 1, 'baz': 2},
+ {'a': None, 'b': None, 'c': None},
+ ]
+
+ str_examples = [
+ ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
+ ]
+
+ describe_examples = []
+
+ def test_description(self):
+ matchee = {'foo': 0, 'bar': 1, 'baz': 2}
+ mismatch = KeysEqual('foo', 'bar').match(matchee)
+ description = mismatch.describe()
+ self.assertThat(
+ description, Equals(
+ "['bar', 'foo'] does not match %r: Keys not equal"
+ % (matchee,)))
+
+
+class TestKeysEqualWithDict(TestKeysEqualWithList):
+
+ matches_matcher = KeysEqual({'foo': 3, 'bar': 4})
+
+
+class TestSubDictOf(TestCase, TestMatchersInterface):
+
+ matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bar'},
+ ]
+
+ matches_mismatches = [
+ {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = []
+ describe_examples = []
+
+
+class TestMatchesDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': None},
+ {'foo': 'bar', 'baz': 'quux'},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = [
+ ("MatchesDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ " 'foo': Equals('bar'),\n"
+ "}",
+ {}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}",
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}\n"
+ "Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+class TestContainsDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainsDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': None},
+ {'foo': 'bar', 'baz': 'quux'},
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ ]
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'cat': 'dog'},
+ {'foo': 'bar'},
+ ]
+
+ str_examples = [
+ ("ContainsDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ " 'foo': Equals('bar'),\n"
+ "}",
+ {}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Missing: {\n"
+ " 'baz': Not(Equals('qux')),\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+class TestContainedByDict(TestCase, TestMatchersInterface):
+
+ matches_matcher = ContainedByDict(
+ {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+ matches_matches = [
+ {},
+ {'foo': 'bar'},
+ {'foo': 'bar', 'baz': 'quux'},
+ {'baz': 'quux'},
+ ]
+ matches_mismatches = [
+ {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux'},
+ {'foo': 'bar', 'cat': 'dog'},
+ ]
+
+ str_examples = [
+ ("ContainedByDict({'baz': %s, 'foo': %s})" % (
+ Not(Equals('qux')), Equals('bar')),
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ "}",
+ {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+ ("Differences: {\n"
+ " 'baz': 'qux' matches Equals('qux'),\n"
+ " 'foo': 'bar' != 'bop',\n"
+ "}",
+ {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+ ("Extra: {\n"
+ " 'cat': 'dog',\n"
+ "}",
+ {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
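
A compact view of the difference the three dict matchers tested above encode (again assuming the public re-exports from ``testtools.matchers``): ``MatchesDict`` requires exactly the specified keys, ``ContainsDict`` tolerates extra keys, and ``ContainedByDict`` tolerates missing ones.

    from testtools.matchers import ContainedByDict, ContainsDict, Equals, MatchesDict

    spec = {'foo': Equals('bar')}
    print(MatchesDict(spec).match({'foo': 'bar'}))               # None: exact keys, all match
    print(ContainsDict(spec).match({'foo': 'bar', 'extra': 1}))  # None: extra keys ignored
    print(ContainedByDict(spec).match({}))                       # None: missing keys allowed
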
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py
new file mode 100644
index 00000000000..81b9579dbf0
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_doctest.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+
+from testtools import TestCase
+from testtools.compat import (
+ str_is_unicode,
+ _b,
+ _u,
+ )
+from testtools.matchers._doctest import DocTestMatches
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+
+class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
+ matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
+ matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
+
+ str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
+ DocTestMatches("Ran 1 test in ...s")),
+ ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
+ ]
+
+ describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
+ ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
+ DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
+ matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
+ matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
+
+ str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
+ DocTestMatches(_u("\xa7"))),
+ ]
+
+ describe_examples = [(
+ _u("Expected:\n \xa7\nGot:\n a\n"),
+ "a",
+ DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesSpecific(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test___init__simple(self):
+ matcher = DocTestMatches("foo")
+ self.assertEqual("foo\n", matcher.want)
+
+ def test___init__flags(self):
+ matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
+ self.assertEqual("bar\n", matcher.want)
+ self.assertEqual(doctest.ELLIPSIS, matcher.flags)
+
+ def test_describe_non_ascii_bytes(self):
+ """Even with bytestrings, the mismatch should be coercible to unicode
+
+ DocTestMatches is intended for text, but the Python 2 str type also
+ permits arbitrary binary inputs. This is a slightly bogus thing to do,
+ and under Python 3 using bytes objects will reasonably raise an error.
+ """
+ header = _b("\x89PNG\r\n\x1a\n...")
+ if str_is_unicode:
+ self.assertRaises(TypeError,
+ DocTestMatches, header, doctest.ELLIPSIS)
+ return
+ matcher = DocTestMatches(header, doctest.ELLIPSIS)
+ mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
+ # Must be treatable as unicode text, the exact output matters less
+ self.assertTrue(unicode(mismatch.describe()))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py
new file mode 100644
index 00000000000..ef7185f19a4
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_exception.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import sys
+
+from testtools import TestCase
+from testtools.matchers import (
+ AfterPreprocessing,
+ Equals,
+ )
+from testtools.matchers._exception import (
+ MatchesException,
+ Raises,
+ raises,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def make_error(type, *args, **kwargs):
+ try:
+ raise type(*args, **kwargs)
+ except type:
+ return sys.exc_info()
+
+
+class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError("foo"))
+ error_foo = make_error(ValueError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo]
+ matches_mismatches = [error_bar, error_base_foo]
+
+ str_examples = [
+ ("MatchesException(Exception('foo',))",
+ MatchesException(Exception('foo')))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError("foo"))),
+ ("ValueError('bar',) has different arguments to ValueError('foo',).",
+ error_bar,
+ MatchesException(ValueError("foo"))),
+ ]
+
+
+class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError)
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'bar')
+ error_base_foo = make_error(Exception, 'foo')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_base_foo]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception))
+ ]
+ describe_examples = [
+ ("%r is not a %r" % (Exception, ValueError),
+ error_base_foo,
+ MatchesException(ValueError)),
+ ]
+
+
+class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(ValueError, 'fo.')
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_bar]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception, 'fo.'))
+ ]
+ describe_examples = [
+ ("'bar' does not match /fo./",
+ error_bar, MatchesException(ValueError, "fo.")),
+ ]
+
+
+class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesException(
+ ValueError, AfterPreprocessing(str, Equals('foo')))
+ error_foo = make_error(ValueError, 'foo')
+ error_sub = make_error(UnicodeError, 'foo')
+ error_bar = make_error(ValueError, 'bar')
+ matches_matches = [error_foo, error_sub]
+ matches_mismatches = [error_bar]
+
+ str_examples = [
+ ("MatchesException(%r)" % Exception,
+ MatchesException(Exception, Equals('foo')))
+ ]
+ describe_examples = [
+ ("5 != %r" % (error_bar[1],),
+ error_bar, MatchesException(ValueError, Equals(5))),
+ ]
+
+
+class TestRaisesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises()
+ def boom():
+ raise Exception('foo')
+ matches_matches = [boom]
+ matches_mismatches = [lambda:None]
+
+ # Tricky to get function objects to render consistently, and the interfaces
+ # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Raises(
+ exception_matcher=MatchesException(Exception('foo')))
+ def boom_bar():
+ raise Exception('bar')
+ def boom_foo():
+ raise Exception('foo')
+ matches_matches = [boom_foo]
+ matches_mismatches = [lambda:None, boom_bar]
+
+ # Tricky to get function objects to render consistently, and the interfaces
+ # helper uses assertEqual rather than (for instance) DocTestMatches.
+ str_examples = []
+
+ describe_examples = []
+
+
+class TestRaisesBaseTypes(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def raiser(self):
+ raise KeyboardInterrupt('foo')
+
+ def test_KeyboardInterrupt_matched(self):
+ # When KeyboardInterrupt is matched, it is swallowed.
+ matcher = Raises(MatchesException(KeyboardInterrupt))
+ self.assertThat(self.raiser, matcher)
+
+ def test_KeyboardInterrupt_propogates(self):
+ # The default 'it raised' propagates KeyboardInterrupt.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ matcher = Raises()
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+ def test_KeyboardInterrupt_match_Exception_propogates(self):
+ # If the raised exception isn't matched, and it is not a subclass of
+ # Exception, it is propagated.
+ match_keyb = Raises(MatchesException(KeyboardInterrupt))
+ def raise_keyb_from_match():
+ if sys.version_info > (2, 5):
+ matcher = Raises(MatchesException(Exception))
+ else:
+ # On Python 2.4 KeyboardInterrupt is a StandardError subclass
+ # but should propagate from less generic exception matchers
+ matcher = Raises(MatchesException(EnvironmentError))
+ matcher.match(self.raiser)
+ self.assertThat(raise_keyb_from_match, match_keyb)
+
+
+class TestRaisesConvenience(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_exc_type(self):
+ self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+ def test_exc_value(self):
+ e = RuntimeError("You lose!")
+ def raiser():
+ raise e
+ self.assertThat(raiser, raises(e))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py
new file mode 100644
index 00000000000..917ff2ed058
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_filesystem.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import os
+import shutil
+import tarfile
+import tempfile
+
+from testtools import TestCase
+from testtools.matchers import (
+ Contains,
+ DocTestMatches,
+ Equals,
+ )
+from testtools.matchers._filesystem import (
+ DirContains,
+ DirExists,
+ FileContains,
+ FileExists,
+ HasPermissions,
+ PathExists,
+ SamePath,
+ TarballContains,
+ )
+
+
+class PathHelpers(object):
+
+ def mkdtemp(self):
+ directory = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, directory)
+ return directory
+
+ def create_file(self, filename, contents=''):
+ fp = open(filename, 'w')
+ try:
+ fp.write(contents)
+ finally:
+ fp.close()
+
+ def touch(self, filename):
+ return self.create_file(filename)
+
+
+class TestPathExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, PathExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = PathExists().match(doesntexist)
+ self.assertThat(
+ "%s does not exist." % doesntexist, Equals(mismatch.describe()))
+
+
+class TestDirExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, DirExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = DirExists().match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_not_a_directory(self):
+ filename = os.path.join(self.mkdtemp(), 'foo')
+ self.touch(filename)
+ mismatch = DirExists().match(filename)
+ self.assertThat(
+ "%s is not a directory." % filename, Equals(mismatch.describe()))
+
+
+class TestFileExists(TestCase, PathHelpers):
+
+ def test_exists(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'filename')
+ self.touch(filename)
+ self.assertThat(filename, FileExists())
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = FileExists().match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_not_a_file(self):
+ tempdir = self.mkdtemp()
+ mismatch = FileExists().match(tempdir)
+ self.assertThat(
+ "%s is not a file." % tempdir, Equals(mismatch.describe()))
+
+
+class TestDirContains(TestCase, PathHelpers):
+
+ def test_empty(self):
+ tempdir = self.mkdtemp()
+ self.assertThat(tempdir, DirContains([]))
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = DirContains([]).match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_contains_files(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ self.touch(os.path.join(tempdir, 'bar'))
+ self.assertThat(tempdir, DirContains(['bar', 'foo']))
+
+ def test_matcher(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ self.touch(os.path.join(tempdir, 'bar'))
+ self.assertThat(tempdir, DirContains(matcher=Contains('bar')))
+
+ def test_neither_specified(self):
+ self.assertRaises(AssertionError, DirContains)
+
+ def test_both_specified(self):
+ self.assertRaises(
+ AssertionError, DirContains, filenames=[], matcher=Contains('a'))
+
+ def test_does_not_contain_files(self):
+ tempdir = self.mkdtemp()
+ self.touch(os.path.join(tempdir, 'foo'))
+ mismatch = DirContains(['bar', 'foo']).match(tempdir)
+ self.assertThat(
+ Equals(['bar', 'foo']).match(['foo']).describe(),
+ Equals(mismatch.describe()))
+
+
+class TestFileContains(TestCase, PathHelpers):
+
+ def test_not_exists(self):
+ doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+ mismatch = FileContains('').match(doesntexist)
+ self.assertThat(
+ PathExists().match(doesntexist).describe(),
+ Equals(mismatch.describe()))
+
+ def test_contains(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Hello World!')
+ self.assertThat(filename, FileContains('Hello World!'))
+
+ def test_matcher(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Hello World!')
+ self.assertThat(
+ filename, FileContains(matcher=DocTestMatches('Hello World!')))
+
+ def test_neither_specified(self):
+ self.assertRaises(AssertionError, FileContains)
+
+ def test_both_specified(self):
+ self.assertRaises(
+ AssertionError, FileContains, contents=[], matcher=Contains('a'))
+
+ def test_does_not_contain(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'foo')
+ self.create_file(filename, 'Goodbye Cruel World!')
+ mismatch = FileContains('Hello World!').match(filename)
+ self.assertThat(
+ Equals('Hello World!').match('Goodbye Cruel World!').describe(),
+ Equals(mismatch.describe()))
+
+
+class TestTarballContains(TestCase, PathHelpers):
+
+ def test_match(self):
+ tempdir = self.mkdtemp()
+ in_temp_dir = lambda x: os.path.join(tempdir, x)
+ self.touch(in_temp_dir('a'))
+ self.touch(in_temp_dir('b'))
+ tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+ tarball.add(in_temp_dir('a'), 'a')
+ tarball.add(in_temp_dir('b'), 'b')
+ tarball.close()
+ self.assertThat(
+ in_temp_dir('foo.tar.gz'), TarballContains(['b', 'a']))
+
+ def test_mismatch(self):
+ tempdir = self.mkdtemp()
+ in_temp_dir = lambda x: os.path.join(tempdir, x)
+ self.touch(in_temp_dir('a'))
+ self.touch(in_temp_dir('b'))
+ tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+ tarball.add(in_temp_dir('a'), 'a')
+ tarball.add(in_temp_dir('b'), 'b')
+ tarball.close()
+ mismatch = TarballContains(['d', 'c']).match(in_temp_dir('foo.tar.gz'))
+ self.assertEqual(
+ mismatch.describe(),
+ Equals(['c', 'd']).match(['a', 'b']).describe())
+
+
+class TestSamePath(TestCase, PathHelpers):
+
+ def test_same_string(self):
+ self.assertThat('foo', SamePath('foo'))
+
+ def test_relative_and_absolute(self):
+ path = 'foo'
+ abspath = os.path.abspath(path)
+ self.assertThat(path, SamePath(abspath))
+ self.assertThat(abspath, SamePath(path))
+
+ def test_real_path(self):
+ tempdir = self.mkdtemp()
+ source = os.path.join(tempdir, 'source')
+ self.touch(source)
+ target = os.path.join(tempdir, 'target')
+ try:
+ os.symlink(source, target)
+ except (AttributeError, NotImplementedError):
+ self.skip("No symlink support")
+ self.assertThat(source, SamePath(target))
+ self.assertThat(target, SamePath(source))
+
+
+class TestHasPermissions(TestCase, PathHelpers):
+
+ def test_match(self):
+ tempdir = self.mkdtemp()
+ filename = os.path.join(tempdir, 'filename')
+ self.touch(filename)
+ permissions = oct(os.stat(filename).st_mode)[-4:]
+ self.assertThat(filename, HasPermissions(permissions))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py
new file mode 100644
index 00000000000..fb86b7fe2f9
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_higherorder.py
@@ -0,0 +1,254 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import (
+ DocTestMatches,
+ Equals,
+ LessThan,
+ MatchesStructure,
+ Mismatch,
+ NotEquals,
+ )
+from testtools.matchers._higherorder import (
+ AfterPreprocessing,
+ AllMatch,
+ Annotate,
+ AnnotatedMismatch,
+ AnyMatch,
+ MatchesAny,
+ MatchesAll,
+ MatchesPredicate,
+ MatchesPredicateWithParams,
+ Not,
+ )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestAllMatch(TestCase, TestMatchersInterface):
+
+ matches_matcher = AllMatch(LessThan(10))
+ matches_matches = [
+ [9, 9, 9],
+ (9, 9),
+ iter([9, 9, 9, 9, 9]),
+ ]
+ matches_mismatches = [
+ [11, 9, 9],
+ iter([9, 12, 9, 11]),
+ ]
+
+ str_examples = [
+ ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
+ ]
+
+ describe_examples = [
+ ('Differences: [\n'
+ '10 is not > 11\n'
+ '10 is not > 10\n'
+ ']',
+ [11, 9, 10],
+ AllMatch(LessThan(10))),
+ ]
+
+
+class TestAnyMatch(TestCase, TestMatchersInterface):
+
+ matches_matcher = AnyMatch(Equals('elephant'))
+ matches_matches = [
+ ['grass', 'cow', 'steak', 'milk', 'elephant'],
+ (13, 'elephant'),
+ ['elephant', 'elephant', 'elephant'],
+ set(['hippo', 'rhino', 'elephant']),
+ ]
+ matches_mismatches = [
+ [],
+ ['grass', 'cow', 'steak', 'milk'],
+ (13, 12, 10),
+ ['element', 'hephalump', 'pachyderm'],
+ set(['hippo', 'rhino', 'diplodocus']),
+ ]
+
+ str_examples = [
+ ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))),
+ ]
+
+ describe_examples = [
+ ('Differences: [\n'
+ '7 != 11\n'
+ '7 != 9\n'
+ '7 != 10\n'
+ ']',
+ [11, 9, 10],
+ AnyMatch(Equals(7))),
+ ]
+
+
+class TestAfterPreprocessing(TestCase, TestMatchersInterface):
+
+ def parity(x):
+ return x % 2
+
+ matches_matcher = AfterPreprocessing(parity, Equals(1))
+ matches_matches = [3, 5]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("AfterPreprocessing(<function parity>, Equals(1))",
+ AfterPreprocessing(parity, Equals(1))),
+ ]
+
+ describe_examples = [
+ ("1 != 0: after <function parity> on 2", 2,
+ AfterPreprocessing(parity, Equals(1))),
+ ("1 != 0", 2,
+ AfterPreprocessing(parity, Equals(1), annotate=False)),
+ ]
+
+
+class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
+ matches_matches = ["1", "2"]
+ matches_mismatches = ["3"]
+
+ str_examples = [(
+ "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
+ MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
+ ]
+
+ describe_examples = [("""Differences: [
+Expected:
+ 1
+Got:
+ 3
+
+Expected:
+ 2
+Got:
+ 3
+
+]""",
+ "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
+
+
+class TestMatchesAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAll(NotEquals(1), NotEquals(2))",
+ MatchesAll(NotEquals(1), NotEquals(2)))]
+
+ describe_examples = [
+ ("""Differences: [
+1 == 1
+]""",
+ 1, MatchesAll(NotEquals(1), NotEquals(2))),
+ ("1 == 1", 1,
+ MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)),
+ ]
+
+
+class TestAnnotate(TestCase, TestMatchersInterface):
+
+ matches_matcher = Annotate("foo", Equals(1))
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
+
+ describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
+
+ def test_if_message_no_message(self):
+ # Annotate.if_message returns the given matcher if there is no
+ # message.
+ matcher = Equals(1)
+ not_annotated = Annotate.if_message('', matcher)
+ self.assertIs(matcher, not_annotated)
+
+ def test_if_message_given_message(self):
+ # Annotate.if_message returns an annotated version of the matcher if a
+ # message is provided.
+ matcher = Equals(1)
+ expected = Annotate('foo', matcher)
+ annotated = Annotate.if_message('foo', matcher)
+ self.assertThat(
+ annotated,
+ MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
+
+
+class TestAnnotatedMismatch(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_forwards_details(self):
+ x = Mismatch('description', {'foo': 'bar'})
+ annotated = AnnotatedMismatch("annotation", x)
+ self.assertEqual(x.get_details(), annotated.get_details())
+
+
+class TestNotInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Not(Equals(1))
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("Not(Equals(1))", Not(Equals(1))),
+ ("Not(Equals('1'))", Not(Equals('1')))]
+
+ describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
+
+
+def is_even(x):
+ return x % 2 == 0
+
+
+class TestMatchesPredicate(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesPredicate(is_even, "%s is not even")
+ matches_matches = [2, 4, 6, 8]
+ matches_mismatches = [3, 5, 7, 9]
+
+ str_examples = [
+ ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"),
+ MatchesPredicate(is_even, "%s is not even")),
+ ]
+
+ describe_examples = [
+ ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")),
+ ]
+
+
+def between(x, low, high):
+ return low < x < high
+
+
+class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(1, 9)
+ matches_matches = [2, 4, 6, 8]
+ matches_mismatches = [0, 1, 9, 10]
+
+ str_examples = [
+ ("MatchesPredicateWithParams(%r, %r)(%s)" % (
+ between, "{0} is not between {1} and {2}", "1, 2"),
+ MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(1, 2)),
+ ("Between(1, 2)", MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}", "Between")(1, 2)),
+ ]
+
+ describe_examples = [
+ ('1 is not between 2 and 3', 1, MatchesPredicateWithParams(
+ between, "{0} is not between {1} and {2}")(2, 3)),
+ ]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
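
For orientation, a minimal sketch of how the higher-order matchers exercised above compose outside the TestMatchersInterface harness. It assumes an importable testtools; the IsBetween name and the sample values are illustrative, and the behaviour shown is taken directly from the matches_matches / describe_examples data in these tests.

from testtools.matchers import (
    AllMatch,
    Annotate,
    Equals,
    LessThan,
    MatchesAll,
    MatchesPredicateWithParams,
    NotEquals,
    )

def between(x, low, high):
    return low < x < high

# A parameterised predicate matcher, as in TestMatchesPredicateWithParams.
IsBetween = MatchesPredicateWithParams(
    between, "{0} is not between {1} and {2}")

# Matcher.match() returns None on success and a Mismatch object on failure.
assert AllMatch(LessThan(10)).match([1, 2, 3]) is None
assert MatchesAll(NotEquals(1), NotEquals(2)).match(3) is None
assert IsBetween(1, 9).match(5) is None
# Annotate appends its message to the mismatch description on failure:
print(Annotate("foo", Equals(1)).match(2).describe())   # -> 1 != 2: foo
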
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py
new file mode 100644
index 00000000000..10967ead25b
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/matchers/test_impl.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for matchers."""
+
+from testtools import (
+ Matcher, # check that Matcher is exposed at the top level for docs.
+ TestCase,
+ )
+from testtools.compat import (
+ str_is_unicode,
+ text_repr,
+ _u,
+ )
+from testtools.matchers import (
+ Equals,
+ MatchesException,
+ Raises,
+ )
+from testtools.matchers._impl import (
+ Mismatch,
+ MismatchDecorator,
+ MismatchError,
+ )
+from testtools.tests.helpers import FullStackRunTest
+
+# Silence pyflakes.
+Matcher
+
+
+class TestMismatch(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_constructor_arguments(self):
+ mismatch = Mismatch("some description", {'detail': "things"})
+ self.assertEqual("some description", mismatch.describe())
+ self.assertEqual({'detail': "things"}, mismatch.get_details())
+
+ def test_constructor_no_arguments(self):
+ mismatch = Mismatch()
+ self.assertThat(mismatch.describe,
+ Raises(MatchesException(NotImplementedError)))
+ self.assertEqual({}, mismatch.get_details())
+
+
+class TestMismatchError(TestCase):
+
+ def test_is_assertion_error(self):
+ # MismatchError is an AssertionError, so that most of the time, it
+ # looks like a test failure, rather than an error.
+ def raise_mismatch_error():
+ raise MismatchError(2, Equals(3), Equals(3).match(2))
+ self.assertRaises(AssertionError, raise_mismatch_error)
+
+ def test_default_description_is_mismatch(self):
+ mismatch = Equals(3).match(2)
+ e = MismatchError(2, Equals(3), mismatch)
+ self.assertEqual(mismatch.describe(), str(e))
+
+ def test_default_description_unicode(self):
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ mismatch = matcher.match(matchee)
+ e = MismatchError(matchee, matcher, mismatch)
+ self.assertEqual(mismatch.describe(), str(e))
+
+ def test_verbose_description(self):
+ matchee = 2
+ matcher = Equals(3)
+ mismatch = matcher.match(2)
+ e = MismatchError(matchee, matcher, mismatch, True)
+ expected = (
+ 'Match failed. Matchee: %r\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ matchee,
+ matcher,
+ matcher.match(matchee).describe(),
+ ))
+ self.assertEqual(expected, str(e))
+
+ def test_verbose_unicode(self):
+ # When assertThat is given matchees or matchers that contain non-ASCII
+ # unicode strings, we can still provide a meaningful error.
+ matchee = _u('\xa7')
+ matcher = Equals(_u('a'))
+ mismatch = matcher.match(matchee)
+ expected = (
+ 'Match failed. Matchee: %s\n'
+ 'Matcher: %s\n'
+ 'Difference: %s\n' % (
+ text_repr(matchee),
+ matcher,
+ mismatch.describe(),
+ ))
+ e = MismatchError(matchee, matcher, mismatch, True)
+ if str_is_unicode:
+ actual = str(e)
+ else:
+ actual = unicode(e)
+ # Using str() should still work, and return ascii only
+ self.assertEqual(
+ expected.replace(matchee, matchee.encode("unicode-escape")),
+ str(e).decode("ascii"))
+ self.assertEqual(expected, actual)
+
+
+class TestMismatchDecorator(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def test_forwards_description(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(x.describe(), decorated.describe())
+
+ def test_forwards_details(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(x.get_details(), decorated.get_details())
+
+ def test_repr(self):
+ x = Mismatch("description", {'foo': 'bar'})
+ decorated = MismatchDecorator(x)
+ self.assertEqual(
+ '<testtools.matchers.MismatchDecorator(%r)>' % (x,),
+ repr(decorated))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
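
A short sketch of the matcher protocol that test_impl.py pins down: match() yields a Mismatch describing the failure, and assertThat wraps that Mismatch in a MismatchError, which is an AssertionError so ordinary runners report it as a failure. Only behaviour asserted in the tests above is shown; the import path for MismatchError mirrors the one used by the tests.

from testtools.matchers import Equals
from testtools.matchers._impl import MismatchError

mismatch = Equals(3).match(2)          # a Mismatch object
print(mismatch.describe())             # -> 3 != 2

error = MismatchError(2, Equals(3), mismatch)
assert isinstance(error, AssertionError)
assert str(error) == mismatch.describe()
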
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_compat.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py
index 5e385bf48ce..84e57be472c 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_compat.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_compat.py
@@ -2,6 +2,7 @@
"""Tests for miscellaneous compatibility functions"""
+import io
import linecache
import os
import sys
@@ -13,13 +14,20 @@ import testtools
from testtools.compat import (
_b,
_detect_encoding,
+ _format_exc_info,
+ _format_exception_only,
+ _format_stack_list,
_get_source_encoding,
_u,
+ reraise,
str_is_unicode,
text_repr,
unicode_output_stream,
)
from testtools.matchers import (
+ Equals,
+ Is,
+ IsInstance,
MatchesException,
Not,
Raises,
@@ -95,7 +103,7 @@ class TestDetectEncoding(testtools.TestCase):
"\xef\xbb\xbfimport sys\n",
))
self._check_encoding("utf-8", (
- "\xef\xbb\xbf# File encoding: UTF-8\n",
+ "\xef\xbb\xbf# File encoding: utf-8\n",
))
self._check_encoding("utf-8", (
'\xef\xbb\xbf"""Module docstring\n',
@@ -105,7 +113,8 @@ class TestDetectEncoding(testtools.TestCase):
'\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
self._check_encoding("utf-8", (
"\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
- '"""Module docstring say \xe2\x98\x86"""\n'))
+ '"""Module docstring say \xe2\x98\x86"""\n'),
+ possibly_invalid=True)
def test_multiple_coding_comments(self):
"""Test only the first of multiple coding declarations counts"""
@@ -255,12 +264,30 @@ class TestUnicodeOutputStream(testtools.TestCase):
newio = True
sout = StringIO()
soutwrapper = unicode_output_stream(sout)
- if newio:
- self.expectFailure("Python 3 StringIO expects text not bytes",
- self.assertThat, lambda: soutwrapper.write(self.uni),
- Not(Raises(MatchesException(TypeError))))
soutwrapper.write(self.uni)
- self.assertEqual("pa???n", sout.getvalue())
+ if newio:
+ self.assertEqual(self.uni, sout.getvalue())
+ else:
+ self.assertEqual("pa???n", sout.getvalue())
+
+ def test_io_stringio(self):
+ # io.StringIO only accepts unicode, so it should be returned as itself.
+ s = io.StringIO()
+ self.assertEqual(s, unicode_output_stream(s))
+
+ def test_io_bytesio(self):
+ # io.BytesIO only accepts bytes so should be wrapped.
+ bytes_io = io.BytesIO()
+ self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io))))
+ # Will error if bytes_io was not wrapped properly.
+ unicode_output_stream(bytes_io).write(_u('foo'))
+
+ def test_io_textwrapper(self):
+ # io.TextIOWrapper is a text (unicode) stream, so it should be returned as itself.
+ text_io = io.TextIOWrapper(io.BytesIO())
+ self.assertThat(unicode_output_stream(text_io), Is(text_io))
+ # To be sure...
+ unicode_output_stream(text_io).write(_u('foo'))
class TestTextRepr(testtools.TestCase):
@@ -389,6 +416,188 @@ class TestTextRepr(testtools.TestCase):
self.assertEqual(eval(actual), u)
+
+class TestReraise(testtools.TestCase):
+ """Tests for trivial reraise wrapper needed for Python 2/3 changes"""
+
+ def test_exc_info(self):
+ """After reraise exc_info matches plus some extra traceback"""
+ try:
+ raise ValueError("Bad value")
+ except ValueError:
+ _exc_info = sys.exc_info()
+ try:
+ reraise(*_exc_info)
+ except ValueError:
+ _new_exc_info = sys.exc_info()
+ self.assertIs(_exc_info[0], _new_exc_info[0])
+ self.assertIs(_exc_info[1], _new_exc_info[1])
+ expected_tb = traceback.extract_tb(_exc_info[2])
+ self.assertEqual(expected_tb,
+ traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):])
+
+ def test_custom_exception_no_args(self):
+ """Reraising does not require args attribute to contain params"""
+
+ class CustomException(Exception):
+ """Exception that expects and sets attrs but not args"""
+
+ def __init__(self, value):
+ Exception.__init__(self)
+ self.value = value
+
+ try:
+ raise CustomException("Some value")
+ except CustomException:
+ _exc_info = sys.exc_info()
+ self.assertRaises(CustomException, reraise, *_exc_info)
+
+
+class Python2CompatibilityTests(testtools.TestCase):
+
+ def setUp(self):
+ super(Python2CompatibilityTests, self).setUp()
+ if sys.version[0] >= '3':
+ self.skip("These tests are only applicable to python 2.")
+
+
+class TestExceptionFormatting(Python2CompatibilityTests):
+ """Test the _format_exception_only function."""
+
+ def _assert_exception_format(self, eclass, evalue, expected):
+ actual = _format_exception_only(eclass, evalue)
+ self.assertThat(actual, Equals(expected))
+ self.assertThat(''.join(actual), IsInstance(unicode))
+
+ def test_supports_string_exception(self):
+ self._assert_exception_format(
+ "String_Exception",
+ None,
+ [_u("String_Exception\n")]
+ )
+
+ def test_supports_regular_exception(self):
+ self._assert_exception_format(
+ RuntimeError,
+ RuntimeError("Something went wrong"),
+ [_u("RuntimeError: Something went wrong\n")]
+ )
+
+ def test_supports_unprintable_exceptions(self):
+ """Verify support for exception classes that raise an exception when
+ __unicode__ or __str__ is called.
+ """
+ class UnprintableException(Exception):
+
+ def __str__(self):
+ raise Exception()
+
+ def __unicode__(self):
+ raise Exception()
+
+ self._assert_exception_format(
+ UnprintableException,
+ UnprintableException("Foo"),
+ [_u("UnprintableException: <unprintable UnprintableException object>\n")]
+ )
+
+ def test_supports_exceptions_with_no_string_value(self):
+ class NoStringException(Exception):
+
+ def __str__(self):
+ return ""
+
+ def __unicode__(self):
+ return _u("")
+
+ self._assert_exception_format(
+ NoStringException,
+ NoStringException("Foo"),
+ [_u("NoStringException\n")]
+ )
+
+ def test_supports_strange_syntax_error(self):
+ """Test support for syntax errors with unusual number of arguments"""
+ self._assert_exception_format(
+ SyntaxError,
+ SyntaxError("Message"),
+ [_u("SyntaxError: Message\n")]
+ )
+
+ def test_supports_syntax_error(self):
+ self._assert_exception_format(
+ SyntaxError,
+ SyntaxError(
+ "Some Syntax Message",
+ (
+ "/path/to/file",
+ 12,
+ 2,
+ "This is the line of code",
+ )
+ ),
+ [
+ _u(' File "/path/to/file", line 12\n'),
+ _u(' This is the line of code\n'),
+ _u(' ^\n'),
+ _u('SyntaxError: Some Syntax Message\n'),
+ ]
+ )
+
+
+class StackListFormattingTests(Python2CompatibilityTests):
+ """Test the _format_stack_list function."""
+
+ def _assert_stack_format(self, stack_lines, expected_output):
+ actual = _format_stack_list(stack_lines)
+ self.assertThat(actual, Equals([expected_output]))
+
+ def test_single_complete_stack_line(self):
+ stack_lines = [(
+ '/path/to/filename',
+ 12,
+ 'func_name',
+ 'some_code()',
+ )]
+ expected = \
+ _u(' File "/path/to/filename", line 12, in func_name\n' \
+ ' some_code()\n')
+
+ self._assert_stack_format(stack_lines, expected)
+
+ def test_single_stack_line_no_code(self):
+ stack_lines = [(
+ '/path/to/filename',
+ 12,
+ 'func_name',
+ None
+ )]
+ expected = _u(' File "/path/to/filename", line 12, in func_name\n')
+ self._assert_stack_format(stack_lines, expected)
+
+
+class FormatExceptionInfoTests(Python2CompatibilityTests):
+
+ def test_individual_functions_called(self):
+ self.patch(
+ testtools.compat,
+ '_format_stack_list',
+ lambda stack_list: [_u("format stack list called\n")]
+ )
+ self.patch(
+ testtools.compat,
+ '_format_exception_only',
+ lambda etype, evalue: [_u("format exception only called\n")]
+ )
+ result = _format_exc_info(None, None, None)
+ expected = [
+ _u("Traceback (most recent call last):\n"),
+ _u("format stack list called\n"),
+ _u("format exception only called\n"),
+ ]
+ self.assertThat(expected, Equals(result))
+
+
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
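
The unicode_output_stream tests above boil down to a simple contract: streams that already accept text are returned unchanged, while byte streams are wrapped so that unicode can always be written. A minimal sketch of that contract, assuming testtools is importable:

import io

from testtools.compat import _u, unicode_output_stream

text_stream = io.StringIO()
# Already a text stream, so it comes back unchanged.
assert unicode_output_stream(text_stream) == text_stream

byte_stream = io.BytesIO()
wrapped = unicode_output_stream(byte_stream)
# Byte streams get a wrapping writer; writing text would fail without it.
assert wrapped is not byte_stream
wrapped.write(_u('foo'))

text_wrapper = io.TextIOWrapper(io.BytesIO())
assert unicode_output_stream(text_wrapper) is text_wrapper
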
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_content.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py
index 14f400f04ec..9ed1b2ffba5 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_content.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_content.py
@@ -1,5 +1,6 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+import json
import os
import tempfile
import unittest
@@ -8,6 +9,7 @@ from testtools import TestCase
from testtools.compat import (
_b,
_u,
+ BytesIO,
StringIO,
)
from testtools.content import (
@@ -15,6 +17,10 @@ from testtools.content import (
Content,
content_from_file,
content_from_stream,
+ JSON,
+ json_content,
+ StackLinesContent,
+ StacktraceContent,
TracebackContent,
text_content,
)
@@ -87,6 +93,12 @@ class TestContent(TestCase):
content = Content(content_type, lambda: [iso_version])
self.assertEqual([text], list(content.iter_text()))
+ def test_as_text(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual(_u("bytes\xea"), content.as_text())
+
def test_from_file(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
@@ -116,6 +128,26 @@ class TestContent(TestCase):
self.assertThat(
''.join(content.iter_text()), Equals('some data'))
+ def test_from_file_with_simple_seek(self):
+ f = tempfile.NamedTemporaryFile()
+ f.write(_b('some data'))
+ f.flush()
+ self.addCleanup(f.close)
+ content = content_from_file(
+ f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_file_with_whence_seek(self):
+ f = tempfile.NamedTemporaryFile()
+ f.write(_b('some data'))
+ f.flush()
+ self.addCleanup(f.close)
+ content = content_from_file(
+ f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
def test_from_stream(self):
data = StringIO('some data')
content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
@@ -130,19 +162,82 @@ class TestContent(TestCase):
def test_from_stream_eager_loading(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
+ self.addCleanup(os.close, fd)
os.write(fd, _b('some data'))
stream = open(path, 'rb')
+ self.addCleanup(stream.close)
content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
os.write(fd, _b('more data'))
- os.close(fd)
self.assertThat(
''.join(content.iter_text()), Equals('some data'))
+ def test_from_stream_with_simple_seek(self):
+ data = BytesIO(_b('some data'))
+ content = content_from_stream(
+ data, UTF8_TEXT, chunk_size=50, seek_offset=5)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
+ def test_from_stream_with_whence_seek(self):
+ data = BytesIO(_b('some data'))
+ content = content_from_stream(
+ data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+ self.assertThat(
+ list(content.iter_bytes()), Equals([_b('data')]))
+
def test_from_text(self):
data = _u("some data")
expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
self.assertEqual(expected, text_content(data))
+ def test_json_content(self):
+ data = {'foo': 'bar'}
+ expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
+ self.assertEqual(expected, json_content(data))
+
+
+class TestStackLinesContent(TestCase):
+
+ def _get_stack_line_and_expected_output(self):
+ stack_lines = [
+ ('/path/to/file', 42, 'some_function', 'print("Hello World")'),
+ ]
+ expected = ' File "/path/to/file", line 42, in some_function\n' \
+ ' print("Hello World")\n'
+ return stack_lines, expected
+
+ def test_single_stack_line(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ actual = StackLinesContent(stack_lines).as_text()
+
+ self.assertEqual(expected, actual)
+
+ def test_prefix_content(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ prefix = self.getUniqueString() + '\n'
+ content = StackLinesContent(stack_lines, prefix_content=prefix)
+ actual = content.as_text()
+ expected = prefix + expected
+
+ self.assertEqual(expected, actual)
+
+ def test_postfix_content(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ postfix = '\n' + self.getUniqueString()
+ content = StackLinesContent(stack_lines, postfix_content=postfix)
+ actual = content.as_text()
+ expected = expected + postfix
+
+ self.assertEqual(expected, actual)
+
+ def test___init___sets_content_type(self):
+ stack_lines, expected = self._get_stack_line_and_expected_output()
+ content = StackLinesContent(stack_lines)
+ expected_content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+
+ self.assertEqual(expected_content_type, content.content_type)
+
class TestTracebackContent(TestCase):
@@ -160,6 +255,33 @@ class TestTracebackContent(TestCase):
self.assertEqual(expected, ''.join(list(content.iter_text())))
+class TestStacktraceContent(TestCase):
+
+ def test___init___sets_ivars(self):
+ content = StacktraceContent()
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+
+ self.assertEqual(content_type, content.content_type)
+
+ def test_prefix_is_used(self):
+ prefix = self.getUniqueString()
+ actual = StacktraceContent(prefix_content=prefix).as_text()
+
+ self.assertTrue(actual.startswith(prefix))
+
+ def test_postfix_is_used(self):
+ postfix = self.getUniqueString()
+ actual = StacktraceContent(postfix_content=postfix).as_text()
+
+ self.assertTrue(actual.endswith(postfix))
+
+ def test_top_frame_is_skipped_when_no_stack_is_specified(self):
+ actual = StacktraceContent().as_text()
+
+ self.assertTrue('testtools/content.py' not in actual)
+
+
class TestAttachFile(TestCase):
def make_file(self, data):
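
For reference, a small sketch of the two content helpers these new tests introduce: the seek_offset/seek_whence arguments to content_from_file, and json_content. The temporary file is created on the fly; the expected chunks mirror the assertions in the tests above.

import os
import tempfile

from testtools.compat import _b
from testtools.content import content_from_file, json_content
from testtools.content_type import UTF8_TEXT

fd, path = tempfile.mkstemp()
os.write(fd, _b('some data'))
os.close(fd)
try:
    # seek_offset skips the first five bytes before the content is read.
    content = content_from_file(path, UTF8_TEXT, chunk_size=50, seek_offset=5)
    assert list(content.iter_bytes()) == [_b('data')]
finally:
    os.remove(path)

# json_content serialises the object and tags it with the JSON content type.
detail = json_content({'foo': 'bar'})
assert list(detail.iter_bytes()) == [_b('{"foo": "bar"}')]
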
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_content_type.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py
index 9d8c0f6f7af..2d34f95e479 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_content_type.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_content_type.py
@@ -1,8 +1,12 @@
-# Copyright (c) 2008 testtools developers. See LICENSE for details.
+# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
from testtools import TestCase
from testtools.matchers import Equals, MatchesException, Raises
-from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.content_type import (
+ ContentType,
+ JSON,
+ UTF8_TEXT,
+ )
class TestContentType(TestCase):
@@ -39,7 +43,7 @@ class TestContentType(TestCase):
content_type = ContentType(
'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
self.assertThat(
- repr(content_type), Equals('text/plain; foo="bar", baz="qux"'))
+ repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
class TestBuiltinContentTypes(TestCase):
@@ -50,6 +54,12 @@ class TestBuiltinContentTypes(TestCase):
self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
+ def test_json_content(self):
+ # The JSON content type represents application/json, which is implicitly UTF-8.
+ self.assertThat(JSON.type, Equals('application'))
+ self.assertThat(JSON.subtype, Equals('json'))
+ self.assertThat(JSON.parameters, Equals({}))
+
def test_suite():
from unittest import TestLoader
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_deferredruntest.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py
index ab0fd87890a..f0510dc9a9f 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_deferredruntest.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_deferredruntest.py
@@ -5,6 +5,8 @@
import os
import signal
+from extras import try_import
+
from testtools import (
skipIf,
TestCase,
@@ -13,7 +15,6 @@ from testtools import (
from testtools.content import (
text_content,
)
-from testtools.helpers import try_import
from testtools.matchers import (
Equals,
KeysEqual,
@@ -545,7 +546,7 @@ class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
self.addCleanup(lambda: 3 / 0)
# Dirty the reactor.
from twisted.internet.protocol import ServerFactory
- reactor.listenTCP(0, ServerFactory())
+ reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
# Unhandled error.
defer.maybeDeferred(lambda: 2 / 0)
# Actual error.
@@ -746,6 +747,19 @@ class TestAssertFailsWith(NeedsTwistedTestCase):
lambda x: self.fail("Should not have succeeded"), check_result)
+class TestRunWithLogObservers(NeedsTwistedTestCase):
+
+ def test_restores_observers(self):
+ from testtools.deferredruntest import run_with_log_observers
+ from twisted.python import log
+ # Make sure there's at least one observer. This reproduces bug
+ # #926189.
+ log.addObserver(lambda *args: None)
+ observers = list(log.theLogPublisher.observers)
+ run_with_log_observers([], lambda: None)
+ self.assertEqual(observers, log.theLogPublisher.observers)
+
+
def test_suite():
from unittest import TestLoader, TestSuite
return TestSuite(
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_distutilscmd.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py
index c485a473d39..7bfc1fa267b 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_distutilscmd.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_distutilscmd.py
@@ -4,11 +4,13 @@
from distutils.dist import Distribution
+from extras import try_import
+
from testtools.compat import (
_b,
+ _u,
BytesIO,
)
-from testtools.helpers import try_import
fixtures = try_import('fixtures')
import testtools
@@ -52,7 +54,7 @@ class TestCommandTest(TestCase):
def test_test_module(self):
self.useFixture(SampleTestFixture())
- stream = BytesIO()
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
dist = Distribution()
dist.script_name = 'setup.py'
dist.script_args = ['test']
@@ -60,11 +62,11 @@ class TestCommandTest(TestCase):
dist.command_options = {
'test': {'test_module': ('command line', 'testtools.runexample')}}
cmd = dist.reinitialize_command('test')
- cmd.runner.stdout = stream
- dist.run_command('test')
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ dist.run_command('test')
self.assertThat(
- stream.getvalue(),
- MatchesRegex(_b("""Tests running...
+ stdout.getDetails()['stdout'].as_text(),
+ MatchesRegex(_u("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
@@ -72,7 +74,7 @@ OK
def test_test_suite(self):
self.useFixture(SampleTestFixture())
- stream = BytesIO()
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
dist = Distribution()
dist.script_name = 'setup.py'
dist.script_args = ['test']
@@ -82,11 +84,11 @@ OK
'test_suite': (
'command line', 'testtools.runexample.test_suite')}}
cmd = dist.reinitialize_command('test')
- cmd.runner.stdout = stream
- dist.run_command('test')
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ dist.run_command('test')
self.assertThat(
- stream.getvalue(),
- MatchesRegex(_b("""Tests running...
+ stdout.getDetails()['stdout'].as_text(),
+ MatchesRegex(_u("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
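
The change above replaces direct assignment of the runner's stdout with the StringStream / MonkeyPatch capture pattern from the fixtures package. A minimal sketch of that pattern; the test class and message are illustrative, and it assumes the optional fixtures dependency is installed.

import fixtures
from testtools import TestCase

class CapturesStdout(TestCase):

    def test_captures_print(self):
        stdout = self.useFixture(fixtures.StringStream('stdout'))
        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
            print('hello from the test')
        captured = stdout.getDetails()['stdout'].as_text()
        self.assertIn('hello from the test', captured)
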
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_fixturesupport.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py
index ae6f2ec86e3..2ccd1e853a0 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_fixturesupport.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_fixturesupport.py
@@ -2,13 +2,14 @@
import unittest
+from extras import try_import
+
from testtools import (
TestCase,
content,
content_type,
)
from testtools.compat import _b, _u
-from testtools.helpers import try_import
from testtools.testresult.doubles import (
ExtendedTestResult,
)
@@ -70,9 +71,9 @@ class TestFixtureSupport(TestCase):
self.assertEqual('addSuccess', result._events[-2][0])
details = result._events[-2][2]
self.assertEqual(['content', 'content-1'], sorted(details.keys()))
- self.assertEqual('foo', _u('').join(details['content'].iter_text()))
+ self.assertEqual('foo', details['content'].as_text())
self.assertEqual('content available until cleanUp',
- ''.join(details['content-1'].iter_text()))
+ details['content-1'].as_text())
def test_useFixture_multiple_details_captured(self):
class DetailsFixture(fixtures.Fixture):
@@ -89,8 +90,8 @@ class TestFixtureSupport(TestCase):
self.assertEqual('addSuccess', result._events[-2][0])
details = result._events[-2][2]
self.assertEqual(['aaa', 'bbb'], sorted(details))
- self.assertEqual('foo', ''.join(details['aaa'].iter_text()))
- self.assertEqual('bar', ''.join(details['bbb'].iter_text()))
+ self.assertEqual(_u('foo'), details['aaa'].as_text())
+ self.assertEqual(_u('bar'), details['bbb'].as_text())
def test_useFixture_details_captured_from_setUp(self):
# Details added during fixture set-up are gathered even if setUp()
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py
new file mode 100644
index 00000000000..848c2f0b489
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_helpers.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.tests.helpers import (
+ FullStackRunTest,
+ hide_testtools_stack,
+ is_stack_hidden,
+ )
+
+
+class TestStackHiding(TestCase):
+
+ run_tests_with = FullStackRunTest
+
+ def setUp(self):
+ super(TestStackHiding, self).setUp()
+ self.addCleanup(hide_testtools_stack, is_stack_hidden())
+
+ def test_is_stack_hidden_consistent_true(self):
+ hide_testtools_stack(True)
+ self.assertEqual(True, is_stack_hidden())
+
+ def test_is_stack_hidden_consistent_false(self):
+ hide_testtools_stack(False)
+ self.assertEqual(False, is_stack_hidden())
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
index 540a2ee909f..540a2ee909f 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_monkey.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_monkey.py
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py
new file mode 100644
index 00000000000..e89ecdc26a4
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_run.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the test runner logic."""
+
+from unittest import TestSuite
+import sys
+
+from extras import try_import
+fixtures = try_import('fixtures')
+testresources = try_import('testresources')
+
+import testtools
+from testtools import TestCase, run
+from testtools.compat import (
+ _b,
+ StringIO,
+ )
+from testtools.matchers import Contains
+
+
+if fixtures:
+ class SampleTestFixture(fixtures.Fixture):
+ """Creates testtools.runexample temporarily."""
+
+ def __init__(self, broken=False):
+ """Create a SampleTestFixture.
+
+ :param broken: If True, the sample file will not be importable.
+ """
+ if not broken:
+ init_contents = _b("""\
+from testtools import TestCase
+
+class TestFoo(TestCase):
+ def test_bar(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+""")
+ else:
+ init_contents = b"class not in\n"
+ self.package = fixtures.PythonPackage(
+ 'runexample', [('__init__.py', init_contents)])
+
+ def setUp(self):
+ super(SampleTestFixture, self).setUp()
+ self.useFixture(self.package)
+ testtools.__path__.append(self.package.base)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+ self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
+
+
+if fixtures and testresources:
+ class SampleResourcedFixture(fixtures.Fixture):
+ """Creates a test suite that uses testresources."""
+
+ def __init__(self):
+ super(SampleResourcedFixture, self).__init__()
+ self.package = fixtures.PythonPackage(
+ 'resourceexample', [('__init__.py', _b("""
+from fixtures import Fixture
+from testresources import (
+ FixtureResource,
+ OptimisingTestSuite,
+ ResourcedTestCase,
+ )
+from testtools import TestCase
+
+class Printer(Fixture):
+
+ def setUp(self):
+ super(Printer, self).setUp()
+ print('Setting up Printer')
+
+ def reset(self):
+ pass
+
+class TestFoo(TestCase, ResourcedTestCase):
+ # When run, this prints 'Setting up Printer' just once; if the
+ # OptimisingTestSuite is not honoured, it prints once per test case instead.
+ resources=[('res', FixtureResource(Printer()))]
+ def test_bar(self):
+ pass
+ def test_foo(self):
+ pass
+ def test_quux(self):
+ pass
+def test_suite():
+ from unittest import TestLoader
+ return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
+"""))])
+
+ def setUp(self):
+ super(SampleResourcedFixture, self).setUp()
+ self.useFixture(self.package)
+ self.addCleanup(testtools.__path__.remove, self.package.base)
+ testtools.__path__.append(self.package.base)
+
+
+class TestRun(TestCase):
+
+ def setUp(self):
+ super(TestRun, self).setUp()
+ if fixtures is None:
+ self.skipTest("Need fixtures")
+
+ def test_run_custom_list(self):
+ self.useFixture(SampleTestFixture())
+ tests = []
+ class CaptureList(run.TestToolsTestRunner):
+ def list(self, test):
+ tests.append(set([case.id() for case
+ in testtools.testsuite.iterate_tests(test)]))
+ out = StringIO()
+ try:
+ program = run.TestProgram(
+ argv=['prog', '-l', 'testtools.runexample.test_suite'],
+ stdout=out, testRunner=CaptureList)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual([set(['testtools.runexample.TestFoo.test_bar',
+ 'testtools.runexample.TestFoo.test_quux'])], tests)
+
+ def test_run_list(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ try:
+ run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+testtools.runexample.TestFoo.test_quux
+""", out.getvalue())
+
+ def test_run_list_failed_import(self):
+ if not run.have_discover:
+ self.skipTest("Need discover")
+ broken = self.useFixture(SampleTestFixture(broken=True))
+ out = StringIO()
+ exc = self.assertRaises(
+ SystemExit,
+ run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
+ self.assertEqual(2, exc.args[0])
+ self.assertEqual("""Failed to import
+runexample.__init__
+""", out.getvalue())
+
+ def test_run_orders_tests(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # The load list names two tests - one that exists and one that doesn't.
+ # Only the listed test that exists should run; the missing test and the
+ # unlisted test that does exist should not.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+ finally:
+ f.close()
+ try:
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+ def test_run_load_list(self):
+ self.useFixture(SampleTestFixture())
+ out = StringIO()
+ # The load list names two tests - one that exists and one that doesn't.
+ # Only the listed test that exists should run; the missing test and the
+ # unlisted test that does exist should not.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+ finally:
+ f.close()
+ try:
+ run.main(['prog', '-l', '--load-list', tempname,
+ 'testtools.runexample.test_suite'], out)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ raise AssertionError("-l tried to exit. %r" % exc_info[1])
+ self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+ def test_load_list_preserves_custom_suites(self):
+ if testresources is None:
+ self.skipTest("Need testresources")
+ self.useFixture(SampleResourcedFixture())
+ # We load two tests, not loading one. Both share a resource, so we
+ # should see just one resource setup occur.
+ tempdir = self.useFixture(fixtures.TempDir())
+ tempname = tempdir.path + '/tests.list'
+ f = open(tempname, 'wb')
+ try:
+ f.write(_b("""
+testtools.resourceexample.TestFoo.test_bar
+testtools.resourceexample.TestFoo.test_foo
+"""))
+ finally:
+ f.close()
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ try:
+ run.main(['prog', '--load-list', tempname,
+ 'testtools.resourceexample.test_suite'], stdout.stream)
+ except SystemExit:
+ # Evil resides in TestProgram.
+ pass
+ out = stdout.getDetails()['stdout'].as_text()
+ self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
+
+ def test_run_failfast(self):
+ stdout = self.useFixture(fixtures.StringStream('stdout'))
+
+ class Failing(TestCase):
+ def test_a(self):
+ self.fail('a')
+ def test_b(self):
+ self.fail('b')
+ runner = run.TestToolsTestRunner(failfast=True)
+ with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+ runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
+ self.assertThat(
+ stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
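
The listing behaviour driven above can also be exercised directly: '-l' makes the runner print test ids instead of running them, and '--load-list FILE' restricts the run to the ids named in FILE. A sketch using one of testtools' own suites as the target; any importable test_suite name works the same way.

from testtools import run
from testtools.compat import StringIO

out = StringIO()
try:
    run.main(['prog', '-l', 'testtools.tests.test_tags.test_suite'], out)
except SystemExit:
    pass  # guard as the tests above do; the listing is already written to out
print(out.getvalue())
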
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
index afbb8baf395..afbb8baf395 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_runtest.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_runtest.py
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_spinner.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py
index 3d677bd7545..6112252acd9 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_spinner.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_spinner.py
@@ -5,11 +5,12 @@
import os
import signal
+from extras import try_import
+
from testtools import (
skipIf,
TestCase,
)
-from testtools.helpers import try_import
from testtools.matchers import (
Equals,
Is,
@@ -231,7 +232,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
- port = reactor.listenTCP(0, ServerFactory())
+ port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
spinner.run(self.make_timeout(), lambda: None)
results = spinner.get_junk()
self.assertThat(results, Equals([port]))
@@ -261,7 +262,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
port = spinner.run(
- self.make_timeout(), reactor.listenTCP, 0, ServerFactory())
+ self.make_timeout(), reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
self.assertThat(spinner.get_junk(), Equals([port]))
def test_will_not_run_with_previous_junk(self):
@@ -271,7 +272,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
- spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
+ spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
self.assertThat(lambda: spinner.run(timeout, lambda: None),
Raises(MatchesException(_spinner.StaleJunkError)))
@@ -282,7 +283,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
- port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
+ port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
junk = spinner.clear_junk()
self.assertThat(junk, Equals([port]))
self.assertThat(spinner.get_junk(), Equals([]))
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py
new file mode 100644
index 00000000000..5010f9ac12c
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_tags.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Test tag support."""
+
+
+from testtools import TestCase
+from testtools.tags import TagContext
+
+
+class TestTags(TestCase):
+
+ def test_no_tags(self):
+ # A tag context has no tags initially.
+ tag_context = TagContext()
+ self.assertEqual(set(), tag_context.get_current_tags())
+
+ def test_add_tag(self):
+ # A tag added with change_tags appears in get_current_tags.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), tag_context.get_current_tags())
+
+ def test_add_tag_twice(self):
+ # Calling change_tags twice to add tags adds both tags to the current
+ # tags.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ tag_context.change_tags(set(['bar']), set())
+ self.assertEqual(
+ set(['foo', 'bar']), tag_context.get_current_tags())
+
+ def test_change_tags_returns_tags(self):
+ # change_tags returns the current tags. This is a convenience.
+ tag_context = TagContext()
+ tags = tag_context.change_tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), tags)
+
+ def test_remove_tag(self):
+ # change_tags can remove tags from the context.
+ tag_context = TagContext()
+ tag_context.change_tags(set(['foo']), set())
+ tag_context.change_tags(set(), set(['foo']))
+ self.assertEqual(set(), tag_context.get_current_tags())
+
+ def test_child_context(self):
+ # A TagContext can have a parent. If so, its tags are the tags of the
+ # parent at the moment of construction.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ self.assertEqual(
+ parent.get_current_tags(), child.get_current_tags())
+
+ def test_add_to_child(self):
+ # Adding a tag to the child context doesn't affect the parent.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(['bar']), set())
+ self.assertEqual(set(['foo', 'bar']), child.get_current_tags())
+ self.assertEqual(set(['foo']), parent.get_current_tags())
+
+ def test_remove_in_child(self):
+ # A tag that was in the parent context can be removed from the child
+ # context without affecting the parent.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(), set(['foo']))
+ self.assertEqual(set(), child.get_current_tags())
+ self.assertEqual(set(['foo']), parent.get_current_tags())
+
+ def test_parent(self):
+ # The parent can be retrieved from a child context.
+ parent = TagContext()
+ parent.change_tags(set(['foo']), set())
+ child = TagContext(parent)
+ child.change_tags(set(), set(['foo']))
+ self.assertEqual(parent, child.parent)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
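
A compact restatement of the TagContext behaviour covered above: tags accumulate through change_tags(new_tags, gone_tags), and a child context starts from the parent's tags without ever modifying the parent.

from testtools.tags import TagContext

parent = TagContext()
parent.change_tags(set(['foo']), set())

child = TagContext(parent)
child.change_tags(set(['bar']), set(['foo']))     # add 'bar', drop 'foo'

assert parent.get_current_tags() == set(['foo'])  # parent is untouched
assert child.get_current_tags() == set(['bar'])
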
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testcase.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py
index 52f93c3c526..680368db4a1 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_testcase.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_testcase.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""Tests for extensions to the base test library."""
@@ -8,6 +8,7 @@ import sys
import unittest
from testtools import (
+ DecorateTestCaseResult,
ErrorHolder,
MultipleExceptions,
PlaceHolder,
@@ -23,19 +24,28 @@ from testtools.compat import (
_b,
_u,
)
+from testtools.content import (
+ text_content,
+ TracebackContent,
+ )
from testtools.matchers import (
Annotate,
DocTestMatches,
Equals,
+ HasLength,
MatchesException,
Raises,
)
+from testtools.testcase import (
+ attr,
+ Nullary,
+ WithAttributes,
+ )
from testtools.testresult.doubles import (
Python26TestResult,
Python27TestResult,
ExtendedTestResult,
)
-from testtools.testresult.real import TestResult
from testtools.tests.helpers import (
an_exc_info,
FullStackRunTest,
@@ -76,16 +86,21 @@ class TestPlaceHolder(TestCase):
# repr(placeholder) shows you how the object was constructed.
test = PlaceHolder("test id")
self.assertEqual(
- "<testtools.testcase.PlaceHolder(%s)>" % repr(test.id()),
- repr(test))
+ "<testtools.testcase.PlaceHolder('addSuccess', %s, {})>" % repr(
+ test.id()), repr(test))
def test_repr_with_description(self):
# repr(placeholder) shows you how the object was constructed.
test = PlaceHolder("test id", "description")
self.assertEqual(
- "<testtools.testcase.PlaceHolder(%r, %r)>" % (
- test.id(), test.shortDescription()),
- repr(test))
+ "<testtools.testcase.PlaceHolder('addSuccess', %r, {}, %r)>" % (
+ test.id(), test.shortDescription()), repr(test))
+
+ def test_repr_custom_outcome(self):
+ test = PlaceHolder("test id", outcome='addSkip')
+ self.assertEqual(
+ "<testtools.testcase.PlaceHolder('addSkip', %r, {})>" % (
+ test.id()), repr(test))
def test_counts_as_one_test(self):
# A placeholder test counts as one test.
@@ -103,9 +118,39 @@ class TestPlaceHolder(TestCase):
log = []
test.run(LoggingResult(log))
self.assertEqual(
- [('startTest', test), ('addSuccess', test), ('stopTest', test)],
+ [('tags', set(), set()), ('startTest', test), ('addSuccess', test),
+ ('stopTest', test), ('tags', set(), set()),],
log)
+ def test_supplies_details(self):
+ details = {'quux':None}
+ test = PlaceHolder('foo', details=details)
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(
+ [('tags', set(), set()),
+ ('startTest', test),
+ ('addSuccess', test, details),
+ ('stopTest', test),
+ ('tags', set(), set()),
+ ],
+ result._events)
+
+ def test_supplies_timestamps(self):
+ test = PlaceHolder('foo', details={}, timestamps=["A", "B"])
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(
+ [('time', "A"),
+ ('tags', set(), set()),
+ ('startTest', test),
+ ('time', "B"),
+ ('addSuccess', test),
+ ('stopTest', test),
+ ('tags', set(), set()),
+ ],
+ result._events)
+
def test_call_is_run(self):
# A PlaceHolder can be called, in which case it behaves like run.
test = self.makePlaceHolder()
@@ -124,8 +169,23 @@ class TestPlaceHolder(TestCase):
# A PlaceHolder can be debugged.
self.makePlaceHolder().debug()
+ def test_supports_tags(self):
+ result = ExtendedTestResult()
+ tags = set(['foo', 'bar'])
+ case = PlaceHolder("foo", tags=tags)
+ case.run(result)
+ self.assertEqual([
+ ('tags', tags, set()),
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ('tags', set(), tags),
+ ], result._events)
+
class TestErrorHolder(TestCase):
+ # Note that these tests exist because ErrorHolder exists - it could be
+ # deprecated and dropped at this point.
run_test_with = FullStackRunTest
@@ -157,23 +217,6 @@ class TestErrorHolder(TestCase):
test = ErrorHolder("test id", self.makeException(), "description")
self.assertEqual("description", test.shortDescription())
- def test_repr_just_id(self):
- # repr(placeholder) shows you how the object was constructed.
- error = self.makeException()
- test = ErrorHolder("test id", error)
- self.assertEqual(
- "<testtools.testcase.ErrorHolder(%r, %r)>" % (test.id(), error),
- repr(test))
-
- def test_repr_with_description(self):
- # repr(placeholder) shows you how the object was constructed.
- error = self.makeException()
- test = ErrorHolder("test id", error, "description")
- self.assertEqual(
- "<testtools.testcase.ErrorHolder(%r, %r, %r)>" % (
- test.id(), error, test.shortDescription()),
- repr(test))
-
def test_counts_as_one_test(self):
# A placeholder test counts as one test.
test = self.makePlaceHolder()
@@ -185,15 +228,18 @@ class TestErrorHolder(TestCase):
self.assertEqual(test.id(), str(test))
def test_runs_as_error(self):
- # When run, a PlaceHolder test records a success.
+ # When run, an ErrorHolder test records an error.
error = self.makeException()
test = self.makePlaceHolder(error=error)
- log = []
- test.run(LoggingResult(log))
+ result = ExtendedTestResult()
+ log = result._events
+ test.run(result)
self.assertEqual(
- [('startTest', test),
- ('addError', test, error),
- ('stopTest', test)], log)
+ [('tags', set(), set()),
+ ('startTest', test),
+ ('addError', test, test._details),
+ ('stopTest', test),
+ ('tags', set(), set())], log)
def test_call_is_run(self):
# A PlaceHolder can be called, in which case it behaves like run.
@@ -256,11 +302,25 @@ class TestAssertions(TestCase):
# assertRaises asserts that a callable raises a particular exception.
self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
+ def test_assertRaises_exception_w_metaclass(self):
+ # assertRaises works when called for exceptions with custom metaclasses
+ class MyExMeta(type):
+ def __init__(cls, name, bases, dct):
+ """ Do some dummy metaclass stuff """
+ dct.update({'answer': 42})
+ type.__init__(cls, name, bases, dct)
+
+ class MyEx(Exception):
+ __metaclass__ = MyExMeta
+
+ self.assertRaises(MyEx, self.raiseError, MyEx)
+
def test_assertRaises_fails_when_no_error_raised(self):
# assertRaises raises self.failureException when it's passed a
# callable that raises no error.
ret = ('orange', 42)
- self.assertFails("<function <lambda> at ...> returned ('orange', 42)",
+ self.assertFails(
+ "<function ...<lambda> at ...> returned ('orange', 42)",
self.assertRaises, RuntimeError, lambda: ret)
def test_assertRaises_fails_when_different_error_raised(self):
@@ -303,12 +363,23 @@ class TestAssertions(TestCase):
# a callable that doesn't raise an exception, then fail with an
# appropriate error message.
expectedExceptions = (RuntimeError, ZeroDivisionError)
- failure = self.assertRaises(
+ self.assertRaises(
self.failureException,
self.assertRaises, expectedExceptions, lambda: None)
- self.assertFails('<function <lambda> at ...> returned None',
+ self.assertFails('<function ...<lambda> at ...> returned None',
self.assertRaises, expectedExceptions, lambda: None)
+ def test_assertRaises_function_repr_in_exception(self):
+ # When assertRaises fails, it includes the repr of the invoked
+ # function in the error message, so it's easy to locate the problem.
+ def foo():
+ """An arbitrary function."""
+ pass
+ self.assertThat(
+ lambda: self.assertRaises(Exception, foo),
+ Raises(
+ MatchesException(self.failureException, '.*%r.*' % (foo,))))
+
def assertFails(self, message, function, *args, **kwargs):
"""Assert that function raises a failure with the given message."""
failure = self.assertRaises(
@@ -498,6 +569,16 @@ class TestAssertions(TestCase):
self.assertFails(
expected, self.assertThat, matchee, matcher, verbose=True)
+ def test__force_failure_fails_test(self):
+ class Test(TestCase):
+ def test_foo(self):
+ self.force_failure = True
+ self.remaining_code_run = True
+ test = Test('test_foo')
+ result = test.run()
+ self.assertFalse(result.wasSuccessful())
+ self.assertTrue(test.remaining_code_run)
+
def get_error_string(self, e):
"""Get the string showing how 'e' would be formatted in test output.
@@ -510,7 +591,7 @@ class TestAssertions(TestCase):
about stack traces and formats the exception class. We don't care
about either of these, so we take its output and parse it a little.
"""
- error = TestResult()._exc_info_to_unicode((e.__class__, e, None), self)
+ error = TracebackContent((e.__class__, e, None), self).as_text()
# We aren't at all interested in the traceback.
if error.startswith('Traceback (most recent call last):\n'):
lines = error.splitlines(True)[1:]
@@ -550,7 +631,7 @@ class TestAssertions(TestCase):
expected_error = '\n'.join([
'!=:',
'reference = %s' % pformat(a),
- 'actual = %s' % pformat(b),
+ 'actual = %s' % pformat(b),
': ' + message,
])
self.assertFails(expected_error, self.assertEqual, a, b, message)
@@ -575,7 +656,7 @@ class TestAssertions(TestCase):
'a',
repr('\xa7')[1:-1],
"'''",
- 'actual = %r' % (b,),
+ 'actual = %r' % (b,),
': ' + message,
])
self.assertFails(expected_error, self.assertEqual, a, b, message)
@@ -594,6 +675,18 @@ class TestAssertions(TestCase):
self.assertFails(expected_error, self.assertIsNotNone, None)
+ def test_fail_preserves_traceback_detail(self):
+ class Test(TestCase):
+ def test(self):
+ self.addDetail('traceback', text_content('foo'))
+ self.fail('bar')
+ test = Test('test')
+ result = ExtendedTestResult()
+ test.run(result)
+ self.assertEqual(set(['traceback', 'traceback-1']),
+ set(result._events[1][2].keys()))
+
+
class TestAddCleanup(TestCase):
"""Tests for TestCase.addCleanup."""
@@ -1024,6 +1117,15 @@ class TestDetailsProvided(TestWithDetails):
self.assertDetailsProvided(Case("test"), "addFailure",
["foo", "foo-1", "traceback"])
+ def test_addDetailUniqueName_works(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetailUniqueName("foo", content)
+ self.addDetailUniqueName("foo", content)
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo", "foo-1"])
+
class TestSetupTearDown(TestCase):
@@ -1037,7 +1139,11 @@ class TestSetupTearDown(TestCase):
pass
result = unittest.TestResult()
DoesnotcallsetUp('test_method').run(result)
- self.assertEqual(1, len(result.errors))
+ self.assertThat(result.errors, HasLength(1))
+ self.assertThat(result.errors[0][1],
+ DocTestMatches(
+ "...ValueError...File...testtools/tests/test_testcase.py...",
+ ELLIPSIS))
def test_tearDownNotCalled(self):
class DoesnotcalltearDown(TestCase):
@@ -1047,7 +1153,11 @@ class TestSetupTearDown(TestCase):
pass
result = unittest.TestResult()
DoesnotcalltearDown('test_method').run(result)
- self.assertEqual(1, len(result.errors))
+ self.assertThat(result.errors, HasLength(1))
+ self.assertThat(result.errors[0][1],
+ DocTestMatches(
+ "...ValueError...File...testtools/tests/test_testcase.py...",
+ ELLIPSIS))
class TestSkipping(TestCase):
@@ -1072,7 +1182,7 @@ class TestSkipping(TestCase):
case.run(result)
self.assertEqual('addSkip', result._events[1][0])
self.assertEqual('no reason given.',
- ''.join(result._events[1][2]['reason'].iter_text()))
+ result._events[1][2]['reason'].as_text())
def test_skipException_in_setup_calls_result_addSkip(self):
class TestThatRaisesInSetUp(TestCase):
@@ -1283,6 +1393,158 @@ class TestTestCaseSuper(TestCase):
self.assertTrue(test.teardown_called)
+class TestNullary(TestCase):
+
+ def test_repr(self):
+ # The repr() of nullary is the same as the repr() of the wrapped
+ # function.
+ def foo():
+ pass
+ wrapped = Nullary(foo)
+ self.assertEqual(repr(wrapped), repr(foo))
+
+ def test_called_with_arguments(self):
+ # The function is called with the arguments given to Nullary's
+ # constructor.
+ l = []
+ def foo(*args, **kwargs):
+ l.append((args, kwargs))
+ wrapped = Nullary(foo, 1, 2, a="b")
+ wrapped()
+ self.assertEqual(l, [((1, 2), {'a': 'b'})])
+
+ def test_returns_wrapped(self):
+ # Calling Nullary returns whatever the function returns.
+ ret = object()
+ wrapped = Nullary(lambda: ret)
+ self.assertIs(ret, wrapped())
+
+ def test_raises(self):
+ # If the function raises, so does Nullary when called.
+ wrapped = Nullary(lambda: 1/0)
+ self.assertRaises(ZeroDivisionError, wrapped)
+
+
+class TestAttributes(TestCase):
+
+ def test_simple_attr(self):
+ # Adding an attr to a test changes its id().
+ class MyTest(WithAttributes, TestCase):
+ @attr('foo')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]',
+ case.id())
+
+ def test_multiple_attributes(self):
+ class MyTest(WithAttributes, TestCase):
+ # Not sorted here, forward or backwards.
+ @attr('foo', 'quux', 'bar')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual(
+ 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+ case.id())
+
+ def test_multiple_attr_decorators(self):
+ class MyTest(WithAttributes, TestCase):
+ # Not sorted here, forward or backwards.
+ @attr('bar')
+ @attr('quux')
+ @attr('foo')
+ def test_bar(self):
+ pass
+ case = MyTest('test_bar')
+ self.assertEqual(
+ 'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+ case.id())
+
+
+class TestDecorateTestCaseResult(TestCase):
+
+ def setUp(self):
+ super(TestDecorateTestCaseResult, self).setUp()
+ self.log = []
+
+ def make_result(self, result):
+ self.log.append(('result', result))
+ return LoggingResult(self.log)
+
+ def test___call__(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+ case(None)
+ case('something')
+ self.assertEqual([('result', None),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ ('result', 'something'),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set())
+ ], self.log)
+
+ def test_run(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+ case.run(None)
+ case.run('something')
+ self.assertEqual([('result', None),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ ('result', 'something'),
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set())
+ ], self.log)
+
+ def test_before_after_hooks(self):
+ case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result,
+ before_run=lambda result: self.log.append('before'),
+ after_run=lambda result: self.log.append('after'))
+ case.run(None)
+ case(None)
+ self.assertEqual([
+ ('result', None),
+ 'before',
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ 'after',
+ ('result', None),
+ 'before',
+ ('tags', set(), set()),
+ ('startTest', case.decorated),
+ ('addSuccess', case.decorated),
+ ('stopTest', case.decorated),
+ ('tags', set(), set()),
+ 'after',
+ ], self.log)
+
+ def test_other_attribute(self):
+ orig = PlaceHolder('foo')
+ orig.thing = 'fred'
+ case = DecorateTestCaseResult(orig, self.make_result)
+ self.assertEqual('fred', case.thing)
+ self.assertRaises(AttributeError, getattr, case, 'other')
+ case.other = 'barbara'
+ self.assertEqual('barbara', orig.other)
+ del case.thing
+ self.assertRaises(AttributeError, getattr, orig, 'thing')
+
+
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
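
One of the additions above, the WithAttributes/attr pair, is easy to demonstrate in isolation: attributes declared with the decorator are appended, sorted, to the test id so runners can filter on them. A minimal sketch; the class and attribute names are illustrative.

from testtools import TestCase
from testtools.testcase import WithAttributes, attr

class TaggedTest(WithAttributes, TestCase):

    @attr('slow', 'network')
    def test_download(self):
        pass

case = TaggedTest('test_download')
print(case.id())   # ends with 'TaggedTest.test_download[network,slow]'
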
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py
new file mode 100644
index 00000000000..04aa0873ccd
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_testresult.py
@@ -0,0 +1,2919 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test TestResults and related things."""
+
+__metaclass__ = type
+
+import codecs
+import datetime
+import doctest
+from itertools import chain, combinations
+import os
+import shutil
+import sys
+import tempfile
+import threading
+from unittest import TestSuite
+import warnings
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools import (
+ CopyStreamResult,
+ ExtendedToOriginalDecorator,
+ ExtendedToStreamDecorator,
+ MultiTestResult,
+ PlaceHolder,
+ StreamFailFast,
+ StreamResult,
+ StreamResultRouter,
+ StreamSummary,
+ StreamTagger,
+ StreamToDict,
+ StreamToExtendedDecorator,
+ StreamToQueue,
+ Tagger,
+ TestCase,
+ TestControl,
+ TestResult,
+ TestResultDecorator,
+ TestByTestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ TimestampingStreamResult,
+ testresult,
+ )
+from testtools.compat import (
+ _b,
+ _get_exception_encoding,
+ _r,
+ _u,
+ advance_iterator,
+ str_is_unicode,
+ StringIO,
+ )
+from testtools.content import (
+ Content,
+ content_from_stream,
+ text_content,
+ TracebackContent,
+ )
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.matchers import (
+ AllMatch,
+ Contains,
+ DocTestMatches,
+ Equals,
+ HasLength,
+ MatchesAny,
+ MatchesException,
+ Raises,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ FullStackRunTest,
+ LoggingResult,
+ run_with_stack_hidden,
+ )
+from testtools.testresult.doubles import (
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ StreamResult as LoggingStreamResult,
+ )
+from testtools.testresult.real import (
+ _details_to_str,
+ _merge_tags,
+ utc,
+ )
+
+
+def make_erroring_test():
+ class Test(TestCase):
+ def error(self):
+ 1/0
+ return Test("error")
+
+
+def make_failing_test():
+ class Test(TestCase):
+ def failed(self):
+ self.fail("yo!")
+ return Test("failed")
+
+
+def make_mismatching_test():
+ class Test(TestCase):
+ def mismatch(self):
+ self.assertEqual(1, 2)
+ return Test("mismatch")
+
+
+def make_unexpectedly_successful_test():
+ class Test(TestCase):
+ def succeeded(self):
+ self.expectFailure("yo!", lambda: None)
+ return Test("succeeded")
+
+
+def make_test():
+ class Test(TestCase):
+ def test(self):
+ pass
+ return Test("test")
+
+
+def make_exception_info(exceptionFactory, *args, **kwargs):
+ try:
+ raise exceptionFactory(*args, **kwargs)
+ except:
+ return sys.exc_info()
+
+
+class Python26Contract(object):
+
+ def test_fresh_result_is_successful(self):
+ # A result is considered successful before any tests are run.
+ result = self.makeResult()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addError_is_failure(self):
+ # addError fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addFailure_is_failure(self):
+ # addFailure fails the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+ def test_addSuccess_is_success(self):
+ # addSuccess does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_stop_sets_shouldStop(self):
+ result = self.makeResult()
+ result.stop()
+ self.assertTrue(result.shouldStop)
+
+
+class Python27Contract(Python26Contract):
+
+ def test_addExpectedFailure(self):
+ # Calling addExpectedFailure(test, exc_info) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+
+ def test_addExpectedFailure_is_success(self):
+ # addExpectedFailure does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, an_exc_info)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addSkipped(self):
+ # Calling addSkip(test, reason) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+
+ def test_addSkip_is_success(self):
+ # addSkip does not fail the test run.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, _u("Skipped for some reason"))
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_addUnexpectedSuccess(self):
+ # Calling addUnexpectedSuccess(test) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+
+ def test_addUnexpectedSuccess_was_successful(self):
+ # addUnexpectedSuccess does not fail the test run in Python 2.7.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startStopTestRun(self):
+        # Calling startTestRun followed by stopTestRun completes ok.
+ result = self.makeResult()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_failfast(self):
+ result = self.makeResult()
+ result.failfast = True
+ class Failing(TestCase):
+ def test_a(self):
+ self.fail('a')
+ def test_b(self):
+ self.fail('b')
+ TestSuite([Failing('test_a'), Failing('test_b')]).run(result)
+ self.assertEqual(1, result.testsRun)
+
+
+class TagsContract(Python27Contract):
+ """Tests to ensure correct tagging behaviour.
+
+ See the subunit docs for guidelines on how this is supposed to work.
+ """
+
+ def test_no_tags_by_default(self):
+ # Results initially have no tags.
+ result = self.makeResult()
+ result.startTestRun()
+ self.assertEqual(frozenset(), result.current_tags)
+
+ def test_adding_tags(self):
+ # Tags are added using 'tags' and thus become visible in
+ # 'current_tags'.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ self.assertEqual(set(['foo']), result.current_tags)
+
+ def test_removing_tags(self):
+ # Tags are removed using 'tags'.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.tags(set(), set(['foo']))
+ self.assertEqual(set(), result.current_tags)
+
+ def test_startTestRun_resets_tags(self):
+ # startTestRun makes a new test run, and thus clears all the tags.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTestRun()
+ self.assertEqual(set(), result.current_tags)
+
+ def test_add_tags_within_test(self):
+        # Tags can be added while a test is running.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(['bar']), set())
+ self.assertEqual(set(['foo', 'bar']), result.current_tags)
+
+ def test_tags_added_in_test_are_reverted(self):
+ # Tags added during a test run are then reverted once that test has
+ # finished.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(['bar']), set())
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertEqual(set(['foo']), result.current_tags)
+
+ def test_tags_removed_in_test(self):
+ # Tags can be removed during tests.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(), set(['foo']))
+ self.assertEqual(set(), result.current_tags)
+
+ def test_tags_removed_in_test_are_restored(self):
+ # Tags removed during tests are restored once that test has finished.
+ result = self.makeResult()
+ result.startTestRun()
+ result.tags(set(['foo']), set())
+ result.startTest(self)
+ result.tags(set(), set(['foo']))
+ result.addSuccess(self)
+ result.stopTest(self)
+ self.assertEqual(set(['foo']), result.current_tags)
+
+
+class DetailsContract(TagsContract):
+ """Tests for the details API of TestResults."""
+
+ def test_addExpectedFailure_details(self):
+ # Calling addExpectedFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addExpectedFailure(self, details={})
+
+ def test_addError_details(self):
+ # Calling addError(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, details={})
+
+ def test_addFailure_details(self):
+ # Calling addFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, details={})
+
+ def test_addSkipped_details(self):
+        # Calling addSkip(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSkip(self, details={})
+
+ def test_addUnexpectedSuccess_details(self):
+        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self, details={})
+
+ def test_addSuccess_details(self):
+        # Calling addSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addSuccess(self, details={})
+
+
+class FallbackContract(DetailsContract):
+ """When we fallback we take our policy choice to map calls.
+
+ For instance, we map unexpectedSuccess to an error code, not to success.
+ """
+
+ def test_addUnexpectedSuccess_was_successful(self):
+        # addUnexpectedSuccess fails the test run in testtools.
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ self.assertFalse(result.wasSuccessful())
+
+
+class StartTestRunContract(FallbackContract):
+ """Defines the contract for testtools policy choices.
+
+    That is, things which are not simply extensions to unittest, but choices
+    we have made differently.
+ """
+
+ def test_startTestRun_resets_unexpected_success(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addUnexpectedSuccess(self)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_failure(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addFailure(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+ def test_startTestRun_resets_errors(self):
+ result = self.makeResult()
+ result.startTest(self)
+ result.addError(self, an_exc_info)
+ result.stopTest(self)
+ result.startTestRun()
+ self.assertTrue(result.wasSuccessful())
+
+
+class TestTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TestResult()
+
+
+class TestMultiTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return MultiTestResult(TestResult(), TestResult())
+
+
+class TestTextTestResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TextTestResult(StringIO())
+
+
+class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ result_semaphore = threading.Semaphore(1)
+ target = TestResult()
+ return ThreadsafeForwardingResult(target, result_semaphore)
+
+
+class TestExtendedTestResultContract(TestCase, StartTestRunContract):
+
+ def makeResult(self):
+ return ExtendedTestResult()
+
+
+class TestPython26TestResultContract(TestCase, Python26Contract):
+
+ def makeResult(self):
+ return Python26TestResult()
+
+
+class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python26TestResult())
+
+
+class TestPython27TestResultContract(TestCase, Python27Contract):
+
+ def makeResult(self):
+ return Python27TestResult()
+
+
+class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToOriginalDecorator(Python27TestResult())
+
+
+class TestAdaptedStreamResult(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
+
+ run_test_with = FullStackRunTest
+
+ def makeResult(self):
+ return TestResultDecorator(TestResult())
+
+
+# DetailsContract because ExtendedToStreamDecorator follows Python for
+# uxsuccess handling.
+class TestStreamToExtendedContract(TestCase, DetailsContract):
+
+ def makeResult(self):
+ return ExtendedToStreamDecorator(
+ StreamToExtendedDecorator(ExtendedTestResult()))
+
+
+class TestStreamResultContract(object):
+
+ def _make_result(self):
+ raise NotImplementedError(self._make_result)
+
+ def test_startTestRun(self):
+ result = self._make_result()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_files(self):
+ # Test parameter combinations when files are being emitted.
+ result = self._make_result()
+ result.startTestRun()
+ self.addCleanup(result.stopTestRun)
+ now = datetime.datetime.now(utc)
+ inputs = list(dict(
+ eof=True,
+ mime_type="text/plain",
+ route_code=_u("1234"),
+ test_id=_u("foo"),
+ timestamp=now,
+ ).items())
+ param_dicts = self._power_set(inputs)
+ for kwargs in param_dicts:
+ result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
+ result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)
+
+ def test_test_status(self):
+ # Tests non-file attachment parameter combinations.
+ result = self._make_result()
+ result.startTestRun()
+ self.addCleanup(result.stopTestRun)
+ now = datetime.datetime.now(utc)
+ args = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
+ 'uxsuccess', 'success', 'fail', 'skip']]
+ inputs = list(dict(
+ runnable=False,
+ test_tags=set(['quux']),
+ route_code=_u("1234"),
+ timestamp=now,
+ ).items())
+ param_dicts = self._power_set(inputs)
+ for kwargs in param_dicts:
+ for arg in args:
+ result.status(test_id=arg[0], test_status=arg[1], **kwargs)
+
+ def _power_set(self, iterable):
+ "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+ s = list(iterable)
+ param_dicts = []
+ for ss in chain.from_iterable(combinations(s, r) for r in range(len(s)+1)):
+ param_dicts.append(dict(ss))
+ return param_dicts
+
+
+class TestBaseStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamResult()
+
+
+class TestCopyStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return CopyStreamResult([StreamResult(), StreamResult()])
+
+
+class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return LoggingStreamResult()
+
+
+class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamSummary()
+
+
+class TestStreamTaggerContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamTagger([StreamResult()], add=set(), discard=set())
+
+
+class TestStreamToDictContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamToDict(lambda x:None)
+
+
+class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamToExtendedDecorator(ExtendedTestResult())
+
+
+class TestStreamToQueueContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ queue = Queue()
+ return StreamToQueue(queue, "foo")
+
+
+class TestStreamFailFastContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamFailFast(lambda:None)
+
+
+class TestStreamResultRouterContract(TestCase, TestStreamResultContract):
+
+ def _make_result(self):
+ return StreamResultRouter(StreamResult())
+
+
+class TestDoubleStreamResultEvents(TestCase):
+
+ def test_startTestRun(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ self.assertEqual([('startTestRun',)], result._events)
+
+ def test_stopTestRun(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ result.stopTestRun()
+ self.assertEqual([('startTestRun',), ('stopTestRun',)], result._events)
+
+ def test_file(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.status(file_name="foo", file_bytes="bar", eof=True, mime_type="text/json",
+ test_id="id", route_code='abc', timestamp=now)
+ self.assertEqual(
+ [('startTestRun',),
+ ('status', 'id', None, None, True, 'foo', 'bar', True, 'text/json', 'abc', now)],
+ result._events)
+
+ def test_status(self):
+ result = LoggingStreamResult()
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.status("foo", "success", test_tags=set(['tag']),
+ runnable=False, route_code='abc', timestamp=now)
+ self.assertEqual(
+ [('startTestRun',),
+ ('status', 'foo', 'success', set(['tag']), False, None, None, False, None, 'abc', now)],
+ result._events)
+
+
+class TestCopyStreamResultCopies(TestCase):
+
+ def setUp(self):
+ super(TestCopyStreamResultCopies, self).setUp()
+ self.target1 = LoggingStreamResult()
+ self.target2 = LoggingStreamResult()
+ self.targets = [self.target1._events, self.target2._events]
+ self.result = CopyStreamResult([self.target1, self.target2])
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))
+
+ def test_stopTestRun(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.targets,
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+
+ def test_status(self):
+ self.result.startTestRun()
+ now = datetime.datetime.now(utc)
+ self.result.status("foo", "success", test_tags=set(['tag']),
+ runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
+ mime_type="text/json", route_code='abc', timestamp=now)
+ self.assertThat(self.targets,
+ AllMatch(Equals([('startTestRun',),
+ ('status', 'foo', 'success', set(['tag']), False, "foo",
+ b'bar', True, "text/json", 'abc', now)
+ ])))
+
+
+class TestStreamTagger(TestCase):
+
+ def test_adding(self):
+ log = LoggingStreamResult()
+ result = StreamTagger([log], add=['foo'])
+ result.startTestRun()
+ result.status()
+ result.status(test_tags=set(['bar']))
+ result.status(test_tags=None)
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+ ('stopTestRun',),
+ ], log._events)
+
+ def test_discarding(self):
+ log = LoggingStreamResult()
+ result = StreamTagger([log], discard=['foo'])
+ result.startTestRun()
+ result.status()
+ result.status(test_tags=None)
+ result.status(test_tags=set(['foo']))
+ result.status(test_tags=set(['bar']))
+ result.status(test_tags=set(['foo', 'bar']))
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, None, True, None, None, False, None, None, None),
+ ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+ ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+ ('stopTestRun',),
+ ], log._events)
+
+
+class TestStreamToDict(TestCase):
+
+ def test_hung_test(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status('foo', 'inprogress')
+ self.assertEqual([], tests)
+ result.stopTestRun()
+ self.assertEqual([
+ {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
+ 'timestamps': [None, None]}
+ ], tests)
+
+ def test_all_terminal_states_reported(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status('success', 'success')
+ result.status('skip', 'skip')
+ result.status('exists', 'exists')
+ result.status('fail', 'fail')
+ result.status('xfail', 'xfail')
+ result.status('uxsuccess', 'uxsuccess')
+ self.assertThat(tests, HasLength(6))
+ self.assertEqual(
+ ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
+ [test['id'] for test in tests])
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(6))
+
+ def test_files_reported(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(file_name="some log.txt",
+ file_bytes=_b("1234 log message"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status(file_name="another file",
+ file_bytes=_b("""Traceback..."""), test_id="foo.bar")
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(1))
+ test = tests[0]
+ self.assertEqual("foo.bar", test['id'])
+ self.assertEqual("unknown", test['status'])
+ details = test['details']
+ self.assertEqual(
+ _u("1234 log message"), details['some log.txt'].as_text())
+ self.assertEqual(
+ _b("Traceback..."),
+ _b('').join(details['another file'].iter_bytes()))
+ self.assertEqual(
+ "application/octet-stream", repr(details['another file'].content_type))
+
+ def test_bad_mime(self):
+        # Testtools used to produce bad mime types; this tests that the
+        # specific corruption is catered for.
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(file_name="file", file_bytes=b'a',
+ mime_type='text/plain; charset=utf8, language=python',
+ test_id='id')
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(1))
+ test = tests[0]
+ self.assertEqual("id", test['id'])
+ details = test['details']
+ self.assertEqual(_u("a"), details['file'].as_text())
+ self.assertEqual(
+ "text/plain; charset=\"utf8\"",
+ repr(details['file'].content_type))
+
+ def test_timestamps(self):
+ tests = []
+ result = StreamToDict(tests.append)
+ result.startTestRun()
+ result.status(test_id='foo', test_status='inprogress', timestamp="A")
+ result.status(test_id='foo', test_status='success', timestamp="B")
+ result.status(test_id='bar', test_status='inprogress', timestamp="C")
+ result.stopTestRun()
+ self.assertThat(tests, HasLength(2))
+ self.assertEqual(["A", "B"], tests[0]['timestamps'])
+ self.assertEqual(["C", None], tests[1]['timestamps'])
+
+
+class TestExtendedToStreamDecorator(TestCase):
+
+ def test_explicit_time(self):
+ log = LoggingStreamResult()
+ result = ExtendedToStreamDecorator(log)
+ result.startTestRun()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ result.startTest(self)
+ result.addSuccess(self)
+ result.stopTest(self)
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status',
+ 'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ None,
+ now),
+ ('status',
+ 'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+ 'success',
+ set(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ None,
+ now),
+ ('stopTestRun',)], log._events)
+
+ def test_wasSuccessful_after_stopTestRun(self):
+ log = LoggingStreamResult()
+ result = ExtendedToStreamDecorator(log)
+ result.startTestRun()
+ result.status(test_id='foo', test_status='fail')
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+
+
+class TestStreamFailFast(TestCase):
+
+ def test_inprogress(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'inprogress')
+
+ def test_exists(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'exists')
+
+ def test_xfail(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'xfail')
+
+ def test_uxsuccess(self):
+ calls = []
+ def hook():
+ calls.append("called")
+ result = StreamFailFast(hook)
+ result.status('foo', 'uxsuccess')
+ result.status('foo', 'uxsuccess')
+ self.assertEqual(['called', 'called'], calls)
+
+ def test_success(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'success')
+
+ def test_fail(self):
+ calls = []
+ def hook():
+ calls.append("called")
+ result = StreamFailFast(hook)
+ result.status('foo', 'fail')
+ result.status('foo', 'fail')
+ self.assertEqual(['called', 'called'], calls)
+
+ def test_skip(self):
+ result = StreamFailFast(self.fail)
+ result.status('foo', 'skip')
+
+
+class TestStreamSummary(TestCase):
+
+ def test_attributes(self):
+ result = StreamSummary()
+ result.startTestRun()
+ self.assertEqual([], result.failures)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.skipped)
+ self.assertEqual([], result.expectedFailures)
+ self.assertEqual([], result.unexpectedSuccesses)
+ self.assertEqual(0, result.testsRun)
+
+ def test_startTestRun(self):
+ result = StreamSummary()
+ result.startTestRun()
+ result.failures.append('x')
+ result.errors.append('x')
+ result.skipped.append('x')
+ result.expectedFailures.append('x')
+ result.unexpectedSuccesses.append('x')
+ result.testsRun = 1
+ result.startTestRun()
+ self.assertEqual([], result.failures)
+ self.assertEqual([], result.errors)
+ self.assertEqual([], result.skipped)
+ self.assertEqual([], result.expectedFailures)
+ self.assertEqual([], result.unexpectedSuccesses)
+ self.assertEqual(0, result.testsRun)
+
+ def test_wasSuccessful(self):
+ # wasSuccessful returns False if any of
+ # failures/errors is non-empty.
+ result = StreamSummary()
+ result.startTestRun()
+ self.assertEqual(True, result.wasSuccessful())
+ result.failures.append('x')
+ self.assertEqual(False, result.wasSuccessful())
+ result.startTestRun()
+ result.errors.append('x')
+ self.assertEqual(False, result.wasSuccessful())
+ result.startTestRun()
+ result.skipped.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+ result.startTestRun()
+ result.expectedFailures.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+ result.startTestRun()
+ result.unexpectedSuccesses.append('x')
+ self.assertEqual(True, result.wasSuccessful())
+
+ def test_stopTestRun(self):
+ result = StreamSummary()
+ # terminal successful codes.
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.status("foo", "success")
+ result.status("bar", "skip")
+ result.status("baz", "exists")
+ result.stopTestRun()
+ self.assertEqual(True, result.wasSuccessful())
+ # Existence is terminal but doesn't count as 'running' a test.
+ self.assertEqual(2, result.testsRun)
+
+ def test_stopTestRun_inprogress_test_fails(self):
+ # Tests inprogress at stopTestRun trigger a failure.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+ self.assertThat(result.errors, HasLength(1))
+ self.assertEqual("foo", result.errors[0][0].id())
+ self.assertEqual("Test did not complete", result.errors[0][1])
+        # interim state detection handles route codes - while duplicate ids in
+        # one run are undesirable, they may happen (e.g. with repeated tests).
+ result.startTestRun()
+ result.status("foo", "inprogress")
+ result.status("foo", "inprogress", route_code="A")
+ result.status("foo", "success", route_code="A")
+ result.stopTestRun()
+ self.assertEqual(False, result.wasSuccessful())
+
+ def test_status_skip(self):
+        # when a skip is seen, a synthetic test is reported with the reason
+        # captured from the 'reason' file attachment, if any.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status(file_name="reason",
+ file_bytes=_b("Missing dependency"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status("foo.bar", "skip")
+ self.assertThat(result.skipped, HasLength(1))
+ self.assertEqual("foo.bar", result.skipped[0][0].id())
+ self.assertEqual(_u("Missing dependency"), result.skipped[0][1])
+
+ def _report_files(self, result):
+ result.status(file_name="some log.txt",
+ file_bytes=_b("1234 log message"), eof=True,
+ mime_type="text/plain; charset=utf8", test_id="foo.bar")
+ result.status(file_name="traceback",
+ file_bytes=_b("""Traceback (most recent call last):
+ File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")
+
+ files_message = Equals(_u("""some log.txt: {{{1234 log message}}}
+
+Traceback (most recent call last):
+ File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+ AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""))
+
+ def test_status_fail(self):
+        # when a fail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+ result = StreamSummary()
+ result.startTestRun()
+ self._report_files(result)
+ result.status("foo.bar", "fail")
+ self.assertThat(result.errors, HasLength(1))
+ self.assertEqual("foo.bar", result.errors[0][0].id())
+ self.assertThat(result.errors[0][1], self.files_message)
+
+ def test_status_xfail(self):
+        # when an xfail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+ result = StreamSummary()
+ result.startTestRun()
+ self._report_files(result)
+ result.status("foo.bar", "xfail")
+ self.assertThat(result.expectedFailures, HasLength(1))
+ self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
+ self.assertThat(result.expectedFailures[0][1], self.files_message)
+
+ def test_status_uxsuccess(self):
+ # when uxsuccess is seen, a synthetic test is reported.
+ result = StreamSummary()
+ result.startTestRun()
+ result.status("foo.bar", "uxsuccess")
+ self.assertThat(result.unexpectedSuccesses, HasLength(1))
+ self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
+
+
+class TestTestControl(TestCase):
+
+ def test_default(self):
+ self.assertEqual(False, TestControl().shouldStop)
+
+ def test_stop(self):
+ control = TestControl()
+ control.stop()
+ self.assertEqual(True, control.shouldStop)
+
+
+class TestTestResult(TestCase):
+ """Tests for 'TestResult'."""
+
+ run_tests_with = FullStackRunTest
+
+ def makeResult(self):
+ """Make an arbitrary result for testing."""
+ return TestResult()
+
+ def test_addSkipped(self):
+ # Calling addSkip on a TestResult records the test that was skipped in
+ # its skip_reasons dict.
+ result = self.makeResult()
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for another reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self],
+ _u("Skipped for another reason"):[self]},
+ result.skip_reasons)
+
+ def test_now_datetime_now(self):
+ result = self.makeResult()
+ olddatetime = testresult.real.datetime
+ def restore():
+ testresult.real.datetime = olddatetime
+ self.addCleanup(restore)
+ class Module:
+ pass
+ now = datetime.datetime.now(utc)
+ stubdatetime = Module()
+ stubdatetime.datetime = Module()
+ stubdatetime.datetime.now = lambda tz: now
+ testresult.real.datetime = stubdatetime
+ # Calling _now() looks up the time.
+ self.assertEqual(now, result._now())
+ then = now + datetime.timedelta(0, 1)
+ # Set an explicit datetime, which gets returned from then on.
+ result.time(then)
+ self.assertNotEqual(now, result._now())
+ self.assertEqual(then, result._now())
+ # go back to looking it up.
+ result.time(None)
+ self.assertEqual(now, result._now())
+
+ def test_now_datetime_time(self):
+ result = self.makeResult()
+ now = datetime.datetime.now(utc)
+ result.time(now)
+ self.assertEqual(now, result._now())
+
+ def test_traceback_formatting_without_stack_hidden(self):
+ # During the testtools test run, we show our levels of the stack,
+ # because we want to be able to use our test suite to debug our own
+ # code.
+ result = self.makeResult()
+ test = make_erroring_test()
+ test.run(result)
+ self.assertThat(
+ result.errors[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...runtest.py", line ..., in _run_user\n'
+ ' return fn(*args, **kwargs)\n'
+ ' File "...testtools...testcase.py", line ..., in _run_test_method\n'
+ ' return self._get_test_method()()\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
+ ' 1/0\n'
+ 'ZeroDivisionError: ...\n',
+ doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+
+ def test_traceback_formatting_with_stack_hidden(self):
+ result = self.makeResult()
+ test = make_erroring_test()
+ run_with_stack_hidden(True, test.run, result)
+ self.assertThat(
+ result.errors[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in error\n'
+ ' 1/0\n'
+ 'ZeroDivisionError: ...\n',
+ doctest.ELLIPSIS))
+
+ def test_traceback_formatting_with_stack_hidden_mismatch(self):
+ result = self.makeResult()
+ test = make_mismatching_test()
+ run_with_stack_hidden(True, test.run, result)
+ self.assertThat(
+ result.failures[0][1],
+ DocTestMatches(
+ 'Traceback (most recent call last):\n'
+ ' File "...testtools...tests...test_testresult.py", line ..., in mismatch\n'
+ ' self.assertEqual(1, 2)\n'
+ '...MismatchError: 1 != 2\n',
+ doctest.ELLIPSIS))
+
+ def test_exc_info_to_unicode(self):
+ # subunit upcalls to TestResult._exc_info_to_unicode, so we need to
+ # make sure that it's there.
+ #
+ # See <https://bugs.launchpad.net/testtools/+bug/929063>.
+ test = make_erroring_test()
+ exc_info = make_exception_info(RuntimeError, "foo")
+ result = self.makeResult()
+ text_traceback = result._exc_info_to_unicode(exc_info, test)
+ self.assertEqual(
+ TracebackContent(exc_info, test).as_text(), text_traceback)
+
+
+class TestMultiTestResult(TestCase):
+ """Tests for 'MultiTestResult'."""
+
+ def setUp(self):
+ super(TestMultiTestResult, self).setUp()
+ self.result1 = LoggingResult([])
+ self.result2 = LoggingResult([])
+ self.multiResult = MultiTestResult(self.result1, self.result2)
+
+ def assertResultLogsEqual(self, expectedEvents):
+ """Assert that our test results have received the expected events."""
+ self.assertEqual(expectedEvents, self.result1._events)
+ self.assertEqual(expectedEvents, self.result2._events)
+
+ def test_repr(self):
+ self.assertEqual(
+ '<MultiTestResult (%r, %r)>' % (
+ ExtendedToOriginalDecorator(self.result1),
+ ExtendedToOriginalDecorator(self.result2)),
+ repr(self.multiResult))
+
+ def test_empty(self):
+ # Initializing a `MultiTestResult` doesn't do anything to its
+ # `TestResult`s.
+ self.assertResultLogsEqual([])
+
+ def test_failfast_get(self):
+ # Reading reads from the first one - arbitrary choice.
+ self.assertEqual(False, self.multiResult.failfast)
+ self.result1.failfast = True
+ self.assertEqual(True, self.multiResult.failfast)
+
+ def test_failfast_set(self):
+ # Writing writes to all.
+ self.multiResult.failfast = True
+ self.assertEqual(True, self.result1.failfast)
+ self.assertEqual(True, self.result2.failfast)
+
+ def test_shouldStop(self):
+ self.assertFalse(self.multiResult.shouldStop)
+ self.result2.stop()
+ # NB: result1 is not stopped: MultiTestResult has to combine the
+ # values.
+ self.assertTrue(self.multiResult.shouldStop)
+
+ def test_startTest(self):
+ # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
+ # its `TestResult`s.
+ self.multiResult.startTest(self)
+ self.assertResultLogsEqual([('startTest', self)])
+
+ def test_stop(self):
+ self.assertFalse(self.multiResult.shouldStop)
+ self.multiResult.stop()
+ self.assertResultLogsEqual(['stop'])
+
+ def test_stopTest(self):
+ # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
+ # its `TestResult`s.
+ self.multiResult.stopTest(self)
+ self.assertResultLogsEqual([('stopTest', self)])
+
+ def test_addSkipped(self):
+ # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
+ # results.
+ reason = _u("Skipped for some reason")
+ self.multiResult.addSkip(self, reason)
+ self.assertResultLogsEqual([('addSkip', self, reason)])
+
+ def test_addSuccess(self):
+ # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
+ # all its `TestResult`s.
+ self.multiResult.addSuccess(self)
+ self.assertResultLogsEqual([('addSuccess', self)])
+
+ def test_done(self):
+ # Calling `done` on a `MultiTestResult` calls `done` on all its
+ # `TestResult`s.
+ self.multiResult.done()
+ self.assertResultLogsEqual([('done')])
+
+ def test_addFailure(self):
+ # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
+ # all its `TestResult`s.
+ exc_info = make_exception_info(AssertionError, 'failure')
+ self.multiResult.addFailure(self, exc_info)
+ self.assertResultLogsEqual([('addFailure', self, exc_info)])
+
+ def test_addError(self):
+ # Calling `addError` on a `MultiTestResult` calls `addError` on all
+ # its `TestResult`s.
+ exc_info = make_exception_info(RuntimeError, 'error')
+ self.multiResult.addError(self, exc_info)
+ self.assertResultLogsEqual([('addError', self, exc_info)])
+
+ def test_startTestRun(self):
+ # Calling `startTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.startTestRun()
+ self.assertResultLogsEqual([('startTestRun')])
+
+ def test_stopTestRun(self):
+ # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.stopTestRun()
+ self.assertResultLogsEqual([('stopTestRun')])
+
+ def test_stopTestRun_returns_results(self):
+        # `MultiTestResult.stopTestRun` returns a tuple of all of the return
+        # values of the `stopTestRun`s that it forwards to.
+ class Result(LoggingResult):
+ def stopTestRun(self):
+ super(Result, self).stopTestRun()
+ return 'foo'
+ multi_result = MultiTestResult(Result([]), Result([]))
+ result = multi_result.stopTestRun()
+ self.assertEqual(('foo', 'foo'), result)
+
+ def test_tags(self):
+ # Calling `tags` on a `MultiTestResult` calls `tags` on all its
+ # `TestResult`s.
+ added_tags = set(['foo', 'bar'])
+ removed_tags = set(['eggs'])
+ self.multiResult.tags(added_tags, removed_tags)
+ self.assertResultLogsEqual([('tags', added_tags, removed_tags)])
+
+ def test_time(self):
+ # the time call is dispatched, not eaten by the base class
+ self.multiResult.time('foo')
+ self.assertResultLogsEqual([('time', 'foo')])
+
+
+class TestTextTestResult(TestCase):
+ """Tests for 'TextTestResult'."""
+
+ def setUp(self):
+ super(TestTextTestResult, self).setUp()
+ self.result = TextTestResult(StringIO())
+
+ def getvalue(self):
+ return self.result.stream.getvalue()
+
+ def test__init_sets_stream(self):
+ result = TextTestResult("fp")
+ self.assertEqual("fp", result.stream)
+
+ def reset_output(self):
+ self.result.stream = StringIO()
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertEqual("Tests running...\n", self.getvalue())
+
+ def test_stopTestRun_count_many(self):
+ test = make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.stream = StringIO()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_single(self):
+ test = make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_zero(self):
+ self.result.startTestRun()
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_current_time(self):
+ test = make_test()
+ now = datetime.datetime.now(utc)
+ self.result.time(now)
+ self.result.startTestRun()
+ self.result.startTest(test)
+ now = now + datetime.timedelta(0, 0, 0, 1)
+ self.result.time(now)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_successful(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_failure(self):
+ test = make_failing_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_error(self):
+ test = make_erroring_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_unexpected_success(self):
+ test = make_unexpectedly_successful_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_shows_details(self):
+ self.skip("Disabled per bug 1188420")
+ def run_tests():
+ self.result.startTestRun()
+ make_erroring_test().run(self.result)
+ make_unexpectedly_successful_test().run(self.result)
+ make_failing_test().run(self.result)
+ self.reset_output()
+ self.result.stopTestRun()
+ run_with_stack_hidden(True, run_tests)
+ self.assertThat(self.getvalue(),
+ DocTestMatches("""...======================================================================
+ERROR: testtools.tests.test_testresult.Test.error
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "...testtools...tests...test_testresult.py", line ..., in error
+ 1/0
+ZeroDivisionError:... divi... by zero...
+======================================================================
+FAIL: testtools.tests.test_testresult.Test.failed
+----------------------------------------------------------------------
+Traceback (most recent call last):
+ File "...testtools...tests...test_testresult.py", line ..., in failed
+ self.fail("yo!")
+AssertionError: yo!
+======================================================================
+UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
+----------------------------------------------------------------------
+...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
+
+
+class TestThreadSafeForwardingResult(TestCase):
+ """Tests for `TestThreadSafeForwardingResult`."""
+
+ def make_results(self, n):
+ events = []
+ target = LoggingResult(events)
+ semaphore = threading.Semaphore(1)
+ return [
+ ThreadsafeForwardingResult(target, semaphore)
+ for i in range(n)], events
+
+ def test_nonforwarding_methods(self):
+ # startTest and stopTest are not forwarded because they need to be
+ # batched.
+ [result], events = self.make_results(1)
+ result.startTest(self)
+ result.stopTest(self)
+ self.assertEqual([], events)
+
+ def test_tags_not_forwarded(self):
+ # Tags need to be batched for each test, so they aren't forwarded
+ # until a test runs.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo']), set(['bar']))
+ self.assertEqual([], events)
+
+ def test_global_tags_simple(self):
+ # Tags specified outside of a test result are global. When a test's
+ # results are finally forwarded, we send through these global tags
+        # *as* test-specific tags, because as a multiplexer there should be no
+        # way for a global tag on an input stream to affect tests from other
+        # streams - we can just always issue test-local tags.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo']), set())
+ result.time(1)
+ result.startTest(self)
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['foo']), set()),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_global_tags_complex(self):
+ # Multiple calls to tags() in a global context are buffered until the
+        # next test completes and are issued as part of the test context,
+        # because they cannot be issued until the output result is locked.
+        # The sample data shows them being merged together; this is, strictly
+        # speaking, incidental - they could be issued separately (in order)
+        # and still be legitimate.
+ [result], events = self.make_results(1)
+ result.tags(set(['foo', 'bar']), set(['baz', 'qux']))
+ result.tags(set(['cat', 'qux']), set(['bar', 'dog']))
+ result.time(1)
+ result.startTest(self)
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['cat', 'foo', 'qux']), set(['dog', 'bar', 'baz'])),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_local_tags(self):
+ # Any tags set within a test context are forwarded in that test
+ # context when the result is finally forwarded. This means that the
+ # tags for the test are part of the atomic message communicating
+ # everything about that test.
+ [result], events = self.make_results(1)
+ result.time(1)
+ result.startTest(self)
+ result.tags(set(['foo']), set([]))
+ result.tags(set(), set(['bar']))
+ result.time(2)
+ result.addSuccess(self)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', self),
+ ('time', 2),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_local_tags_dont_leak(self):
+ # A tag set during a test is local to that test and is not set during
+ # the tests that follow.
+ [result], events = self.make_results(1)
+ a, b = PlaceHolder('a'), PlaceHolder('b')
+ result.time(1)
+ result.startTest(a)
+ result.tags(set(['foo']), set([]))
+ result.time(2)
+ result.addSuccess(a)
+ result.stopTest(a)
+ result.time(3)
+ result.startTest(b)
+ result.time(4)
+ result.addSuccess(b)
+ result.stopTest(b)
+ self.assertEqual(
+ [('time', 1),
+ ('startTest', a),
+ ('time', 2),
+ ('tags', set(['foo']), set()),
+ ('addSuccess', a),
+ ('stopTest', a),
+ ('time', 3),
+ ('startTest', b),
+ ('time', 4),
+ ('addSuccess', b),
+ ('stopTest', b),
+ ], events)
+
+ def test_startTestRun(self):
+ # Calls to startTestRun are not batched, because we are only
+ # interested in sending tests atomically, not the whole run.
+ [result1, result2], events = self.make_results(2)
+ result1.startTestRun()
+ result2.startTestRun()
+ self.assertEqual(["startTestRun", "startTestRun"], events)
+
+ def test_stopTestRun(self):
+ # Calls to stopTestRun are not batched, because we are only
+ # interested in sending tests atomically, not the whole run.
+ [result1, result2], events = self.make_results(2)
+ result1.stopTestRun()
+ result2.stopTestRun()
+ self.assertEqual(["stopTestRun", "stopTestRun"], events)
+
+ def test_forward_addError(self):
+ # Once we receive an addError event, we forward all of the events for
+ # that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ exc_info = make_exception_info(RuntimeError, 'error')
+ start_time = datetime.datetime.utcfromtimestamp(1.489)
+ end_time = datetime.datetime.utcfromtimestamp(51.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addError(self, exc_info)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addError', self, exc_info),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addFailure(self):
+ # Once we receive an addFailure event, we forward all of the events
+ # for that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ exc_info = make_exception_info(AssertionError, 'failure')
+ start_time = datetime.datetime.utcfromtimestamp(2.489)
+ end_time = datetime.datetime.utcfromtimestamp(3.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addFailure(self, exc_info)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addFailure', self, exc_info),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addSkip(self):
+ # Once we receive an addSkip event, we forward all of the events for
+ # that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ reason = _u("Skipped for some reason")
+ start_time = datetime.datetime.utcfromtimestamp(4.489)
+ end_time = datetime.datetime.utcfromtimestamp(5.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addSkip(self, reason)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addSkip', self, reason),
+ ('stopTest', self),
+ ], events)
+
+ def test_forward_addSuccess(self):
+ # Once we receive an addSuccess event, we forward all of the events
+ # for that test, as we now know that test is complete.
+ [result], events = self.make_results(1)
+ start_time = datetime.datetime.utcfromtimestamp(6.489)
+ end_time = datetime.datetime.utcfromtimestamp(7.476)
+ result.time(start_time)
+ result.startTest(self)
+ result.time(end_time)
+ result.addSuccess(self)
+ self.assertEqual([
+ ('time', start_time),
+ ('startTest', self),
+ ('time', end_time),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], events)
+
+ def test_only_one_test_at_a_time(self):
+ # Even if there are multiple ThreadsafeForwardingResults forwarding to
+ # the same target result, the target result only receives the complete
+ # events for one test at a time.
+ [result1, result2], events = self.make_results(2)
+ test1, test2 = self, make_test()
+ start_time1 = datetime.datetime.utcfromtimestamp(1.489)
+ end_time1 = datetime.datetime.utcfromtimestamp(2.476)
+ start_time2 = datetime.datetime.utcfromtimestamp(3.489)
+ end_time2 = datetime.datetime.utcfromtimestamp(4.489)
+ result1.time(start_time1)
+ result2.time(start_time2)
+ result1.startTest(test1)
+ result2.startTest(test2)
+ result1.time(end_time1)
+ result2.time(end_time2)
+ result2.addSuccess(test2)
+ result1.addSuccess(test1)
+ self.assertEqual([
+ # test2 finishes first, and so is flushed first.
+ ('time', start_time2),
+ ('startTest', test2),
+ ('time', end_time2),
+ ('addSuccess', test2),
+ ('stopTest', test2),
+ # test1 finishes next, and thus follows.
+ ('time', start_time1),
+ ('startTest', test1),
+ ('time', end_time1),
+ ('addSuccess', test1),
+ ('stopTest', test1),
+ ], events)
+
+
+class TestMergeTags(TestCase):
+
+ def test_merge_unseen_gone_tag(self):
+ # If an incoming "gone" tag isn't currently tagged one way or the
+ # other, add it to the "gone" tags.
+ current_tags = set(['present']), set(['missing'])
+ changing_tags = set(), set(['going'])
+ expected = set(['present']), set(['missing', 'going'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_incoming_gone_tag_with_current_new_tag(self):
+ # If one of the incoming "gone" tags is one of the existing "new"
+ # tags, then it overrides the "new" tag, leaving it marked as "gone".
+ current_tags = set(['present', 'going']), set(['missing'])
+ changing_tags = set(), set(['going'])
+ expected = set(['present']), set(['missing', 'going'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_unseen_new_tag(self):
+ current_tags = set(['present']), set(['missing'])
+ changing_tags = set(['coming']), set()
+ expected = set(['coming', 'present']), set(['missing'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+ def test_merge_incoming_new_tag_with_current_gone_tag(self):
+ # If one of the incoming "new" tags is currently marked as "gone",
+ # then it overrides the "gone" tag, leaving it marked as "new".
+ current_tags = set(['present']), set(['coming', 'missing'])
+ changing_tags = set(['coming']), set()
+ expected = set(['coming', 'present']), set(['missing'])
+ self.assertEqual(
+ expected, _merge_tags(current_tags, changing_tags))
+
+
+class TestStreamResultRouter(TestCase):
+
+ def test_start_stop_test_run_no_fallback(self):
+ result = StreamResultRouter()
+ result.startTestRun()
+ result.stopTestRun()
+
+ def test_no_fallback_errors(self):
+ self.assertRaises(Exception, StreamResultRouter().status, test_id='f')
+
+ def test_fallback_calls(self):
+ fallback = LoggingStreamResult()
+ result = StreamResultRouter(fallback)
+ result.startTestRun()
+ result.status(test_id='foo')
+ result.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ('stopTestRun',),
+ ],
+ fallback._events)
+
+ def test_fallback_no_do_start_stop_run(self):
+ fallback = LoggingStreamResult()
+ result = StreamResultRouter(fallback, do_start_stop_run=False)
+ result.startTestRun()
+ result.status(test_id='foo')
+ result.stopTestRun()
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None)
+ ],
+ fallback._events)
+
+ def test_add_rule_bad_policy(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa',
+ route_prefix='0')
+
+ def test_add_rule_extra_policy_arg(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+ route_prefix='0', foo=1)
+
+ def test_add_rule_missing_prefix(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix')
+
+ def test_add_rule_slash_in_prefix(self):
+ router = StreamResultRouter()
+ target = LoggingStreamResult()
+ self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+ route_prefix='0/')
+
+ def test_add_rule_route_code_consume_False(self):
+ fallback = LoggingStreamResult()
+ target = LoggingStreamResult()
+ router = StreamResultRouter(fallback)
+ router.add_rule(target, 'route_code_prefix', route_prefix='0')
+ router.status(test_id='foo', route_code='0')
+ router.status(test_id='foo', route_code='0/1')
+ router.status(test_id='foo')
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, '0',
+ None),
+ ('status', 'foo', None, None, True, None, None, False, None, '0/1',
+ None),
+ ],
+ target._events)
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ],
+ fallback._events)
+
+ def test_add_rule_route_code_consume_True(self):
+ fallback = LoggingStreamResult()
+ target = LoggingStreamResult()
+ router = StreamResultRouter(fallback)
+ router.add_rule(
+ target, 'route_code_prefix', route_prefix='0', consume_route=True)
+ router.status(test_id='foo', route_code='0') # -> None
+ router.status(test_id='foo', route_code='0/1') # -> 1
+ router.status(test_id='foo', route_code='1') # -> fallback as-is.
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, None,
+ None),
+ ('status', 'foo', None, None, True, None, None, False, None, '1',
+ None),
+ ],
+ target._events)
+ self.assertEqual([
+ ('status', 'foo', None, None, True, None, None, False, None, '1',
+ None),
+ ],
+ fallback._events)
+
+ def test_add_rule_test_id(self):
+ nontest = LoggingStreamResult()
+ test = LoggingStreamResult()
+ router = StreamResultRouter(test)
+ router.add_rule(nontest, 'test_id', test_id=None)
+ router.status(test_id='foo', file_name="bar", file_bytes=b'')
+ router.status(file_name="bar", file_bytes=b'')
+ self.assertEqual([
+ ('status', 'foo', None, None, True, 'bar', b'', False, None, None,
+ None),], test._events)
+ self.assertEqual([
+ ('status', None, None, None, True, 'bar', b'', False, None, None,
+ None),], nontest._events)
+
+ def test_add_rule_do_start_stop_run(self):
+ nontest = LoggingStreamResult()
+ router = StreamResultRouter()
+ router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+ router.startTestRun()
+ router.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('stopTestRun',),
+ ], nontest._events)
+
+ def test_add_rule_do_start_stop_run_after_startTestRun(self):
+ nontest = LoggingStreamResult()
+ router = StreamResultRouter()
+ router.startTestRun()
+ router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+ router.stopTestRun()
+ self.assertEqual([
+ ('startTestRun',),
+ ('stopTestRun',),
+ ], nontest._events)
+
+
+class TestStreamToQueue(TestCase):
+
+ def make_result(self):
+ queue = Queue()
+ return queue, StreamToQueue(queue, "foo")
+
+ def test_status(self):
+ def check_event(event_dict, route=None, time=None):
+ self.assertEqual("status", event_dict['event'])
+ self.assertEqual("test", event_dict['test_id'])
+ self.assertEqual("fail", event_dict['test_status'])
+ self.assertEqual(set(["quux"]), event_dict['test_tags'])
+ self.assertEqual(False, event_dict['runnable'])
+ self.assertEqual("file", event_dict['file_name'])
+ self.assertEqual(_b("content"), event_dict['file_bytes'])
+ self.assertEqual(True, event_dict['eof'])
+ self.assertEqual("quux", event_dict['mime_type'])
+ self.assertEqual("test", event_dict['test_id'])
+ self.assertEqual(route, event_dict['route_code'])
+ self.assertEqual(time, event_dict['timestamp'])
+ queue, result = self.make_result()
+ result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+ file_name="file", file_bytes=_b("content"), eof=True,
+ mime_type="quux", route_code=None, timestamp=None)
+ self.assertEqual(1, queue.qsize())
+ a_time = datetime.datetime.now(utc)
+ result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+ file_name="file", file_bytes=_b("content"), eof=True,
+ mime_type="quux", route_code="bar", timestamp=a_time)
+ self.assertEqual(2, queue.qsize())
+ check_event(queue.get(False), route="foo", time=None)
+ check_event(queue.get(False), route="foo/bar", time=a_time)
+
+ def testStartTestRun(self):
+ queue, result = self.make_result()
+ result.startTestRun()
+ self.assertEqual(
+ {'event':'startTestRun', 'result':result}, queue.get(False))
+ self.assertTrue(queue.empty())
+
+ def testStopTestRun(self):
+ queue, result = self.make_result()
+ result.stopTestRun()
+ self.assertEqual(
+ {'event':'stopTestRun', 'result':result}, queue.get(False))
+ self.assertTrue(queue.empty())
+
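+
+# Illustrative sketch (an assumed helper, not part of the upstream module):
+# StreamToQueue is the producer half of the pattern exercised above; a
+# consumer drains the queue and replays each 'status' event onto a real
+# StreamResult, which is how ConcurrentStreamTestSuite fans results back in.
+def _example_drain_queue(queue, target):
+    while not queue.empty():
+        event_dict = queue.get(False)
+        if event_dict.pop('event') == 'status':
+            target.status(**event_dict)
+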
+
+class TestExtendedToOriginalResultDecoratorBase(TestCase):
+
+ def make_26_result(self):
+ self.result = Python26TestResult()
+ self.make_converter()
+
+ def make_27_result(self):
+ self.result = Python27TestResult()
+ self.make_converter()
+
+ def make_converter(self):
+ self.converter = ExtendedToOriginalDecorator(self.result)
+
+ def make_extended_result(self):
+ self.result = ExtendedTestResult()
+ self.make_converter()
+
+ def check_outcome_details(self, outcome):
+ """Call an outcome with a details dict to be passed through."""
+        # This dict is /not/ convertible - that's deliberate, as it should
+ # not hit the conversion code path.
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, details)], self.result._events)
+
+ def get_details_and_string(self):
+ """Get a details dict and expected string."""
+ text1 = lambda: [_b("1\n2\n")]
+ text2 = lambda: [_b("3\n4\n")]
+ bin1 = lambda: [_b("5\n")]
+ details = {'text 1': Content(ContentType('text', 'plain'), text1),
+ 'text 2': Content(ContentType('text', 'strange'), text2),
+ 'bin 1': Content(ContentType('application', 'binary'), bin1)}
+ return (details,
+ ("Binary content:\n"
+ " bin 1 (application/binary)\n"
+ "\n"
+ "text 1: {{{\n"
+ "1\n"
+ "2\n"
+ "}}}\n"
+ "\n"
+ "text 2: {{{\n"
+ "3\n"
+ "4\n"
+ "}}}\n"))
+
+ def check_outcome_details_to_exec_info(self, outcome, expected=None):
+ """Call an outcome with a details dict to be made into exc_info."""
+        # The conversion is done using RemoteError and the string contents
+ # of the text types in the details dict.
+ if not expected:
+ expected = outcome
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ err = self.converter._details_to_exc_info(details)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_details_to_nothing(self, outcome, expected=None):
+ """Call an outcome with a details dict to be swallowed."""
+ if not expected:
+ expected = outcome
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_details_to_string(self, outcome):
+ """Call an outcome with a details dict to be stringified."""
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, err_str)], self.result._events)
+
+ def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
+ """Call an outcome with a details dict to have an arg extracted."""
+ details, _ = self.get_details_and_string()
+ if extra_detail:
+ details.update(extra_detail)
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, arg)], self.result._events)
+
+ def check_outcome_exc_info(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome on a fallback works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ if not expected:
+ expected = outcome
+ getattr(self.converter, outcome)(self)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string_nothing(self, outcome, expected):
+ """Check that calling outcome with a string calls expected."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string(self, outcome):
+ """Check that calling outcome with a string works."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(outcome, self, "foo")], self.result._events)
+
+
+class TestExtendedToOriginalResultDecorator(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_failfast_py26(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.failfast)
+ self.converter.failfast = True
+ self.assertFalse(safe_hasattr(self.converter.decorated, 'failfast'))
+
+ def test_failfast_py27(self):
+ self.make_27_result()
+ self.assertEqual(False, self.converter.failfast)
+ # setting it should write it to the backing result
+ self.converter.failfast = True
+ self.assertEqual(True, self.converter.decorated.failfast)
+
+ def test_progress_py26(self):
+ self.make_26_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_py27(self):
+ self.make_27_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_pyextended(self):
+ self.make_extended_result()
+ self.converter.progress(1, 2)
+ self.assertEqual([('progress', 1, 2)], self.result._events)
+
+ def test_shouldStop(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.shouldStop)
+ self.converter.decorated.stop()
+ self.assertEqual(True, self.converter.shouldStop)
+
+ def test_startTest_py26(self):
+ self.make_26_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_py27(self):
+ self.make_27_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTestRun_py26(self):
+ self.make_26_result()
+ self.converter.startTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_startTestRun_py27(self):
+ self.make_27_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_startTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_stopTest_py26(self):
+ self.make_26_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_py27(self):
+ self.make_27_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTestRun_py26(self):
+ self.make_26_result()
+ self.converter.stopTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_stopTestRun_py27(self):
+ self.make_27_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_stopTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_tags_py26(self):
+ self.make_26_result()
+ self.converter.tags(set([1]), set([2]))
+
+ def test_tags_py27(self):
+ self.make_27_result()
+ self.converter.tags(set([1]), set([2]))
+
+ def test_tags_pyextended(self):
+ self.make_extended_result()
+ self.converter.tags(set([1]), set([2]))
+ self.assertEqual([('tags', set([1]), set([2]))], self.result._events)
+
+ def test_time_py26(self):
+ self.make_26_result()
+ self.converter.time(1)
+
+ def test_time_py27(self):
+ self.make_27_result()
+ self.converter.time(1)
+
+ def test_time_pyextended(self):
+ self.make_extended_result()
+ self.converter.time(1)
+ self.assertEqual([('time', 1)], self.result._events)
+
+
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addError'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addFailure'
+
+
+class TestExtendedToOriginalAddExpectedFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addExpectedFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
+
+
+class TestExtendedToOriginalAddSkip(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSkip'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py27_no_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_string(self.outcome)
+
+ def test_outcome_Extended_py27_reason(self):
+ self.make_27_result()
+ self.check_outcome_details_to_arg(self.outcome, 'foo',
+ {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertThat(
+ lambda: getattr(self.converter, self.outcome)(self),
+ Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSuccess'
+ expected = 'addSuccess'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_nothing(self.outcome, self.expected)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, self.expected)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalAddUnexpectedSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addUnexpectedSuccess'
+ expected = 'addFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ getattr(self.converter, self.outcome)(self)
+ [event] = self.result._events
+ self.assertEqual((self.expected, self), event[:2])
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalResultOtherAttributes(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_other_attribute(self):
+ class OtherExtendedResult:
+ def foo(self):
+ return 2
+ bar = 1
+ self.result = OtherExtendedResult()
+ self.make_converter()
+ self.assertEqual(1, self.converter.bar)
+ self.assertEqual(2, self.converter.foo())
+
+
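+
+# Illustrative sketch (an assumed helper, not part of the upstream module):
+# the decorator tests above show how ExtendedToOriginalDecorator lets
+# extended-API calls (details dicts, addSkip and friends) degrade to
+# whatever a plain unittest TestResult supports.
+def _example_extended_to_original(test):
+    import unittest
+    result = unittest.TestResult()
+    converter = ExtendedToOriginalDecorator(result)
+    converter.startTest(test)
+    # The details dict is converted to a plain reason string for results
+    # that only understand addSkip(test, reason).
+    converter.addSkip(test, details={'reason': text_content('skipped')})
+    converter.stopTest(test)
+    return result
+
+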
+class TestNonAsciiResults(TestCase):
+    """Test that all kinds of tracebacks are cleanly interpreted as unicode
+
+    Currently this only uses weak "contains" assertions; it would be good to
+    be much stricter about the expected output. That would add a few failures
+    for the current release of IronPython, for instance, which gets some
+    traceback lines muddled.
+ """
+
+ _sample_texts = (
+ _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
+ _u("\u5357\u7121"), # In ISO 2022 encodings
+ _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
+ )
+
+ _is_pypy = "__pypy__" in sys.builtin_module_names
+ # Everything but Jython shows syntax errors on the current character
+ _error_on_character = os.name != "java" and not _is_pypy
+
+ def _run(self, stream, test):
+ """Run the test, the same as in testtools.run but not to stdout"""
+ result = TextTestResult(stream)
+ result.startTestRun()
+ try:
+ return test.run(result)
+ finally:
+ result.stopTestRun()
+
+ def _write_module(self, name, encoding, contents):
+ """Create Python module on disk with contents in given encoding"""
+ try:
+ # Need to pre-check that the coding is valid or codecs.open drops
+ # the file without closing it which breaks non-refcounted pythons
+ codecs.lookup(encoding)
+ except LookupError:
+ self.skip("Encoding unsupported by implementation: %r" % encoding)
+ f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
+ try:
+ f.write(contents)
+ finally:
+ f.close()
+
+ def _test_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+        """Create and run a test case in a separate module"""
+ self._setup_external_case(testline, coding, modulelevel, suffix)
+ return self._run_external_case()
+
+ def _setup_external_case(self, testline, coding="ascii", modulelevel="",
+ suffix=""):
+        """Create a test case in a separate module"""
+ _, prefix, self.modname = self.id().rsplit(".", 2)
+ self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
+ self.addCleanup(shutil.rmtree, self.dir)
+ self._write_module(self.modname, coding,
+ # Older Python 2 versions don't see a coding declaration in a
+ # docstring so it has to be in a comment, but then we can't
+            # work around bug: <http://ironpython.codeplex.com/workitem/26940>
+ "# coding: %s\n"
+ "import testtools\n"
+ "%s\n"
+ "class Test(testtools.TestCase):\n"
+ " def runTest(self):\n"
+ " %s\n" % (coding, modulelevel, testline))
+
+ def _run_external_case(self):
+        """Run the prepared test case in a separate module"""
+ sys.path.insert(0, self.dir)
+ self.addCleanup(sys.path.remove, self.dir)
+ module = __import__(self.modname)
+ self.addCleanup(sys.modules.pop, self.modname)
+ stream = StringIO()
+ self._run(stream, module.Test())
+ return stream.getvalue()
+
+ def _silence_deprecation_warnings(self):
+ """Shut up DeprecationWarning for this test only"""
+ warnings.simplefilter("ignore", DeprecationWarning)
+ self.addCleanup(warnings.filters.remove, warnings.filters[0])
+
+ def _get_sample_text(self, encoding="unicode_internal"):
+ if encoding is None and str_is_unicode:
+ encoding = "unicode_internal"
+ for u in self._sample_texts:
+ try:
+ b = u.encode(encoding)
+ if u == b.decode(encoding):
+ if str_is_unicode:
+ return u, u
+ return u, b
+ except (LookupError, UnicodeError):
+ pass
+ self.skip("Could not find a sample text for encoding: %r" % encoding)
+
+ def _as_output(self, text):
+ return text
+
+ def test_non_ascii_failure_string(self):
+ """Assertion contents can be non-ascii and should get decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_non_ascii_failure_string_via_exec(self):
+ """Assertion via exec can be non-ascii and still gets decoded"""
+ text, raw = self._get_sample_text(_get_exception_encoding())
+ textoutput = self._test_external_case(
+ testline='exec ("self.fail(%s)")' % _r(raw))
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_control_characters_in_failure_string(self):
+ """Control characters in assertions should be escaped"""
+ textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
+ self.expectFailure("Defense against the beeping horror unimplemented",
+ self.assertNotIn, self._as_output("\a\a\a"), textoutput)
+ self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
+
+ def _local_os_error_matcher(self):
+ if sys.version_info > (3, 3):
+ return MatchesAny(Contains("FileExistsError: "),
+ Contains("PermissionError: "))
+ elif os.name != "nt" or sys.version_info < (2, 5):
+ return Contains(self._as_output("OSError: "))
+ else:
+ return Contains(self._as_output("WindowsError: "))
+
+ def test_os_error(self):
+ """Locale error messages from the OS shouldn't break anything"""
+ textoutput = self._test_external_case(
+ modulelevel="import os",
+ testline="os.mkdir('/')")
+ self.assertThat(textoutput, self._local_os_error_matcher())
+
+ def test_assertion_text_shift_jis(self):
+ """A terminal raw backslash in an encoded string is weird but fine"""
+ example_text = _u("\u5341")
+ textoutput = self._test_external_case(
+ coding="shift_jis",
+ testline="self.fail('%s')" % example_text)
+ if str_is_unicode:
+ output_text = example_text
+ else:
+ output_text = example_text.encode("shift_jis").decode(
+ _get_exception_encoding(), "replace")
+ self.assertIn(self._as_output("AssertionError: %s" % output_text),
+ textoutput)
+
+ def test_file_comment_iso2022_jp(self):
+ """Control character escapes must be preserved if valid encoding"""
+ example_text, _ = self._get_sample_text("iso2022_jp")
+ textoutput = self._test_external_case(
+ coding="iso2022_jp",
+ testline="self.fail('Simple') # %s" % example_text)
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unicode_exception(self):
+        """Exceptions that can be formatted losslessly as unicode should be"""
+ example_text, _ = self._get_sample_text()
+ exception_class = (
+ "class FancyError(Exception):\n"
+ # A __unicode__ method does nothing on py3k but the default works
+ " def __unicode__(self):\n"
+ " return self.args[0]\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise FancyError(%s)" % _r(example_text))
+ self.assertIn(self._as_output(example_text), textoutput)
+
+ def test_unprintable_exception(self):
+ """A totally useless exception instance still prints something"""
+ exception_class = (
+ "class UnprintableError(Exception):\n"
+ " def __str__(self):\n"
+ " raise RuntimeError\n"
+ " def __unicode__(self):\n"
+ " raise RuntimeError\n"
+ " def __repr__(self):\n"
+ " raise RuntimeError\n")
+ textoutput = self._test_external_case(
+ modulelevel=exception_class,
+ testline="raise UnprintableError")
+ self.assertIn(self._as_output(
+ "UnprintableError: <unprintable UnprintableError object>\n"),
+ textoutput)
+
+ def test_string_exception(self):
+ """Raise a string rather than an exception instance if supported"""
+ if sys.version_info > (2, 6):
+ self.skip("No string exceptions in Python 2.6 or later")
+ elif sys.version_info > (2, 5):
+ self._silence_deprecation_warnings()
+ textoutput = self._test_external_case(testline="raise 'plain str'")
+ self.assertIn(self._as_output("\nplain str\n"), textoutput)
+
+ def test_non_ascii_dirname(self):
+ """Script paths in the traceback can be non-ascii"""
+ text, raw = self._get_sample_text(sys.getfilesystemencoding())
+ textoutput = self._test_external_case(
+ # Avoid bug in Python 3 by giving a unicode source encoding rather
+ # than just ascii which raises a SyntaxError with no other details
+ coding="utf-8",
+ testline="self.fail('Simple')",
+ suffix=raw)
+ self.assertIn(self._as_output(text), textoutput)
+
+ def test_syntax_error(self):
+ """Syntax errors should still have fancy special-case formatting"""
+ textoutput = self._test_external_case("exec ('f(a, b c)')")
+ self.assertIn(self._as_output(
+ ' File "<string>", line 1\n'
+ ' f(a, b c)\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: '
+ ), textoutput)
+
+ def test_syntax_error_malformed(self):
+        """Syntax errors with bogus parameters shouldn't break anything"""
+ textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
+ self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
+
+ def test_syntax_error_import_binary(self):
+ """Importing a binary file shouldn't break SyntaxError formatting"""
+ if sys.version_info < (2, 5):
+ # Python 2.4 assumes the file is latin-1 and tells you off
+ self._silence_deprecation_warnings()
+ self._setup_external_case("import bad")
+ f = open(os.path.join(self.dir, "bad.py"), "wb")
+ try:
+ f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
+ finally:
+ f.close()
+ textoutput = self._run_external_case()
+ matches_error = MatchesAny(
+ Contains('\nTypeError: '), Contains('\nSyntaxError: '))
+ self.assertThat(textoutput, matches_error)
+
+ def test_syntax_error_line_iso_8859_1(self):
+ """Syntax error on a latin-1 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-1")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-1",
+ "# coding: iso-8859-1\n! = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' ! = 0 # %s\n'
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_iso_8859_5(self):
+        """Syntax error on an iso-8859-5 line shows the line decoded"""
+ text, raw = self._get_sample_text("iso-8859-5")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "iso-8859-5",
+ "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' %% = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_euc_jp(self):
+ """Syntax error on a euc_jp line shows the line decoded"""
+ text, raw = self._get_sample_text("euc_jp")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "euc_jp",
+ "# coding: euc_jp\n$ = 0 # %s\n" % text)
+ textoutput = self._run_external_case()
+ # pypy uses cpython's multibyte codecs so has their behavior here
+ if self._is_pypy:
+ self._error_on_character = True
+ self.assertIn(self._as_output(_u(
+ #'bad.py", line 2\n'
+ ' $ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ (text,)), textoutput)
+
+ def test_syntax_error_line_utf_8(self):
+ """Syntax error on a utf-8 line shows the line decoded"""
+ text, raw = self._get_sample_text("utf-8")
+ textoutput = self._setup_external_case("import bad")
+ self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
+ textoutput = self._run_external_case()
+ self.assertIn(self._as_output(_u(
+ 'bad.py", line 1\n'
+ ' ^ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ ' ^\n'
+ 'SyntaxError: ') %
+ text), textoutput)
+
+
+class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
+ """Test that running under unittest produces clean ascii strings"""
+
+ def _run(self, stream, test):
+ from unittest import TextTestRunner as _Runner
+ return _Runner(stream).run(test)
+
+ def _as_output(self, text):
+ if str_is_unicode:
+ return text
+ return text.encode("utf-8")
+
+
+class TestDetailsToStr(TestCase):
+
+ def test_no_details(self):
+ string = _details_to_str({})
+ self.assertThat(string, Equals(''))
+
+ def test_binary_content(self):
+ content = content_from_stream(
+ StringIO('foo'), content_type=ContentType('image', 'jpeg'))
+ string = _details_to_str({'attachment': content})
+ self.assertThat(
+ string, Equals("""\
+Binary content:
+ attachment (image/jpeg)
+"""))
+
+ def test_single_line_content(self):
+ content = text_content('foo')
+ string = _details_to_str({'attachment': content})
+ self.assertThat(string, Equals('attachment: {{{foo}}}\n'))
+
+ def test_multi_line_text_content(self):
+ content = text_content('foo\nbar\nbaz')
+ string = _details_to_str({'attachment': content})
+ self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n'))
+
+ def test_special_text_content(self):
+ content = text_content('foo')
+ string = _details_to_str({'attachment': content}, special='attachment')
+ self.assertThat(string, Equals('foo\n'))
+
+ def test_multiple_text_content(self):
+ string = _details_to_str(
+ {'attachment': text_content('foo\nfoo'),
+ 'attachment-1': text_content('bar\nbar')})
+ self.assertThat(
+ string, Equals('attachment: {{{\n'
+ 'foo\n'
+ 'foo\n'
+ '}}}\n'
+ '\n'
+ 'attachment-1: {{{\n'
+ 'bar\n'
+ 'bar\n'
+ '}}}\n'))
+
+ def test_empty_attachment(self):
+ string = _details_to_str({'attachment': text_content('')})
+ self.assertThat(
+ string, Equals("""\
+Empty attachments:
+ attachment
+"""))
+
+ def test_lots_of_different_attachments(self):
+ jpg = lambda x: content_from_stream(
+ StringIO(x), ContentType('image', 'jpeg'))
+ attachments = {
+ 'attachment': text_content('foo'),
+ 'attachment-1': text_content('traceback'),
+ 'attachment-2': jpg('pic1'),
+ 'attachment-3': text_content('bar'),
+ 'attachment-4': text_content(''),
+ 'attachment-5': jpg('pic2'),
+ }
+ string = _details_to_str(attachments, special='attachment-1')
+ self.assertThat(
+ string, Equals("""\
+Binary content:
+ attachment-2 (image/jpeg)
+ attachment-5 (image/jpeg)
+Empty attachments:
+ attachment-4
+
+attachment: {{{foo}}}
+attachment-3: {{{bar}}}
+
+traceback
+"""))
+
+
+class TestByTestResultTests(TestCase):
+
+ def setUp(self):
+ super(TestByTestResultTests, self).setUp()
+ self.log = []
+ self.result = TestByTestResult(self.on_test)
+ now = iter(range(5))
+ self.result._now = lambda: advance_iterator(now)
+
+ def assertCalled(self, **kwargs):
+ defaults = {
+ 'test': self,
+ 'tags': set(),
+ 'details': None,
+ 'start_time': 0,
+ 'stop_time': 1,
+ }
+ defaults.update(kwargs)
+ self.assertEqual([defaults], self.log)
+
+ def on_test(self, **kwargs):
+ self.log.append(kwargs)
+
+ def test_no_tests_nothing_reported(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertEqual([], self.log)
+
+ def test_add_success(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success')
+
+ def test_add_success_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_global_tags(self):
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo']))
+
+ def test_local_tags(self):
+ self.result.tags(['foo'], [])
+ self.result.startTest(self)
+ self.result.tags(['bar'], [])
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', tags=set(['foo', 'bar']))
+
+ def test_add_error(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addError(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='error',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_error_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addError(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='error', details=details)
+
+ def test_add_failure(self):
+ self.result.startTest(self)
+ try:
+ self.fail("intentional failure")
+ except self.failureException:
+ failure = sys.exc_info()
+ self.result.addFailure(self, failure)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='failure',
+ details={'traceback': TracebackContent(failure, self)})
+
+ def test_add_failure_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='failure', details=details)
+
+ def test_add_xfail(self):
+ self.result.startTest(self)
+ try:
+ 1/0
+ except ZeroDivisionError:
+ error = sys.exc_info()
+ self.result.addExpectedFailure(self, error)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='xfail',
+ details={'traceback': TracebackContent(error, self)})
+
+ def test_add_xfail_details(self):
+ self.result.startTest(self)
+ details = {"foo": text_content("bar")}
+ self.result.addExpectedFailure(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='xfail', details=details)
+
+ def test_add_unexpected_success(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addUnexpectedSuccess(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='success', details=details)
+
+ def test_add_skip_reason(self):
+ self.result.startTest(self)
+ reason = self.getUniqueString()
+ self.result.addSkip(self, reason)
+ self.result.stopTest(self)
+ self.assertCalled(
+ status='skip', details={'reason': text_content(reason)})
+
+ def test_add_skip_details(self):
+ self.result.startTest(self)
+ details = {'foo': 'bar'}
+ self.result.addSkip(self, details=details)
+ self.result.stopTest(self)
+ self.assertCalled(status='skip', details=details)
+
+ def test_twice(self):
+ self.result.startTest(self)
+ self.result.addSuccess(self, details={'foo': 'bar'})
+ self.result.stopTest(self)
+ self.result.startTest(self)
+ self.result.addSuccess(self)
+ self.result.stopTest(self)
+ self.assertEqual(
+ [{'test': self,
+ 'status': 'success',
+ 'start_time': 0,
+ 'stop_time': 1,
+ 'tags': set(),
+ 'details': {'foo': 'bar'}},
+ {'test': self,
+ 'status': 'success',
+ 'start_time': 2,
+ 'stop_time': 3,
+ 'tags': set(),
+ 'details': None},
+ ],
+ self.log)
+
+
+class TestTagger(TestCase):
+
+ def test_tags_tests(self):
+ result = ExtendedTestResult()
+ tagger = Tagger(result, set(['foo']), set(['bar']))
+ test1, test2 = self, make_test()
+ tagger.startTest(test1)
+ tagger.addSuccess(test1)
+ tagger.stopTest(test1)
+ tagger.startTest(test2)
+ tagger.addSuccess(test2)
+ tagger.stopTest(test2)
+ self.assertEqual(
+ [('startTest', test1),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', test1),
+ ('stopTest', test1),
+ ('startTest', test2),
+ ('tags', set(['foo']), set(['bar'])),
+ ('addSuccess', test2),
+ ('stopTest', test2),
+ ], result._events)
+
+
+class TestTimestampingStreamResult(TestCase):
+
+ def test_startTestRun(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.startTestRun()
+ self.assertEqual([('startTestRun',)], result.targets[0]._events)
+
+ def test_stopTestRun(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.stopTestRun()
+ self.assertEqual([('stopTestRun',)], result.targets[0]._events)
+
+ def test_status_no_timestamp(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.status(test_id="A", test_status="B", test_tags="C",
+ runnable="D", file_name="E", file_bytes=b"F", eof=True,
+ mime_type="G", route_code="H")
+ events = result.targets[0]._events
+ self.assertThat(events, HasLength(1))
+ self.assertThat(events[0], HasLength(11))
+ self.assertEqual(
+ ("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"),
+ events[0][:10])
+ self.assertNotEqual(None, events[0][10])
+ self.assertIsInstance(events[0][10], datetime.datetime)
+
+ def test_status_timestamp(self):
+ result = TimestampingStreamResult(LoggingStreamResult())
+ result.status(timestamp="F")
+ self.assertEqual("F", result.targets[0]._events[0][10])
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py
new file mode 100644
index 00000000000..e2c33062b2d
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_testsuite.py
@@ -0,0 +1,279 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test ConcurrentTestSuite and related things."""
+
+__metaclass__ = type
+
+import doctest
+from functools import partial
+import sys
+import unittest
+
+from extras import try_import
+
+from testtools import (
+ ConcurrentTestSuite,
+ ConcurrentStreamTestSuite,
+ iterate_tests,
+ PlaceHolder,
+ TestByTestResult,
+ TestCase,
+ )
+from testtools.compat import _b, _u
+from testtools.matchers import DocTestMatches
+from testtools.testsuite import FixtureSuite, iterate_tests, sorted_tests
+from testtools.tests.helpers import LoggingResult
+from testtools.testresult.doubles import StreamResult as LoggingStream
+
+FunctionFixture = try_import('fixtures.FunctionFixture')
+
+class Sample(TestCase):
+ def __hash__(self):
+ return id(self)
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+
+
+class TestConcurrentTestSuiteRun(TestCase):
+
+ def test_broken_test(self):
+ log = []
+ def on_test(test, status, start_time, stop_time, tags, details):
+ log.append((test.id(), status, set(details.keys())))
+ class BrokenTest(object):
+ # Simple break - no result parameter to run()
+ def __call__(self):
+ pass
+ run = __call__
+ original_suite = unittest.TestSuite([BrokenTest()])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(TestByTestResult(on_test))
+ self.assertEqual([('broken-runner', 'error', set(['traceback']))], log)
+
+ def test_trivial(self):
+ log = []
+ result = LoggingResult(log)
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(result)
+ # log[0] is the timestamp for the first test starting.
+ test1 = log[1][1]
+ test2 = log[-1][1]
+ self.assertIsInstance(test1, Sample)
+ self.assertIsInstance(test2, Sample)
+ self.assertNotEqual(test1.id(), test2.id())
+
+ def test_wrap_result(self):
+ # ConcurrentTestSuite has a hook for wrapping the per-thread result.
+ wrap_log = []
+
+ def wrap_result(thread_safe_result, thread_number):
+ wrap_log.append(
+ (thread_safe_result.result.decorated, thread_number))
+ return thread_safe_result
+
+ result_log = []
+ result = LoggingResult(result_log)
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(
+ original_suite, self.split_suite, wrap_result=wrap_result)
+ suite.run(result)
+ self.assertEqual(
+ [(result, 0),
+ (result, 1),
+ ], wrap_log)
+ # Smoke test to make sure everything ran OK.
+ self.assertNotEqual([], result_log)
+
+ def split_suite(self, suite):
+ return list(iterate_tests(suite))
+
+
+class TestConcurrentStreamTestSuiteRun(TestCase):
+
+ def test_trivial(self):
+ result = LoggingStream()
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ cases = lambda:[(test1, '0'), (test2, '1')]
+ suite = ConcurrentStreamTestSuite(cases)
+ suite.run(result)
+ def freeze(set_or_none):
+ if set_or_none is None:
+ return set_or_none
+ return frozenset(set_or_none)
+        # Ignore event order: we're testing that the code is all glued
+        # together, which just means we can pump events through and they get
+        # route codes added appropriately.
+ self.assertEqual(set([
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method1',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ '0',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method1',
+ 'success',
+ frozenset(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ '0',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method2',
+ 'inprogress',
+ None,
+ True,
+ None,
+ None,
+ False,
+ None,
+ '1',
+ None,
+ ),
+ ('status',
+ 'testtools.tests.test_testsuite.Sample.test_method2',
+ 'success',
+ frozenset(),
+ True,
+ None,
+ None,
+ False,
+ None,
+ '1',
+ None,
+ ),
+ ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,)
+ for event in result._events))
+
+ def test_broken_runner(self):
+ # If the object called breaks, the stream is informed about it
+ # regardless.
+ class BrokenTest(object):
+ # broken - no result parameter!
+ def __call__(self):
+ pass
+ def run(self):
+ pass
+ result = LoggingStream()
+ cases = lambda:[(BrokenTest(), '0')]
+ suite = ConcurrentStreamTestSuite(cases)
+ suite.run(result)
+ events = result._events
+ # Check the traceback loosely.
+ self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\
+Traceback (most recent call last):
+ File "...testtools/testsuite.py", line ..., in _run_test
+ test.run(process_result)
+TypeError: run() takes ...1 ...argument...2...given...
+""", doctest.ELLIPSIS))
+ events = [event[0:10] + (None,) for event in events]
+ events[1] = events[1][:6] + (None,) + events[1][7:]
+ self.assertEqual([
+ ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None),
+ ('status', "broken-runner-'0'", None, None, True, 'traceback', None,
+ False,
+ 'text/x-traceback; charset="utf8"; language="python"',
+ '0',
+ None),
+ ('status', "broken-runner-'0'", None, None, True, 'traceback', b'', True,
+ 'text/x-traceback; charset="utf8"; language="python"', '0', None),
+ ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None)
+ ], events)
+
+ def split_suite(self, suite):
+ tests = list(enumerate(iterate_tests(suite)))
+ return [(test, _u(str(pos))) for pos, test in tests]
+
+
+class TestFixtureSuite(TestCase):
+
+ def setUp(self):
+ super(TestFixtureSuite, self).setUp()
+ if FunctionFixture is None:
+ self.skip("Need fixtures")
+
+ def test_fixture_suite(self):
+ log = []
+ class Sample(TestCase):
+ def test_one(self):
+ log.append(1)
+ def test_two(self):
+ log.append(2)
+ fixture = FunctionFixture(
+ lambda: log.append('setUp'),
+ lambda fixture: log.append('tearDown'))
+ suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
+ suite.run(LoggingResult([]))
+ self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
+
+ def test_fixture_suite_sort(self):
+ log = []
+ class Sample(TestCase):
+ def test_one(self):
+ log.append(1)
+ def test_two(self):
+ log.append(2)
+ fixture = FunctionFixture(
+ lambda: log.append('setUp'),
+ lambda fixture: log.append('tearDown'))
+ suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')])
+ self.assertRaises(ValueError, suite.sort_tests)
+
+
+class TestSortedTests(TestCase):
+
+ def test_sorts_custom_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ class Subclass(unittest.TestSuite):
+ def sort_tests(self):
+ self._tests = sorted_tests(self, True)
+ input_suite = Subclass([b, a])
+ suite = sorted_tests(input_suite)
+ self.assertEqual([a, b], list(iterate_tests(suite)))
+ self.assertEqual([input_suite], list(iter(suite)))
+
+ def test_custom_suite_without_sort_tests_works(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ class Subclass(unittest.TestSuite):pass
+ input_suite = Subclass([b, a])
+ suite = sorted_tests(input_suite)
+ self.assertEqual([b, a], list(iterate_tests(suite)))
+ self.assertEqual([input_suite], list(iter(suite)))
+
+ def test_sorts_simple_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ suite = sorted_tests(unittest.TestSuite([b, a]))
+ self.assertEqual([a, b], list(iterate_tests(suite)))
+
+ def test_duplicate_simple_suites(self):
+ a = PlaceHolder('a')
+ b = PlaceHolder('b')
+ c = PlaceHolder('a')
+ self.assertRaises(
+ ValueError, sorted_tests, unittest.TestSuite([a, b, c]))
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/test/3rdparty/testtools-0.9.12/testtools/tests/test_with_with.py b/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py
index e06adeb1816..4305c624a86 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/tests/test_with_with.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/tests/test_with_with.py
@@ -11,6 +11,7 @@ from testtools import (
from testtools.matchers import (
AfterPreprocessing,
Equals,
+ EndsWith,
)
@@ -71,3 +72,17 @@ class TestExpectedException(TestCase):
def test_pass_on_raise_any_message(self):
with ExpectedException(ValueError):
raise ValueError('whatever')
+
+ def test_annotate(self):
+ def die():
+ with ExpectedException(ValueError, msg="foo"):
+ pass
+ exc = self.assertRaises(AssertionError, die)
+ self.assertThat(exc.args[0], EndsWith(': foo'))
+
+ def test_annotated_matcher(self):
+ def die():
+ with ExpectedException(ValueError, 'bar', msg="foo"):
+ pass
+ exc = self.assertRaises(AssertionError, die)
+ self.assertThat(exc.args[0], EndsWith(': foo'))
diff --git a/test/3rdparty/testtools-0.9.34/testtools/testsuite.py b/test/3rdparty/testtools-0.9.34/testtools/testsuite.py
new file mode 100644
index 00000000000..9e92e0cb8b1
--- /dev/null
+++ b/test/3rdparty/testtools-0.9.34/testtools/testsuite.py
@@ -0,0 +1,317 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test suites and related things."""
+
+__metaclass__ = type
+__all__ = [
+ 'ConcurrentTestSuite',
+ 'ConcurrentStreamTestSuite',
+ 'filter_by_ids',
+ 'iterate_tests',
+ 'sorted_tests',
+ ]
+
+import sys
+import threading
+import unittest
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+import testtools
+
+
+def iterate_tests(test_suite_or_case):
+ """Iterate through all of the test cases in 'test_suite_or_case'."""
+ try:
+ suite = iter(test_suite_or_case)
+ except TypeError:
+ yield test_suite_or_case
+ else:
+ for test in suite:
+ for subtest in iterate_tests(test):
+ yield subtest
+
+
+class ConcurrentTestSuite(unittest.TestSuite):
+ """A TestSuite whose run() calls out to a concurrency strategy."""
+
+ def __init__(self, suite, make_tests, wrap_result=None):
+ """Create a ConcurrentTestSuite to execute suite.
+
+ :param suite: A suite to run concurrently.
+ :param make_tests: A helper function to split the tests in the
+ ConcurrentTestSuite into some number of concurrently executing
+ sub-suites. make_tests must take a suite, and return an iterable
+            of TestCase-like objects, each of which must have a run(result)
+ method.
+ :param wrap_result: An optional function that takes a thread-safe
+ result and a thread number and must return a ``TestResult``
+ object. If not provided, then ``ConcurrentTestSuite`` will just
+ use a ``ThreadsafeForwardingResult`` wrapped around the result
+ passed to ``run()``.
+ """
+ super(ConcurrentTestSuite, self).__init__([suite])
+ self.make_tests = make_tests
+ if wrap_result:
+ self._wrap_result = wrap_result
+
+ def _wrap_result(self, thread_safe_result, thread_number):
+ """Wrap a thread-safe result before sending it test results.
+
+ You can either override this in a subclass or pass your own
+ ``wrap_result`` in to the constructor. The latter is preferred.
+ """
+ return thread_safe_result
+
+ def run(self, result):
+ """Run the tests concurrently.
+
+ This calls out to the provided make_tests helper, and then serialises
+ the results so that result only sees activity from one TestCase at
+ a time.
+
+        ConcurrentTestSuite provides no special mechanism to stop the tests
+        returned by make_tests; it is up to the returned tests to honour the
+        shouldStop attribute on the result object they are run with, which
+        will be set if an exception is raised in the thread in which
+        ConcurrentTestSuite.run is called.
+ """
+ tests = self.make_tests(self)
+ try:
+ threads = {}
+ queue = Queue()
+ semaphore = threading.Semaphore(1)
+ for i, test in enumerate(tests):
+ process_result = self._wrap_result(
+ testtools.ThreadsafeForwardingResult(result, semaphore), i)
+ reader_thread = threading.Thread(
+ target=self._run_test, args=(test, process_result, queue))
+ threads[test] = reader_thread, process_result
+ reader_thread.start()
+ while threads:
+ finished_test = queue.get()
+ threads[finished_test][0].join()
+ del threads[finished_test]
+ except:
+ for thread, process_result in threads.values():
+ process_result.stop()
+ raise
+
+ def _run_test(self, test, process_result, queue):
+ try:
+ try:
+ test.run(process_result)
+ except Exception as e:
+ # The run logic itself failed.
+ case = testtools.ErrorHolder(
+ "broken-runner",
+ error=sys.exc_info())
+ case.run(process_result)
+ finally:
+ queue.put(test)
+
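+
+# Illustrative sketch (an assumed helper, not part of the testtools API):
+# split a suite into one runnable unit per test case and run them
+# concurrently. Any TestResult works, because each worker thread reports
+# through the ThreadsafeForwardingResult created in run() above.
+def _example_concurrent_run(suite, result):
+    def split_per_test(suite_to_split):
+        # One worker per test case; each already has a run(result) method.
+        return list(iterate_tests(suite_to_split))
+    ConcurrentTestSuite(suite, split_per_test).run(result)
+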
+
+class ConcurrentStreamTestSuite(object):
+ """A TestSuite whose run() parallelises."""
+
+ def __init__(self, make_tests):
+        """Create a ConcurrentStreamTestSuite to run tests from make_tests.
+
+ :param make_tests: A helper function that should return some number
+ of concurrently executable test suite / test case objects.
+ make_tests must take no parameters and return an iterable of
+ tuples. Each tuple must be of the form (case, route_code), where
+ case is a TestCase-like object with a run(result) method, and
+ route_code is either None or a unicode string.
+ """
+ super(ConcurrentStreamTestSuite, self).__init__()
+ self.make_tests = make_tests
+
+ def run(self, result):
+ """Run the tests concurrently.
+
+ This calls out to the provided make_tests helper to determine the
+ concurrency to use and to assign routing codes to each worker.
+
+        ConcurrentStreamTestSuite provides no special mechanism to stop the
+        tests returned by make_tests; it is up to the returned tests to
+        honour the shouldStop attribute on the result object they are run
+        with, which will be set if the test run is to be aborted.
+
+ The tests are run with an ExtendedToStreamDecorator wrapped around a
+ StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from
+ the queue and forwards them to result. Tests can therefore be either
+ original unittest tests (or compatible tests), or new tests that emit
+ StreamResult events directly.
+
+ :param result: A StreamResult instance. The caller is responsible for
+ calling startTestRun on this instance prior to invoking suite.run,
+ and stopTestRun subsequent to the run method returning.
+ """
+ tests = self.make_tests()
+ try:
+ threads = {}
+ queue = Queue()
+ for test, route_code in tests:
+ to_queue = testtools.StreamToQueue(queue, route_code)
+ process_result = testtools.ExtendedToStreamDecorator(
+ testtools.TimestampingStreamResult(to_queue))
+ runner_thread = threading.Thread(
+ target=self._run_test,
+ args=(test, process_result, route_code))
+ threads[to_queue] = runner_thread, process_result
+ runner_thread.start()
+ while threads:
+ event_dict = queue.get()
+ event = event_dict.pop('event')
+ if event == 'status':
+ result.status(**event_dict)
+ elif event == 'stopTestRun':
+ thread = threads.pop(event_dict['result'])[0]
+ thread.join()
+ elif event == 'startTestRun':
+ pass
+ else:
+ raise ValueError('unknown event type %r' % (event,))
+ except:
+ for thread, process_result in threads.values():
+ # Signal to each TestControl in the ExtendedToStreamDecorator
+ # that the thread should stop running tests and cleanup
+ process_result.stop()
+ raise
+
+ def _run_test(self, test, process_result, route_code):
+ process_result.startTestRun()
+ try:
+ try:
+ test.run(process_result)
+ except Exception as e:
+ # The run logic itself failed.
+ case = testtools.ErrorHolder(
+ "broken-runner-'%s'" % (route_code,),
+ error=sys.exc_info())
+ case.run(process_result)
+ finally:
+ process_result.stopTestRun()
+
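+
+# Illustrative sketch (an assumed helper, not part of the testtools API):
+# run (case, route_code) pairs through a ConcurrentStreamTestSuite and
+# forward every event to a StreamResult. As documented in run() above, the
+# caller owns startTestRun/stopTestRun on the result.
+def _example_concurrent_stream_run(cases_with_routes, result):
+    # cases_with_routes: a list of (case, route_code) tuples, route_code a
+    # unicode string or None - exactly what make_tests must return.
+    result.startTestRun()
+    try:
+        ConcurrentStreamTestSuite(lambda: cases_with_routes).run(result)
+    finally:
+        result.stopTestRun()
+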
+
+class FixtureSuite(unittest.TestSuite):
+
+ def __init__(self, fixture, tests):
+ super(FixtureSuite, self).__init__(tests)
+ self._fixture = fixture
+
+ def run(self, result):
+ self._fixture.setUp()
+ try:
+ super(FixtureSuite, self).run(result)
+ finally:
+ self._fixture.cleanUp()
+
+ def sort_tests(self):
+ self._tests = sorted_tests(self, True)
+
+
+def _flatten_tests(suite_or_case, unpack_outer=False):
+ try:
+ tests = iter(suite_or_case)
+ except TypeError:
+ # Not iterable, assume it's a test case.
+ return [(suite_or_case.id(), suite_or_case)]
+ if (type(suite_or_case) in (unittest.TestSuite,) or
+ unpack_outer):
+ # Plain old test suite (or any others we may add).
+ result = []
+ for test in tests:
+ # Recurse to flatten.
+ result.extend(_flatten_tests(test))
+ return result
+ else:
+ # Find any old actual test and grab its id.
+ suite_id = None
+ tests = iterate_tests(suite_or_case)
+ for test in tests:
+ suite_id = test.id()
+ break
+ # If it has a sort_tests method, call that.
+ if safe_hasattr(suite_or_case, 'sort_tests'):
+ suite_or_case.sort_tests()
+ return [(suite_id, suite_or_case)]
+
+
+def filter_by_ids(suite_or_case, test_ids):
+ """Remove tests from suite_or_case where their id is not in test_ids.
+
+ :param suite_or_case: A test suite or test case.
+ :param test_ids: Something that supports the __contains__ protocol.
+    :return: suite_or_case, unless suite_or_case was a case that itself
+      fails the predicate, in which case a new, empty unittest.TestSuite is
+      returned.
+
+    This helper exists to provide backwards compatibility with older versions
+ of Python (currently all versions :)) that don't have a native
+ filter_by_ids() method on Test(Case|Suite).
+
+ For subclasses of TestSuite, filtering is done by:
+ - attempting to call suite.filter_by_ids(test_ids)
+ - if there is no method, iterating the suite and identifying tests to
+ remove, then removing them from _tests, manually recursing into
+ each entry.
+
+ For objects with an id() method - TestCases, filtering is done by:
+ - attempting to return case.filter_by_ids(test_ids)
+ - if there is no such method, checking for case.id() in test_ids
+ and returning case if it is, or TestSuite() if it is not.
+
+ For anything else, it is not filtered - it is returned as-is.
+
+    To provide compatibility with this routine for a custom TestSuite, just
+ define a filter_by_ids() method that will return a TestSuite equivalent to
+ the original minus any tests not in test_ids.
+    Similarly, to provide compatibility for a custom TestCase that does
+ something unusual define filter_by_ids to return a new TestCase object
+ that will only run test_ids that are in the provided container. If none
+ would run, return an empty TestSuite().
+
+ The contract for this function does not require mutation - each filtered
+    object can choose to return a new object with the filtered tests.
+    However, because existing custom TestSuite classes in the wild do not have
+    this method, we need a way to copy their state correctly, which is tricky:
+ thus the backwards-compatible code paths attempt to mutate in place rather
+ than guessing how to reconstruct a new suite.
+ """
+ # Compatible objects
+ if safe_hasattr(suite_or_case, 'filter_by_ids'):
+ return suite_or_case.filter_by_ids(test_ids)
+ # TestCase objects.
+ if safe_hasattr(suite_or_case, 'id'):
+ if suite_or_case.id() in test_ids:
+ return suite_or_case
+ else:
+ return unittest.TestSuite()
+ # Standard TestSuites or derived classes [assumed to be mutable].
+ if isinstance(suite_or_case, unittest.TestSuite):
+ filtered = []
+ for item in suite_or_case:
+ filtered.append(filter_by_ids(item, test_ids))
+ suite_or_case._tests[:] = filtered
+ # Everything else:
+ return suite_or_case
+
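+
+# Illustrative sketch (an assumed class, not part of the testtools API): a
+# custom suite can opt in to the contract described above by defining its
+# own filter_by_ids(), so this helper never has to mutate it in place.
+class _ExampleFilterableSuite(unittest.TestSuite):
+    def filter_by_ids(self, test_ids):
+        # Return an equivalent suite containing only the requested tests.
+        return _ExampleFilterableSuite(
+            [case for case in iterate_tests(self) if case.id() in test_ids])
+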
+
+def sorted_tests(suite_or_case, unpack_outer=False):
+ """Sort suite_or_case while preserving non-vanilla TestSuites."""
+    # Duplicate test ids can induce a TypeError in Python 3.3.
+    # Detect duplicate test ids and raise an exception when one is found.
+ seen = set()
+ for test_case in iterate_tests(suite_or_case):
+ test_id = test_case.id()
+ if test_id not in seen:
+ seen.add(test_id)
+ else:
+ raise ValueError('Duplicate test id detected: %s' % (test_id,))
+ tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
+ tests.sort()
+ return unittest.TestSuite([test for (sort_key, test) in tests])
diff --git a/test/3rdparty/testtools-0.9.12/testtools/utils.py b/test/3rdparty/testtools-0.9.34/testtools/utils.py
index 0f39d8f5b6e..0f39d8f5b6e 100644
--- a/test/3rdparty/testtools-0.9.12/testtools/utils.py
+++ b/test/3rdparty/testtools-0.9.34/testtools/utils.py
diff --git a/test/suite/run.py b/test/suite/run.py
index 933e272841f..bd808a0544e 100644
--- a/test/suite/run.py
+++ b/test/suite/run.py
@@ -40,9 +40,13 @@ wt_3rdpartydir = os.path.join(wt_disttop, 'test', '3rdparty')
# Cannot import wiredtiger and supporting utils until we set up paths
sys.path.append(os.path.join(wt_builddir, 'lang', 'python'))
sys.path.append(os.path.join(wt_disttop, 'lang', 'python'))
-sys.path.append(os.path.join(wt_3rdpartydir, 'discover-0.4.0'))
-sys.path.append(os.path.join(wt_3rdpartydir, 'testtools-0.9.12'))
-sys.path.append(os.path.join(wt_3rdpartydir, 'testscenarios-0.2', 'lib'))
+
+# Add all 3rd party directories: some have code in subdirectories
+for d in os.listdir(wt_3rdpartydir):
+ for subdir in ('lib', 'python', ''):
+ if os.path.exists(os.path.join(wt_3rdpartydir, d, subdir)):
+ sys.path.append(os.path.join(wt_3rdpartydir, d, subdir))
+ break
import wttest
# Use the same version of unittest found by wttest.py
@@ -62,8 +66,8 @@ Options:\n\
-d | --debug run with \'pdb\', the python debugger\n\
-g | --gdb all subprocesses (like calls to wt) use gdb\n\
-h | --help show this message\n\
+ -j N | --parallel N run all tests in parallel using N processes\n\
-p | --preserve preserve output files in WT_TEST/<testname>\n\
- -P N | --parallel N run all tests in parallel using N processes\n\
-t | --timestamp name WT_TEST according to timestamp\n\
-v N | --verbose N set verboseness to N (0<=N<=3, default=1)\n\
\n\
@@ -224,7 +228,7 @@ if __name__ == '__main__':
if option == '-debug' or option == 'd':
debug = True
continue
- if option == '-parallel' or option == 'P':
+ if option == '-parallel' or option == 'j':
if parallel != 0 or len(args) == 0:
usage()
sys.exit(False)
diff --git a/test/suite/wttest.py b/test/suite/wttest.py
index b81b3cf142d..6cf2beccbec 100644
--- a/test/suite/wttest.py
+++ b/test/suite/wttest.py
@@ -428,7 +428,7 @@ class WiredTigerTestCase(unittest.TestCase):
def runsuite(suite, parallel):
suite_to_run = suite
- if parallel > 0:
+ if parallel > 1:
try:
from concurrencytest import ConcurrentTestSuite, fork_for_tests
except ImportError: