-rw-r--r--  NEWS                                        |  10
-rw-r--r--  README                                      |  33
-rw-r--r--  lib/testscenarios/__init__.py               |   6
-rw-r--r--  lib/testscenarios/scenarios.py              |  37
-rw-r--r--  lib/testscenarios/testcase.py               |  29
-rw-r--r--  lib/testscenarios/tests/__init__.py         |   5
-rw-r--r--  lib/testscenarios/tests/test_scenarios.py   |  22
-rw-r--r--  lib/testscenarios/tests/test_testcase.py    | 167
8 files changed, 146 insertions, 163 deletions
diff --git a/NEWS b/NEWS
index db185e2..b3ff2dc 100644
--- a/NEWS
+++ b/NEWS
@@ -9,6 +9,12 @@ IN DEVELOPMENT
0.2
~~~
+NEW FEATURES:
+
+* New function ``per_module_scenarios`` for tests that should be applied across
+ multiple modules providing the same interface, some of which may not be
+ available at run time. (Martin Pool)
+
CHANGES:
* Adjust the cloned tests ``shortDescription`` if one is present. (Ben Finney)
@@ -16,6 +22,10 @@ CHANGES:
* Provide a load_tests implementation for easy use, and multiply_scenarios to
create the cross product of scenarios. (Martin Pool)
+* ``TestWithScenarios`` is now backed by a mixin - WithScenarios - which can be
+ mixed into different unittest implementations more cleanly (e.g. unittest2).
+ (James Polley, Robert Collins)
+
0.1
~~~
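
A minimal sketch of the mixin behaviour described in the NEWS entries above,
combining ``WithScenarios`` with a different unittest implementation (this
assumes unittest2 is installed; the class, attributes and test names are
invented for illustration)::

    import unittest2

    from testscenarios import WithScenarios


    class TestCompression(WithScenarios, unittest2.TestCase):
        # Each scenario supplies attributes that are set on the cloned test.
        scenarios = [
            ('small', {'payload': b'x' * 10}),
            ('large', {'payload': b'x' * 10000}),
        ]

        def test_payload_is_bytes(self):
            self.assertIsInstance(self.payload, bytes)
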
diff --git a/README b/README
index 887a6ad..002c340 100644
--- a/README
+++ b/README
@@ -111,7 +111,7 @@ implementations::
>>> mytests = loader.loadTestsFromNames(['doc.test_sample'])
>>> test_suite.addTests(generate_scenarios(mytests))
>>> runner.run(test_suite)
- <unittest._TextTestResult run=1 errors=0 failures=0>
+ <unittest...TextTestResult run=1 errors=0 failures=0>
Testloaders
+++++++++++
@@ -258,6 +258,37 @@ allowing it to be used to layer scenarios without affecting existing scenario
selection.
+Generating Scenarios
+====================
+
+Some functions (currently one :-) are available to ease generation of scenario
+lists for common situations.
+
+Testing Per Implementation Module
++++++++++++++++++++++++++++++++++
+
+It is reasonably common to have multiple Python modules that provide the same
+capabilities and interface, and to want to apply the same tests to all of them.
+
+In some cases, not all of the statically defined implementations will be able
+to be used in a particular testing environment. For example, there may be both
+a C and a pure-Python implementation of a module. You want to test the C
+module if it can be loaded, but also to have the tests pass if the C module has
+not been compiled.
+
+The ``per_module_scenarios`` function generates a scenario for each named
+module. The module object of the imported module is set in the supplied
+attribute name of the resulting scenario.
+Modules which raise ``ImportError`` during import will have the
+``sys.exc_info()`` of the exception set instead of the module object. Tests
+can check for the attribute being a tuple to decide what to do (e.g. to skip).
+
+Note that for the test to be valid, all access to the module under test must go
+through the relevant attribute of the test object. If one of the
+implementations is also directly imported by the test module or any other,
+testscenarios will not magically stop it being used.
+
+
Advice on Writing Scenarios
===========================
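
The skip-on-import-failure idiom described in the new README section above
might look roughly like this (the module names and test body are illustrative
only, and ``skipTest`` assumes a unittest with skip support, i.e. Python 2.7+
or unittest2)::

    from testscenarios import TestWithScenarios, per_module_scenarios


    class TestJsonImplementations(TestWithScenarios):

        # Hypothetical example: exercise the C-accelerated and pure-Python
        # json modules through the same tests.
        scenarios = per_module_scenarios(
            'json_impl', [('c', '_json'), ('python', 'json')])

        def setUp(self):
            super(TestJsonImplementations, self).setUp()
            if isinstance(self.json_impl, tuple):
                # per_module_scenarios stored sys.exc_info() from the failed
                # import instead of a module object.
                self.skipTest('implementation not available: %r'
                              % (self.json_impl[1],))

        def test_module_was_imported(self):
            self.assertTrue(hasattr(self.json_impl, '__name__'))
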
diff --git a/lib/testscenarios/__init__.py b/lib/testscenarios/__init__.py
index c1f4c0f..f8cb056 100644
--- a/lib/testscenarios/__init__.py
+++ b/lib/testscenarios/__init__.py
@@ -42,12 +42,13 @@ __version__ = (0, 2, 0, 'final', 0)
__all__ = [
'TestWithScenarios',
- 'MixableTestWithScenarios',
+ 'WithScenarios',
'apply_scenario',
'apply_scenarios',
'generate_scenarios',
'load_tests_apply_scenarios',
'multiply_scenarios',
+ 'per_module_scenarios',
]
@@ -58,8 +59,9 @@ from testscenarios.scenarios import (
generate_scenarios,
load_tests_apply_scenarios,
multiply_scenarios,
+ per_module_scenarios,
)
-from testscenarios.testcase import TestWithScenarios, MixableTestWithScenarios
+from testscenarios.testcase import TestWithScenarios, WithScenarios
def test_suite():
diff --git a/lib/testscenarios/scenarios.py b/lib/testscenarios/scenarios.py
index 80847d6..9538b33 100644
--- a/lib/testscenarios/scenarios.py
+++ b/lib/testscenarios/scenarios.py
@@ -2,7 +2,7 @@
# dependency injection ('scenarios') by tests.
#
# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
-# Copyright (c) 2010 Martin Pool <mbp@sourcefrog.net>
+# Copyright (c) 2010, 2011 Martin Pool <mbp@sourcefrog.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
@@ -27,6 +27,7 @@ from itertools import (
chain,
product,
)
+import sys
import unittest
from testtools.testcase import clone_test_with_new_id
@@ -129,3 +130,37 @@ def multiply_scenarios(*scenarios):
scenario_parameters.update(parameter)
result.append((scenario_name, scenario_parameters))
return result
+
+
+def per_module_scenarios(attribute_name, modules):
+ """Generate scenarios for available implementation modules.
+
+ This is typically used when there is a subsystem implemented, for
+ example, in both Python and C, and we want to apply the same tests to
+ both, but the C module may sometimes not be available.
+
+ Note: if the module can't be loaded, the sys.exc_info() tuple for the
+ exception raised during import of the module is used instead of the module
+ object. A common idiom is to check in setUp for that and raise a skip or
+ error for that case. No special helpers are supplied in testscenarios as
+ yet.
+
+ :param attribute_name: A name to be set in the scenario parameter
+ dictionary (and thence onto the test instance) pointing to the
+ implementation module (or import exception) for this scenario.
+
+ :param modules: An iterable of (short_name, module_name), where
+ the short name is something like 'python' to put in the
+ scenario name, and the module name is a fully-qualified Python module
+ name.
+ """
+ scenarios = []
+ for short_name, module_name in modules:
+ try:
+ mod = __import__(module_name, {}, {}, [''])
+ except:
+ mod = sys.exc_info()
+ scenarios.append((
+ short_name,
+ {attribute_name: mod}))
+ return scenarios
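
For reference, the value returned by the function added above is an ordinary
scenario list; a rough sketch of its shape, using one real and one
deliberately missing module name::

    from testscenarios import per_module_scenarios

    scenarios = per_module_scenarios(
        'mod', [('os', 'os'), ('missing', 'no_such_module')])

    # scenarios[0] == ('os', {'mod': <the imported os module object>})
    # scenarios[1] == ('missing', {'mod': <sys.exc_info() tuple from the
    #                                      failed import>})
    # Tests can therefore detect a failed import by checking whether the
    # attribute value is a tuple.
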
diff --git a/lib/testscenarios/testcase.py b/lib/testscenarios/testcase.py
index 33c6f9e..2ab50c7 100644
--- a/lib/testscenarios/testcase.py
+++ b/lib/testscenarios/testcase.py
@@ -16,6 +16,7 @@
__all__ = [
'TestWithScenarios',
+ 'WithScenarios',
]
import unittest
@@ -24,16 +25,18 @@ from testtools.testcase import clone_test_with_new_id
from testscenarios.scenarios import generate_scenarios
-class MixableTestWithScenarios(object):
- """A TestCase with support for scenarios via a scenarios attribute.
-
- When a test object which is an instance of TestWithScenarios is run,
- and there is a non-empty scenarios attribute on the object, the test is
- multiplied by the run method into one test per scenario. For this to work
- reliably the TestWithScenarios.run method must not be overriden in a
- subclass (or overridden compatibly with TestWithScenarios).
+_doc = """
+ When a test object which inherits from WithScenarios is run, and there is a
+ non-empty scenarios attribute on the object, the test is multiplied by the
+ run method into one test per scenario. For this to work reliably the
+ WithScenarios.run method must not be overridden in a subclass (or overridden
+ compatibly with WithScenarios).
"""
+class WithScenarios(object):
+ __doc__ = """A mixin for TestCase with support for declarative scenarios.
+ """ + _doc
+
def _get_scenarios(self):
return getattr(self, 'scenarios', None)
@@ -50,7 +53,7 @@ class MixableTestWithScenarios(object):
for test in generate_scenarios(self):
test.debug()
else:
- return super(MixableTestWithScenarios, self).debug()
+ return super(WithScenarios, self).debug()
def run(self, result=None):
scenarios = self._get_scenarios()
@@ -59,7 +62,9 @@ class MixableTestWithScenarios(object):
test.run(result)
return
else:
- return super(MixableTestWithScenarios, self).run(result)
+ return super(WithScenarios, self).run(result)
+
-class TestWithScenarios(MixableTestWithScenarios, unittest.TestCase):
- pass
+class TestWithScenarios(WithScenarios, unittest.TestCase):
+ __doc__ = """Unittest TestCase with support for declarative scenarios.
+ """ + _doc
diff --git a/lib/testscenarios/tests/__init__.py b/lib/testscenarios/tests/__init__.py
index e5e2bbe..8e243b6 100644
--- a/lib/testscenarios/tests/__init__.py
+++ b/lib/testscenarios/tests/__init__.py
@@ -38,5 +38,6 @@ def load_tests(standard_tests, module, loader):
test_mod_names = [prefix + test_module for test_module in test_modules]
standard_tests.addTests(loader.loadTestsFromNames(test_mod_names))
doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
- standard_tests.addTest(doctest.DocFileSuite("../../../README"))
- return standard_tests
+ standard_tests.addTest(
+ doctest.DocFileSuite("../../../README", optionflags=doctest.ELLIPSIS))
+ return loader.suiteClass(testscenarios.generate_scenarios(standard_tests))
diff --git a/lib/testscenarios/tests/test_scenarios.py b/lib/testscenarios/tests/test_scenarios.py
index 063df51..97aa17f 100644
--- a/lib/testscenarios/tests/test_scenarios.py
+++ b/lib/testscenarios/tests/test_scenarios.py
@@ -2,7 +2,7 @@
# dependency injection ('scenarios') by tests.
#
# Copyright (c) 2009, Robert Collins <robertc@robertcollins.net>
-# Copyright (c) 2010 Martin Pool <mbp@sourcefrog.net>
+# Copyright (c) 2010, 2011 Martin Pool <mbp@sourcefrog.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
@@ -239,3 +239,23 @@ class TestMultiplyScenarios(testtools.TestCase):
self.assertEqual(
'a,a,a,a',
scenarios[0][0])
+
+
+class TestPerModuleScenarios(testtools.TestCase):
+
+ def test_per_module_scenarios(self):
+ """Generate scenarios for available modules"""
+ s = testscenarios.scenarios.per_module_scenarios(
+ 'the_module', [
+ ('Python', 'testscenarios'),
+ ('unittest', 'unittest'),
+ ('nonexistent', 'nonexistent'),
+ ])
+ self.assertEqual('nonexistent', s[-1][0])
+ self.assertIsInstance(s[-1][1]['the_module'], tuple)
+ s[-1][1]['the_module'] = None
+ self.assertEqual(s, [
+ ('Python', {'the_module': testscenarios}),
+ ('unittest', {'the_module': unittest}),
+ ('nonexistent', {'the_module': None}),
+ ])
diff --git a/lib/testscenarios/tests/test_testcase.py b/lib/testscenarios/tests/test_testcase.py
index 867f34a..74d2fe1 100644
--- a/lib/testscenarios/tests/test_testcase.py
+++ b/lib/testscenarios/tests/test_testcase.py
@@ -17,13 +17,25 @@
import unittest
import testscenarios
+import testtools
from testtools.tests.helpers import LoggingResult
-class TestTestWithScenarios(unittest.TestCase):
+class TestTestWithScenarios(testtools.TestCase):
+
+ scenarios = testscenarios.scenarios.per_module_scenarios(
+ 'impl', (('unittest', 'unittest'), ('unittest2', 'unittest2')))
+
+ @property
+ def Implementation(self):
+ if isinstance(self.impl, tuple):
+ self.skipTest('import failed - module not installed?')
+ class Implementation(testscenarios.WithScenarios, self.impl.TestCase):
+ pass
+ return Implementation
def test_no_scenarios_no_error(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
def test_pass(self):
pass
test = ReferenceTest("test_pass")
@@ -33,7 +45,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(1, result.testsRun)
def test_with_one_scenario_one_run(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [('demo', {})]
def test_pass(self):
pass
@@ -48,7 +60,7 @@ class TestTestWithScenarios(unittest.TestCase):
log[0][1].id())
def test_with_two_scenarios_two_run(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [('1', {}), ('2', {})]
def test_pass(self):
pass
@@ -66,7 +78,7 @@ class TestTestWithScenarios(unittest.TestCase):
log[4][1].id())
def test_attributes_set(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -80,7 +92,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(2, result.testsRun)
def test_scenarios_attribute_cleared(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -97,14 +109,14 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(None, log[4][1].scenarios)
def test_countTestCases_no_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
def test_check_foo(self):
pass
test = ReferenceTest("test_check_foo")
self.assertEqual(1, test.countTestCases())
def test_countTestCases_empty_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = []
def test_check_foo(self):
pass
@@ -112,7 +124,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(1, test.countTestCases())
def test_countTestCases_1_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [('1', {'foo': 1, 'bar': 2})]
def test_check_foo(self):
pass
@@ -120,7 +132,7 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(1, test.countTestCases())
def test_countTestCases_2_scenarios(self):
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -131,7 +143,7 @@ class TestTestWithScenarios(unittest.TestCase):
def test_debug_2_scenarios(self):
log = []
- class ReferenceTest(testscenarios.TestWithScenarios):
+ class ReferenceTest(self.Implementation):
scenarios = [
('1', {'foo': 1, 'bar': 2}),
('2', {'foo': 2, 'bar': 4})]
@@ -143,136 +155,3 @@ class TestTestWithScenarios(unittest.TestCase):
self.assertEqual(None, log[0].scenarios)
self.assertEqual(None, log[1].scenarios)
self.assertNotEqual(log[0].id(), log[1].id())
-
-try:
- import unittest2
- class TestWithScenariosWithUnittest2(testscenarios.MixableTestWithScenarios, unittest2.TestCase):
- pass
-
- class TestTestWithScenariosWithInittest2(unittest.TestCase):
-
- def test_no_scenarios_no_error(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- def test_pass(self):
- pass
- test = ReferenceTest("test_pass")
- result = unittest.TestResult()
- test.run(result)
- self.assertTrue(result.wasSuccessful())
- self.assertEqual(1, result.testsRun)
-
- def test_with_one_scenario_one_run(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [('demo', {})]
- def test_pass(self):
- pass
- test = ReferenceTest("test_pass")
- log = []
- result = LoggingResult(log)
- test.run(result)
- self.assertTrue(result.wasSuccessful())
- self.assertEqual(1, result.testsRun)
- self.assertEqual(
- 'testscenarios.tests.test_testcase.ReferenceTest.test_pass(demo)',
- log[0][1].id())
-
- def test_with_two_scenarios_two_run(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [('1', {}), ('2', {})]
- def test_pass(self):
- pass
- test = ReferenceTest("test_pass")
- log = []
- result = LoggingResult(log)
- test.run(result)
- self.assertTrue(result.wasSuccessful())
- self.assertEqual(2, result.testsRun)
- self.assertEqual(
- 'testscenarios.tests.test_testcase.ReferenceTest.test_pass(1)',
- log[0][1].id())
- self.assertEqual(
- 'testscenarios.tests.test_testcase.ReferenceTest.test_pass(2)',
- log[4][1].id())
-
- def test_attributes_set(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [
- ('1', {'foo': 1, 'bar': 2}),
- ('2', {'foo': 2, 'bar': 4})]
- def test_check_foo(self):
- self.assertEqual(self.foo * 2, self.bar)
- test = ReferenceTest("test_check_foo")
- log = []
- result = LoggingResult(log)
- test.run(result)
- self.assertTrue(result.wasSuccessful())
- self.assertEqual(2, result.testsRun)
-
- def test_scenarios_attribute_cleared(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [
- ('1', {'foo': 1, 'bar': 2}),
- ('2', {'foo': 2, 'bar': 4})]
- def test_check_foo(self):
- self.assertEqual(self.foo * 2, self.bar)
- test = ReferenceTest("test_check_foo")
- log = []
- result = LoggingResult(log)
- test.run(result)
- self.assertTrue(result.wasSuccessful())
- self.assertEqual(2, result.testsRun)
- self.assertNotEqual(None, test.scenarios)
- self.assertEqual(None, log[0][1].scenarios)
- self.assertEqual(None, log[4][1].scenarios)
-
- def test_countTestCases_no_scenarios(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- def test_check_foo(self):
- pass
- test = ReferenceTest("test_check_foo")
- self.assertEqual(1, test.countTestCases())
-
- def test_countTestCases_empty_scenarios(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = []
- def test_check_foo(self):
- pass
- test = ReferenceTest("test_check_foo")
- self.assertEqual(1, test.countTestCases())
-
- def test_countTestCases_1_scenarios(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [('1', {'foo': 1, 'bar': 2})]
- def test_check_foo(self):
- pass
- test = ReferenceTest("test_check_foo")
- self.assertEqual(1, test.countTestCases())
-
- def test_countTestCases_2_scenarios(self):
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [
- ('1', {'foo': 1, 'bar': 2}),
- ('2', {'foo': 2, 'bar': 4})]
- def test_check_foo(self):
- pass
- test = ReferenceTest("test_check_foo")
- self.assertEqual(2, test.countTestCases())
-
- def test_debug_2_scenarios(self):
- log = []
- class ReferenceTest(TestWithScenariosWithUnittest2):
- scenarios = [
- ('1', {'foo': 1, 'bar': 2}),
- ('2', {'foo': 2, 'bar': 4})]
- def test_check_foo(self):
- log.append(self)
- test = ReferenceTest("test_check_foo")
- test.debug()
- self.assertEqual(2, len(log))
- self.assertEqual(None, log[0].scenarios)
- self.assertEqual(None, log[1].scenarios)
- self.assertNotEqual(log[0].id(), log[1].id())
-
-
-except ImportError:
- pass