summary refs log tree commit diff
path: root/numpy/testing
diff options
context:
space:
mode:
Diffstat (limited to 'numpy/testing')
-rw-r--r--  numpy/testing/__init__.py      2
-rw-r--r--  numpy/testing/noseclasses.py   27
-rw-r--r--  numpy/testing/nosetester.py    106
-rw-r--r--  numpy/testing/numpytest.py     647
-rw-r--r--  numpy/testing/parametric.py    311
5 files changed, 67 insertions, 1026 deletions
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index 53941fd76..f391c8053 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -5,12 +5,10 @@ in a single location, so that test scripts can just import it and work right
away.
"""
-#import unittest
from unittest import TestCase
import decorators as dec
from utils import *
-from parametric import ParametricTestCase
from numpytest import *
from nosetester import NoseTester as Tester
from nosetester import run_module_suite
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index ef2f90745..05d244083 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -1,4 +1,6 @@
-# These classes implement a doctest runner plugin for nose.
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
@@ -6,6 +8,7 @@
import os
import doctest
+import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
@@ -275,3 +278,25 @@ class KnownFailure(ErrorClassPlugin):
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
+
+
+
+# Because nose currently discards the test result object, but we need
+# to return it to the user, override TestProgram.runTests to retain
+# the result
+class NumpyTestProgram(nose.core.TestProgram):
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and
+ sets self.success to the same value.
+ """
+ if self.testRunner is None:
+ self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+
+ self.result = self.testRunner.run(self.test)
+ self.success = self.result.wasSuccessful()
+ return self.success
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index bd4c78308..7a10a5b1f 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -5,7 +5,6 @@ Implements test and bench functions for modules.
'''
import os
import sys
-import warnings
def get_package_name(filepath):
# find the package name given a path name that's part of the package
@@ -28,7 +27,6 @@ def get_package_name(filepath):
pkg_name.reverse()
return '.'.join(pkg_name)
-
def import_nose():
""" Import nose only when needed.
"""
@@ -166,8 +164,8 @@ class NoseTester(object):
print "nose version %d.%d.%d" % nose.__versioninfo__
- def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
- coverage=False, **kwargs):
+ def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False):
''' Run tests for module using nose
%(test_header)s
@@ -179,39 +177,6 @@ class NoseTester(object):
http://nedbatchelder.com/code/modules/coverage.html)
'''
- old_args = set(['level', 'verbosity', 'all', 'sys_argv',
- 'testcase_pattern'])
- unexpected_args = set(kwargs.keys()) - old_args
- if len(unexpected_args) > 0:
- ua = ', '.join(unexpected_args)
- raise TypeError("test() got unexpected arguments: %s" % ua)
-
- # issue a deprecation warning if any of the pre-1.2 arguments to
- # test are given
- if old_args.intersection(kwargs.keys()):
- warnings.warn("This method's signature will change in the next " \
- "release; the level, verbosity, all, sys_argv, " \
- "and testcase_pattern keyword arguments will be " \
- "removed. Please update your code.",
- DeprecationWarning, stacklevel=2)
-
- # Use old arguments if given (where it makes sense)
- # For the moment, level and sys_argv are ignored
-
- # replace verbose with verbosity
- if kwargs.get('verbosity') is not None:
- verbose = kwargs.get('verbosity')
- # cap verbosity at 3 because nose becomes *very* verbose beyond that
- verbose = min(verbose, 3)
-
- import utils
- utils.verbose = verbose
-
- # if all evaluates as True, omit attribute filter and run doctests
- if kwargs.get('all'):
- label = ''
- doctests = True
-
# if doctests is in the extra args, remove it and set the doctest
# flag so the NumPy doctester is used instead
if extra_argv and '--with-doctest' in extra_argv:
@@ -221,9 +186,6 @@ class NoseTester(object):
argv = self._test_argv(label, verbose, extra_argv)
if doctests:
argv += ['--with-numpydoctest']
- print "Running unit tests and doctests for %s" % self.package_name
- else:
- print "Running unit tests for %s" % self.package_name
if coverage:
argv+=['--cover-package=%s' % self.package_name, '--with-coverage',
@@ -237,33 +199,8 @@ class NoseTester(object):
argv += ['--exclude','swig_ext']
argv += ['--exclude','array_from_pyobj']
- self._show_system_info()
-
nose = import_nose()
- # Because nose currently discards the test result object, but we need
- # to return it to the user, override TestProgram.runTests to retain
- # the result
- class NumpyTestProgram(nose.core.TestProgram):
- def runTests(self):
- """Run Tests. Returns true on success, false on failure, and
- sets self.success to the same value.
- """
- if self.testRunner is None:
- self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
- verbosity=self.config.verbosity,
- config=self.config)
- plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
- if plug_runner is not None:
- self.testRunner = plug_runner
- self.result = self.testRunner.run(self.test)
- self.success = self.result.wasSuccessful()
- return self.success
-
- # reset doctest state on every run
- import doctest
- doctest.master = None
-
# construct list of plugins, omitting the existing doctest plugin
import nose.plugins.builtin
from noseclasses import NumpyDoctest, KnownFailure
@@ -271,10 +208,46 @@ class NoseTester(object):
for p in nose.plugins.builtin.plugins:
plug = p()
if plug.name == 'doctest':
+ # skip the builtin doctest plugin
continue
plugins.append(plug)
+ return argv, plugins
+
+ def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
+ coverage=False):
+ ''' Run tests for module using nose
+
+ %(test_header)s
+ doctests : boolean
+ If True, run doctests in module, default False
+ coverage : boolean
+ If True, report coverage of NumPy code, default False
+ (Requires the coverage module:
+ http://nedbatchelder.com/code/modules/coverage.html)
+ '''
+
+ # cap verbosity at 3 because nose becomes *very* verbose beyond that
+ verbose = min(verbose, 3)
+
+ import utils
+ utils.verbose = verbose
+
+ if doctests:
+ print "Running unit tests and doctests for %s" % self.package_name
+ else:
+ print "Running unit tests for %s" % self.package_name
+
+ self._show_system_info()
+
+ # reset doctest state on every run
+ import doctest
+ doctest.master = None
+
+ argv, plugins = self.prepare_test_args(label, verbose, extra_argv,
+ doctests, coverage)
+ from noseclasses import NumpyTestProgram
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
return t.result
@@ -286,9 +259,10 @@ class NoseTester(object):
print "Running benchmarks for %s" % self.package_name
self._show_system_info()
- nose = import_nose()
argv = self._test_argv(label, verbose, extra_argv)
argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+ nose = import_nose()
return nose.run(argv=argv)
# generate method docstrings
diff --git a/numpy/testing/numpytest.py b/numpy/testing/numpytest.py
index c08215383..5ef2cc7f5 100644
--- a/numpy/testing/numpytest.py
+++ b/numpy/testing/numpytest.py
@@ -1,91 +1,16 @@
import os
-import re
import sys
-import imp
-import types
-import unittest
import traceback
-import warnings
-__all__ = ['set_package_path', 'set_local_path', 'restore_path',
- 'IgnoreException', 'NumpyTestCase', 'NumpyTest', 'importall',]
+__all__ = ['IgnoreException', 'importall',]
DEBUG=0
-from numpy.testing.utils import jiffies
get_frame = sys._getframe
class IgnoreException(Exception):
"Ignoring this exception due to disabled feature"
-def set_package_path(level=1):
- """ Prepend package directory to sys.path.
-
- set_package_path should be called from a test_file.py that
- satisfies the following tree structure:
-
- <somepath>/<somedir>/test_file.py
-
- Then the first existing path name from the following list
-
- <somepath>/build/lib.<platform>-<version>
- <somepath>/..
-
- is prepended to sys.path.
- The caller is responsible for removing this path by using
-
- restore_path()
- """
- warnings.warn("set_package_path will be removed in NumPy 1.3; please "
- "update your code", DeprecationWarning, stacklevel=2)
-
- from distutils.util import get_platform
- f = get_frame(level)
- if f.f_locals['__name__']=='__main__':
- testfile = sys.argv[0]
- else:
- testfile = f.f_locals['__file__']
- d = os.path.dirname(os.path.dirname(os.path.abspath(testfile)))
- d1 = os.path.join(d,'build','lib.%s-%s'%(get_platform(),sys.version[:3]))
- if not os.path.isdir(d1):
- d1 = os.path.dirname(d)
- if DEBUG:
- print 'Inserting %r to sys.path for test_file %r' % (d1, testfile)
- sys.path.insert(0,d1)
- return
-
-
-def set_local_path(reldir='', level=1):
- """ Prepend local directory to sys.path.
-
- The caller is responsible for removing this path by using
-
- restore_path()
- """
- warnings.warn("set_local_path will be removed in NumPy 1.3; please "
- "update your code", DeprecationWarning, stacklevel=2)
-
- f = get_frame(level)
- if f.f_locals['__name__']=='__main__':
- testfile = sys.argv[0]
- else:
- testfile = f.f_locals['__file__']
- local_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(testfile)),reldir))
- if DEBUG:
- print 'Inserting %r to sys.path' % (local_path)
- sys.path.insert(0,local_path)
- return
-
-def restore_path():
- warnings.warn("restore_path will be removed in NumPy 1.3; please "
- "update your code", DeprecationWarning, stacklevel=2)
-
- if DEBUG:
- print 'Removing %r from sys.path' % (sys.path[0])
- del sys.path[0]
- return
-
-
def output_exception(printstream = sys.stdout):
try:
type, value, tb = sys.exc_info()
@@ -99,576 +24,6 @@ def output_exception(printstream = sys.stdout):
type = value = tb = None # clean up
return
-
-class _dummy_stream:
- def __init__(self,stream):
- self.data = []
- self.stream = stream
- def write(self,message):
- if not self.data and not message.startswith('E'):
- self.stream.write(message)
- self.stream.flush()
- message = ''
- self.data.append(message)
- def writeln(self,message):
- self.write(message+'\n')
- def flush(self):
- self.stream.flush()
-
-
-class NumpyTestCase (unittest.TestCase):
- def __init__(self, *args, **kwds):
- warnings.warn("NumpyTestCase will be removed in the next release; please update your code to use nose or unittest",
- DeprecationWarning, stacklevel=2)
- unittest.TestCase.__init__(self, *args, **kwds)
-
- def measure(self,code_str,times=1):
- """ Return elapsed time for executing code_str in the
- namespace of the caller for given times.
- """
- frame = get_frame(1)
- locs,globs = frame.f_locals,frame.f_globals
- code = compile(code_str,
- 'NumpyTestCase runner for '+self.__class__.__name__,
- 'exec')
- i = 0
- elapsed = jiffies()
- while i<times:
- i += 1
- exec code in globs,locs
- elapsed = jiffies() - elapsed
- return 0.01*elapsed
-
- def __call__(self, result=None):
- if result is None or not hasattr(result, 'errors') \
- or not hasattr(result, 'stream'):
- return unittest.TestCase.__call__(self, result)
-
- nof_errors = len(result.errors)
- save_stream = result.stream
- result.stream = _dummy_stream(save_stream)
- unittest.TestCase.__call__(self, result)
- if nof_errors != len(result.errors):
- test, errstr = result.errors[-1][:2]
- if isinstance(errstr, tuple):
- errstr = str(errstr[0])
- elif isinstance(errstr, str):
- errstr = errstr.split('\n')[-2]
- else:
- # allow for proxy classes
- errstr = str(errstr).split('\n')[-2]
- l = len(result.stream.data)
- if errstr.startswith('IgnoreException:'):
- if l==1:
- assert result.stream.data[-1]=='E', \
- repr(result.stream.data)
- result.stream.data[-1] = 'i'
- else:
- assert result.stream.data[-1]=='ERROR\n', \
- repr(result.stream.data)
- result.stream.data[-1] = 'ignoring\n'
- del result.errors[-1]
- map(save_stream.write, result.stream.data)
- save_stream.flush()
- result.stream = save_stream
-
- def warn(self, message):
- from numpy.distutils.misc_util import yellow_text
- print>>sys.stderr,yellow_text('Warning: %s' % (message))
- sys.stderr.flush()
- def info(self, message):
- print>>sys.stdout, message
- sys.stdout.flush()
-
- def rundocs(self, filename=None):
- """ Run doc string tests found in filename.
- """
- import doctest
- if filename is None:
- f = get_frame(1)
- filename = f.f_globals['__file__']
- name = os.path.splitext(os.path.basename(filename))[0]
- path = [os.path.dirname(filename)]
- file, pathname, description = imp.find_module(name, path)
- try:
- m = imp.load_module(name, file, pathname, description)
- finally:
- file.close()
- if sys.version[:3]<'2.4':
- doctest.testmod(m, verbose=False)
- else:
- tests = doctest.DocTestFinder().find(m)
- runner = doctest.DocTestRunner(verbose=False)
- for test in tests:
- runner.run(test)
- return
-
-
-def _get_all_method_names(cls):
- names = dir(cls)
- if sys.version[:3]<='2.1':
- for b in cls.__bases__:
- for n in dir(b)+_get_all_method_names(b):
- if n not in names:
- names.append(n)
- return names
-
-
-# for debug build--check for memory leaks during the test.
-class _NumPyTextTestResult(unittest._TextTestResult):
- def startTest(self, test):
- unittest._TextTestResult.startTest(self, test)
- if self.showAll:
- N = len(sys.getobjects(0))
- self._totnumobj = N
- self._totrefcnt = sys.gettotalrefcount()
- return
-
- def stopTest(self, test):
- if self.showAll:
- N = len(sys.getobjects(0))
- self.stream.write("objects: %d ===> %d; " % (self._totnumobj, N))
- self.stream.write("refcnts: %d ===> %d\n" % (self._totrefcnt,
- sys.gettotalrefcount()))
- return
-
-class NumPyTextTestRunner(unittest.TextTestRunner):
- def _makeResult(self):
- return _NumPyTextTestResult(self.stream, self.descriptions, self.verbosity)
-
-
-class NumpyTest:
- """ Numpy tests site manager.
-
- Usage: NumpyTest(<package>).test(level=1,verbosity=1)
-
- <package> is package name or its module object.
-
- Package is supposed to contain a directory tests/ with test_*.py
- files where * refers to the names of submodules. See .rename()
- method to redefine name mapping between test_*.py files and names of
- submodules. Pattern test_*.py can be overwritten by redefining
- .get_testfile() method.
-
- test_*.py files are supposed to define a classes, derived from
- NumpyTestCase or unittest.TestCase, with methods having names
- starting with test or bench or check. The names of TestCase classes
- must have a prefix test. This can be overwritten by redefining
- .check_testcase_name() method.
-
- And that is it! No need to implement test or test_suite functions
- in each .py file.
-
- Old-style test_suite(level=1) hooks are also supported.
- """
- _check_testcase_name = re.compile(r'test.*|Test.*').match
- def check_testcase_name(self, name):
- """ Return True if name matches TestCase class.
- """
- return not not self._check_testcase_name(name)
-
- testfile_patterns = ['test_%(modulename)s.py']
- def get_testfile(self, module, verbosity = 0):
- """ Return path to module test file.
- """
- mstr = self._module_str
- short_module_name = self._get_short_module_name(module)
- d = os.path.split(module.__file__)[0]
- test_dir = os.path.join(d,'tests')
- local_test_dir = os.path.join(os.getcwd(),'tests')
- if os.path.basename(os.path.dirname(local_test_dir)) \
- == os.path.basename(os.path.dirname(test_dir)):
- test_dir = local_test_dir
- for pat in self.testfile_patterns:
- fn = os.path.join(test_dir, pat % {'modulename':short_module_name})
- if os.path.isfile(fn):
- return fn
- if verbosity>1:
- self.warn('No test file found in %s for module %s' \
- % (test_dir, mstr(module)))
- return
-
- def __init__(self, package=None):
- warnings.warn("NumpyTest will be removed in the next release; please update your code to use nose or unittest",
- DeprecationWarning, stacklevel=2)
- if package is None:
- from numpy.distutils.misc_util import get_frame
- f = get_frame(1)
- package = f.f_locals.get('__name__',f.f_globals.get('__name__',None))
- assert package is not None
- self.package = package
- self._rename_map = {}
-
- def rename(self, **kws):
- """Apply renaming submodule test file test_<name>.py to
- test_<newname>.py.
-
- Usage: self.rename(name='newname') before calling the
- self.test() method.
-
- If 'newname' is None, then no tests will be executed for a given
- module.
- """
- for k,v in kws.items():
- self._rename_map[k] = v
- return
-
- def _module_str(self, module):
- filename = module.__file__[-30:]
- if filename!=module.__file__:
- filename = '...'+filename
- return '<module %r from %r>' % (module.__name__, filename)
-
- def _get_method_names(self,clsobj,level):
- names = []
- for mthname in _get_all_method_names(clsobj):
- if mthname[:5] not in ['bench','check'] \
- and mthname[:4] not in ['test']:
- continue
- mth = getattr(clsobj, mthname)
- if type(mth) is not types.MethodType:
- continue
- d = mth.im_func.func_defaults
- if d is not None:
- mthlevel = d[0]
- else:
- mthlevel = 1
- if level>=mthlevel:
- if mthname not in names:
- names.append(mthname)
- for base in clsobj.__bases__:
- for n in self._get_method_names(base,level):
- if n not in names:
- names.append(n)
- return names
-
- def _get_short_module_name(self, module):
- d,f = os.path.split(module.__file__)
- short_module_name = os.path.splitext(os.path.basename(f))[0]
- if short_module_name=='__init__':
- short_module_name = module.__name__.split('.')[-1]
- short_module_name = self._rename_map.get(short_module_name,short_module_name)
- return short_module_name
-
- def _get_module_tests(self, module, level, verbosity):
- mstr = self._module_str
-
- short_module_name = self._get_short_module_name(module)
- if short_module_name is None:
- return []
-
- test_file = self.get_testfile(module, verbosity)
-
- if test_file is None:
- return []
-
- if not os.path.isfile(test_file):
- if short_module_name[:5]=='info_' \
- and short_module_name[5:]==module.__name__.split('.')[-2]:
- return []
- if short_module_name in ['__cvs_version__','__svn_version__']:
- return []
- if short_module_name[-8:]=='_version' \
- and short_module_name[:-8]==module.__name__.split('.')[-2]:
- return []
- if verbosity>1:
- self.warn(test_file)
- self.warn(' !! No test file %r found for %s' \
- % (os.path.basename(test_file), mstr(module)))
- return []
-
- if test_file in self.test_files:
- return []
-
- parent_module_name = '.'.join(module.__name__.split('.')[:-1])
- test_module_name,ext = os.path.splitext(os.path.basename(test_file))
- test_dir_module = parent_module_name+'.tests'
- test_module_name = test_dir_module+'.'+test_module_name
-
- if test_dir_module not in sys.modules:
- sys.modules[test_dir_module] = imp.new_module(test_dir_module)
-
- old_sys_path = sys.path[:]
- try:
- f = open(test_file,'r')
- test_module = imp.load_module(test_module_name, f,
- test_file, ('.py', 'r', 1))
- f.close()
- except:
- sys.path[:] = old_sys_path
- self.warn('FAILURE importing tests for %s' % (mstr(module)))
- output_exception(sys.stderr)
- return []
- sys.path[:] = old_sys_path
-
- self.test_files.append(test_file)
-
- return self._get_suite_list(test_module, level, module.__name__)
-
- def _get_suite_list(self, test_module, level, module_name='__main__',
- verbosity=1):
- suite_list = []
- if hasattr(test_module, 'test_suite'):
- suite_list.extend(test_module.test_suite(level)._tests)
- for name in dir(test_module):
- obj = getattr(test_module, name)
- if type(obj) is not type(unittest.TestCase) \
- or not issubclass(obj, unittest.TestCase) \
- or not self.check_testcase_name(obj.__name__):
- continue
- for mthname in self._get_method_names(obj,level):
- suite = obj(mthname)
- if getattr(suite,'isrunnable',lambda mthname:1)(mthname):
- suite_list.append(suite)
- matched_suite_list = [suite for suite in suite_list \
- if self.testcase_match(suite.id()\
- .replace('__main__.',''))]
- if verbosity>=0:
- self.info(' Found %s/%s tests for %s' \
- % (len(matched_suite_list), len(suite_list), module_name))
- return matched_suite_list
-
- def _test_suite_from_modules(self, this_package, level, verbosity):
- package_name = this_package.__name__
- modules = []
- for name, module in sys.modules.items():
- if not name.startswith(package_name) or module is None:
- continue
- if not hasattr(module,'__file__'):
- continue
- if os.path.basename(os.path.dirname(module.__file__))=='tests':
- continue
- modules.append((name, module))
-
- modules.sort()
- modules = [m[1] for m in modules]
-
- self.test_files = []
- suites = []
- for module in modules:
- suites.extend(self._get_module_tests(module, abs(level), verbosity))
-
- suites.extend(self._get_suite_list(sys.modules[package_name],
- abs(level), verbosity=verbosity))
- return unittest.TestSuite(suites)
-
- def _test_suite_from_all_tests(self, this_package, level, verbosity):
- importall(this_package)
- package_name = this_package.__name__
-
- # Find all tests/ directories under the package
- test_dirs_names = {}
- for name, module in sys.modules.items():
- if not name.startswith(package_name) or module is None:
- continue
- if not hasattr(module, '__file__'):
- continue
- d = os.path.dirname(module.__file__)
- if os.path.basename(d)=='tests':
- continue
- d = os.path.join(d, 'tests')
- if not os.path.isdir(d):
- continue
- if d in test_dirs_names:
- continue
- test_dir_module = '.'.join(name.split('.')[:-1]+['tests'])
- test_dirs_names[d] = test_dir_module
-
- test_dirs = test_dirs_names.keys()
- test_dirs.sort()
-
- # For each file in each tests/ directory with a test case in it,
- # import the file, and add the test cases to our list
- suite_list = []
- testcase_match = re.compile(r'\s*class\s+\w+\s*\(.*TestCase').match
- for test_dir in test_dirs:
- test_dir_module = test_dirs_names[test_dir]
-
- if test_dir_module not in sys.modules:
- sys.modules[test_dir_module] = imp.new_module(test_dir_module)
-
- for fn in os.listdir(test_dir):
- base, ext = os.path.splitext(fn)
- if ext != '.py':
- continue
- f = os.path.join(test_dir, fn)
-
- # check that file contains TestCase class definitions:
- fid = open(f, 'r')
- skip = True
- for line in fid:
- if testcase_match(line):
- skip = False
- break
- fid.close()
- if skip:
- continue
-
- # import the test file
- n = test_dir_module + '.' + base
- # in case test files import local modules
- sys.path.insert(0, test_dir)
- fo = None
- try:
- try:
- fo = open(f)
- test_module = imp.load_module(n, fo, f,
- ('.py', 'U', 1))
- except Exception, msg:
- print 'Failed importing %s: %s' % (f,msg)
- continue
- finally:
- if fo:
- fo.close()
- del sys.path[0]
-
- suites = self._get_suite_list(test_module, level,
- module_name=n,
- verbosity=verbosity)
- suite_list.extend(suites)
-
- all_tests = unittest.TestSuite(suite_list)
- return all_tests
-
- def test(self, level=1, verbosity=1, all=True, sys_argv=[],
- testcase_pattern='.*'):
- """Run Numpy module test suite with level and verbosity.
-
- level:
- None --- do nothing, return None
- < 0 --- scan for tests of level=abs(level),
- don't run them, return TestSuite-list
- > 0 --- scan for tests of level, run them,
- return TestRunner
- > 10 --- run all tests (same as specifying all=True).
- (backward compatibility).
-
- verbosity:
- >= 0 --- show information messages
- > 1 --- show warnings on missing tests
-
- all:
- True --- run all test files (like self.testall())
- False (default) --- only run test files associated with a module
-
- sys_argv --- replacement of sys.argv[1:] during running
- tests.
-
- testcase_pattern --- run only tests that match given pattern.
-
- It is assumed (when all=False) that package tests suite follows
- the following convention: for each package module, there exists
- file <packagepath>/tests/test_<modulename>.py that defines
- TestCase classes (with names having prefix 'test_') with methods
- (with names having prefixes 'check_' or 'bench_'); each of these
- methods are called when running unit tests.
- """
- if level is None: # Do nothing.
- return
-
- if isinstance(self.package, str):
- exec 'import %s as this_package' % (self.package)
- else:
- this_package = self.package
-
- self.testcase_match = re.compile(testcase_pattern).match
-
- if all:
- all_tests = self._test_suite_from_all_tests(this_package,
- level, verbosity)
- else:
- all_tests = self._test_suite_from_modules(this_package,
- level, verbosity)
-
- if level < 0:
- return all_tests
-
- runner = unittest.TextTestRunner(verbosity=verbosity)
- old_sys_argv = sys.argv[1:]
- sys.argv[1:] = sys_argv
- # Use the builtin displayhook. If the tests are being run
- # under IPython (for instance), any doctest test suites will
- # fail otherwise.
- old_displayhook = sys.displayhook
- sys.displayhook = sys.__displayhook__
- try:
- r = runner.run(all_tests)
- finally:
- sys.displayhook = old_displayhook
- sys.argv[1:] = old_sys_argv
- return r
-
- def testall(self, level=1,verbosity=1):
- """ Run Numpy module test suite with level and verbosity.
-
- level:
- None --- do nothing, return None
- < 0 --- scan for tests of level=abs(level),
- don't run them, return TestSuite-list
- > 0 --- scan for tests of level, run them,
- return TestRunner
-
- verbosity:
- >= 0 --- show information messages
- > 1 --- show warnings on missing tests
-
- Different from .test(..) method, this method looks for
- TestCase classes from all files in <packagedir>/tests/
- directory and no assumptions are made for naming the
- TestCase classes or their methods.
- """
- return self.test(level=level, verbosity=verbosity, all=True)
-
- def run(self):
- """ Run Numpy module test suite with level and verbosity
- taken from sys.argv. Requires optparse module.
- """
-
- # delayed import of shlex to reduce startup time
- import shlex
-
- try:
- from optparse import OptionParser
- except ImportError:
- self.warn('Failed to import optparse module, ignoring.')
- return self.test()
- usage = r'usage: %prog [-v <verbosity>] [-l <level>]'\
- r' [-s "<replacement of sys.argv[1:]>"]'\
- r' [-t "<testcase pattern>"]'
- parser = OptionParser(usage)
- parser.add_option("-v", "--verbosity",
- action="store",
- dest="verbosity",
- default=1,
- type='int')
- parser.add_option("-l", "--level",
- action="store",
- dest="level",
- default=1,
- type='int')
- parser.add_option("-s", "--sys-argv",
- action="store",
- dest="sys_argv",
- default='',
- type='string')
- parser.add_option("-t", "--testcase-pattern",
- action="store",
- dest="testcase_pattern",
- default=r'.*',
- type='string')
- (options, args) = parser.parse_args()
- return self.test(options.level,options.verbosity,
- sys_argv=shlex.split(options.sys_argv or ''),
- testcase_pattern=options.testcase_pattern)
-
- def warn(self, message):
- from numpy.distutils.misc_util import yellow_text
- print>>sys.stderr,yellow_text('Warning: %s' % (message))
- sys.stderr.flush()
- def info(self, message):
- print>>sys.stdout, message
- sys.stdout.flush()
-
def importall(package):
"""
Try recursively to import all subpackages under package.
diff --git a/numpy/testing/parametric.py b/numpy/testing/parametric.py
deleted file mode 100644
index 27b9d23c6..000000000
--- a/numpy/testing/parametric.py
+++ /dev/null
@@ -1,311 +0,0 @@
-"""Support for parametric tests in unittest.
-
-:Author: Fernando Perez
-
-Purpose
-=======
-
-Briefly, the main class in this module allows you to easily and cleanly
-(without the gross name-mangling hacks that are normally needed) to write
-unittest TestCase classes that have parametrized tests. That is, tests which
-consist of multiple sub-tests that scan for example a parameter range, but
-where you want each sub-test to:
-
-* count as a separate test in the statistics.
-
-* be run even if others in the group error out or fail.
-
-
-The class offers a simple name-based convention to create such tests (see
-simple example at the end), in one of two ways:
-
-* Each sub-test in a group can be run fully independently, with the
- setUp/tearDown methods being called each time.
-
-* The whole group can be run with setUp/tearDown being called only once for the
- group. This lets you conveniently reuse state that may be very expensive to
- compute for multiple tests. Be careful not to corrupt it!!!
-
-
-Caveats
-=======
-
-This code relies on implementation details of the unittest module (some key
-methods are heavily modified versions of those, after copying them in). So it
-may well break either if you make sophisticated use of the unittest APIs, or if
-unittest itself changes in the future. I have only tested this with Python
-2.5.
-
-"""
-__docformat__ = "restructuredtext en"
-
-import unittest
-import warnings
-
-class _ParametricTestCase(unittest.TestCase):
- """TestCase subclass with support for parametric tests.
-
- Subclasses of this class can implement test methods that return a list of
- tests and arguments to call those with, to do parametric testing (often
- also called 'data driven' testing."""
-
- #: Prefix for tests with independent state. These methods will be run with
- #: a separate setUp/tearDown call for each test in the group.
- _indepParTestPrefix = 'testip'
-
- #: Prefix for tests with shared state. These methods will be run with
- #: a single setUp/tearDown call for the whole group. This is useful when
- #: writing a group of tests for which the setup is expensive and one wants
- #: to actually share that state. Use with care (especially be careful not
- #: to mutate the state you are using, which will alter later tests).
- _shareParTestPrefix = 'testsp'
-
- def __init__(self, methodName = 'runTest'):
- warnings.warn("ParametricTestCase will be removed in the next NumPy "
- "release", DeprecationWarning)
- unittest.TestCase.__init__(self, methodName)
-
- def exec_test(self,test,args,result):
- """Execute a single test. Returns a success boolean"""
-
- ok = False
- try:
- test(*args)
- ok = True
- except self.failureException:
- result.addFailure(self, self._exc_info())
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
-
- return ok
-
- def set_testMethodDoc(self,doc):
- self._testMethodDoc = doc
- self._TestCase__testMethodDoc = doc
-
- def get_testMethodDoc(self):
- return self._testMethodDoc
-
- testMethodDoc = property(fset=set_testMethodDoc, fget=get_testMethodDoc)
-
- def get_testMethodName(self):
- try:
- return getattr(self,"_testMethodName")
- except:
- return getattr(self,"_TestCase__testMethodName")
-
- testMethodName = property(fget=get_testMethodName)
-
- def run_test(self, testInfo,result):
- """Run one test with arguments"""
-
- test,args = testInfo[0],testInfo[1:]
-
- # Reset the doc attribute to be the docstring of this particular test,
- # so that in error messages it prints the actual test's docstring and
- # not that of the test factory.
- self.testMethodDoc = test.__doc__
- result.startTest(self)
- try:
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
- return
-
- ok = self.exec_test(test,args,result)
-
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
- ok = False
- if ok: result.addSuccess(self)
- finally:
- result.stopTest(self)
-
- def run_tests(self, tests,result):
- """Run many tests with a common setUp/tearDown.
-
- The entire set of tests is run with a single setUp/tearDown call."""
-
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.testsRun += 1
- result.addError(self, self._exc_info())
- return
-
- saved_doc = self.testMethodDoc
-
- try:
- # Run all the tests specified
- for testInfo in tests:
- test,args = testInfo[0],testInfo[1:]
-
- # Set the doc argument for this test. Note that even if we do
- # this, the fail/error tracebacks still print the docstring for
- # the parent factory, because they only generate the message at
- # the end of the run, AFTER we've restored it. There is no way
- # to tell the unittest system (without overriding a lot of
- # stuff) to extract this information right away, the logic is
- # hardcoded to pull it later, since unittest assumes it doesn't
- # change.
- self.testMethodDoc = test.__doc__
- result.startTest(self)
- ok = self.exec_test(test,args,result)
- if ok: result.addSuccess(self)
-
- finally:
- # Restore docstring info and run tearDown once only.
- self.testMethodDoc = saved_doc
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
-
- def run(self, result=None):
- """Test runner."""
-
- #print
- #print '*** run for method:',self._testMethodName # dbg
- #print '*** doc:',self._testMethodDoc # dbg
-
- if result is None: result = self.defaultTestResult()
-
- # Independent tests: each gets its own setup/teardown
- if self.testMethodName.startswith(self._indepParTestPrefix):
- for t in getattr(self,self.testMethodName)():
- self.run_test(t,result)
- # Shared-state test: single setup/teardown for all
- elif self.testMethodName.startswith(self._shareParTestPrefix):
- tests = getattr(self,self.testMethodName,'runTest')()
- self.run_tests(tests,result)
- # Normal unittest Test methods
- else:
- unittest.TestCase.run(self,result)
-
-# The underscore was added to the class name to keep nose from trying
-# to run the test class (nose ignores class names that begin with an
-# underscore by default).
-ParametricTestCase = _ParametricTestCase
-
-#############################################################################
-# Quick and dirty interactive example/test
-if __name__ == '__main__':
-
- class ExampleTestCase(ParametricTestCase):
-
- #-------------------------------------------------------------------
- # An instrumented setUp method so we can see when it gets called and
- # how many times per instance
- counter = 0
-
- def setUp(self):
- self.counter += 1
- print 'setUp count: %2s for: %s' % (self.counter,
- self.testMethodDoc)
-
- #-------------------------------------------------------------------
- # A standard test method, just like in the unittest docs.
- def test_foo(self):
- """Normal test for feature foo."""
- pass
-
- #-------------------------------------------------------------------
- # Testing methods that need parameters. These can NOT be named test*,
- # since they would be picked up by unittest and called without
- # arguments. Instead, call them anything else (I use tst*) and then
- # load them via the factories below.
- def tstX(self,i):
- "Test feature X with parameters."
- print 'tstX, i=',i
- if i==1 or i==3:
- # Test fails
- self.fail('i is bad, bad: %s' % i)
-
- def tstY(self,i):
- "Test feature Y with parameters."
- print 'tstY, i=',i
- if i==1:
- # Force an error
- 1/0
-
- def tstXX(self,i,j):
- "Test feature XX with parameters."
- print 'tstXX, i=',i,'j=',j
- if i==1:
- # Test fails
- self.fail('i is bad, bad: %s' % i)
-
- def tstYY(self,i):
- "Test feature YY with parameters."
- print 'tstYY, i=',i
- if i==2:
- # Force an error
- 1/0
-
- def tstZZ(self):
- """Test feature ZZ without parameters, needs multiple runs.
-
- This could be a random test that you want to run multiple times."""
- pass
-
- #-------------------------------------------------------------------
- # Parametric test factories that create the test groups to call the
- # above tst* methods with their required arguments.
- def testip(self):
- """Independent parametric test factory.
-
- A separate setUp() call is made for each test returned by this
- method.
-
- You must return an iterable (list or generator is fine) containing
- tuples with the actual method to be called as the first argument,
- and the arguments for that call later."""
- return [(self.tstX,i) for i in range(5)]
-
- def testip2(self):
- """Another independent parametric test factory"""
- return [(self.tstY,i) for i in range(5)]
-
- def testip3(self):
- """Test factory combining different subtests.
-
- This one shows how to assemble calls to different tests."""
- return [(self.tstX,3),(self.tstX,9),(self.tstXX,4,10),
- (self.tstZZ,),(self.tstZZ,)]
-
- def testsp(self):
- """Shared parametric test factory
-
- A single setUp() call is made for all the tests returned by this
- method.
- """
- return [(self.tstXX,i,i+1) for i in range(5)]
-
- def testsp2(self):
- """Another shared parametric test factory"""
- return [(self.tstYY,i) for i in range(5)]
-
- def testsp3(self):
- """Another shared parametric test factory.
-
- This one simply calls the same test multiple times, without any
- arguments. Note that you must still return tuples, even if there
- are no arguments."""
- return [(self.tstZZ,) for i in range(10)]
-
-
- # This test class runs normally under unittest's default runner
- unittest.main()