From 8d6ec65c925ebef5e0567708de1d16df39077c9d Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 9 Feb 2017 18:51:08 +0000 Subject: MAINT: Be specific about where AxisError is raised These were tested by temporarily removing the base classes from AxisError --- numpy/lib/tests/test_function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f69c24d59..d914260ad 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -466,8 +466,8 @@ class TestInsert(TestCase): insert(a, 1, a[:, 2,:], axis=1)) # invalid axis value - assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3) - assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4) + assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3) + assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4) # negative axis value a = np.arange(24).reshape((2, 3, 4)) -- cgit v1.2.1 From 9520de90837d0afaac3d1612047f4b952563b3d5 Mon Sep 17 00:00:00 2001 From: Alessandro Pietro Bardelli Date: Wed, 4 Jan 2017 00:58:44 +0100 Subject: ENH: gradient support for unevenly spaced data This somehow reverts #7618 and solves #6847, #7548 by implementing support for unevenly spaced data. Now the behaviour is similar to that of Matlab/Octave function. As argument it can take: 1. A single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each dimension of F. The length of the array must match the size of the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. 
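For illustration only (not part of the patch; the array `f` and the coordinate vector `x` below are invented for the example), the four spacing forms listed above look like this:

    import numpy as np

    f = np.arange(25, dtype=float).reshape(5, 5)
    x = np.array([0., 1., 2., 5., 9.])   # uneven coordinates, len(x) == f.shape[axis]

    np.gradient(f, 2.)         # 1. a single scalar spacing for every dimension
    np.gradient(f, 2., 0.5)    # 2. one scalar per dimension (dx, dy)
    np.gradient(f, x, x)       # 3. one coordinate array per dimension
    np.gradient(f, x, 0.5)     # 4. any mix of scalars and coordinate arrays

Each call returns a list of two arrays, one gradient component per axis of `f`.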
--- numpy/lib/tests/test_function_base.py | 205 +++++++++++++++++++++++++++------- 1 file changed, 163 insertions(+), 42 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index d914260ad..4fb0dba51 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -96,7 +96,7 @@ class TestRot90(TestCase): for k in range(1,5): assert_equal(rot90(a, k=k, axes=(2, 0)), - rot90(a_rot90_20, k=k-1, axes=(2, 0))) + rot90(a_rot90_20, k=k-1, axes=(2, 0))) class TestFlip(TestCase): @@ -168,7 +168,8 @@ class TestFlip(TestCase): def test_4d(self): a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5) for i in range(a.ndim): - assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) + assert_equal(np.flip(a, i), + np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) class TestAny(TestCase): @@ -219,7 +220,7 @@ class TestCopy(TestCase): def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: - # https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 + # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) @@ -555,7 +556,8 @@ class TestCumsum(TestCase): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, - np.uint32, np.float32, np.float64, np.complex64, np.complex128]: + np.uint32, np.float32, np.float64, np.complex64, + np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) @@ -726,14 +728,51 @@ class TestGradient(TestCase): assert_array_equal(gradient(x), dx) assert_array_equal(gradient(v), dx) + def test_args(self): + dx = np.cumsum(np.ones(5)) + dx_uneven = [1., 2., 5., 9., 11.] + f_2d = np.arange(25).reshape(5, 5) + + # distances must be scalars or have size equal to gradient[axis] + gradient(np.arange(5), 3.) 
+ gradient(np.arange(5), dx) + gradient(f_2d, 1.5) # dy is set equal to dx because scalar + + gradient(f_2d, dx_uneven, dx_uneven) + # mix between even and uneven spaces and + # mix between scalar and vector + gradient(f_2d, dx, 2) + + # 2D but axis specified + gradient(f_2d, dx, axis=1) + def test_badargs(self): - # for 2D array, gradient can take 0, 1, or 2 extra args - x = np.array([[1, 1], [3, 4]]) - assert_raises(SyntaxError, gradient, x, np.array([1., 1.]), - np.array([1., 1.]), np.array([1., 1.])) + f_2d = np.arange(25).reshape(5, 5) + x = np.cumsum(np.ones(5)) + + # wrong sizes + assert_raises(ValueError, gradient, f_2d, x, np.ones(2)) + assert_raises(ValueError, gradient, f_2d, 1, np.ones(2)) + assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2)) + # wrong number of arguments + assert_raises(TypeError, gradient, f_2d, x) + assert_raises(TypeError, gradient, f_2d, x, axis=(0,1)) + assert_raises(TypeError, gradient, f_2d, x, x, x) + assert_raises(TypeError, gradient, f_2d, 1, 1, 1) + assert_raises(TypeError, gradient, f_2d, x, x, axis=1) + assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1) - # disallow arrays as distances, see gh-6847 - assert_raises(ValueError, gradient, np.arange(5), np.ones(5)) + def test_datetime64(self): + # Make sure gradient() can handle special types like datetime64 + x = np.array( + ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', + '1910-10-12', '1910-12-12', '1912-12-12'], + dtype='datetime64[D]') + dx = np.array( + [-5, -3, 0, 31, 61, 396, 731], + dtype='timedelta64[D]') + assert_array_equal(gradient(x), dx) + assert_(dx.dtype == np.dtype('timedelta64[D]')) def test_masked(self): # Make sure that gradient supports subclasses like masked arrays @@ -750,29 +789,6 @@ class TestGradient(TestCase): np.gradient(x2, edge_order=2) assert_array_equal(x2.mask, [False, False, True, False, False]) - def test_datetime64(self): - # Make sure gradient() can handle special types like datetime64 - x = np.array( - ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12', - '1910-10-12', '1910-12-12', '1912-12-12'], - dtype='datetime64[D]') - dx = np.array( - [-5, -3, 0, 31, 61, 396, 731], - dtype='timedelta64[D]') - assert_array_equal(gradient(x), dx) - assert_(dx.dtype == np.dtype('timedelta64[D]')) - - def test_timedelta64(self): - # Make sure gradient() can handle special types like timedelta64 - x = np.array( - [-5, -3, 10, 12, 61, 321, 300], - dtype='timedelta64[D]') - dx = np.array( - [2, 7, 7, 25, 154, 119, -21], - dtype='timedelta64[D]') - assert_array_equal(gradient(x), dx) - assert_(dx.dtype == np.dtype('timedelta64[D]')) - def test_second_order_accurate(self): # Testing that the relative numerical error is less that 3% for # this example problem. This corresponds to second order @@ -785,6 +801,78 @@ class TestGradient(TestCase): num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1) assert_(np.all(num_error < 0.03) == True) + # test with unevenly spaced + np.random.seed(0) + x = np.sort(np.random.random(10)) + y = 2 * x ** 3 + 4 * x ** 2 + 2 * x + analytical = 6 * x ** 2 + 8 * x + 2 + num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1) + assert_(np.all(num_error < 0.03) == True) + + def test_spacing(self): + f = np.array([0, 2., 3., 4., 5., 5.]) + f = np.tile(f, (6,1)) + f.reshape(-1, 1) + x_uneven = np.array([0., 0.5, 1., 3., 5., 7.]) + x_even = np.arange(6.) 
+ + fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1)) + fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1)) + fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1)) + fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1)) + + # evenly spaced + for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: + res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order) + res2 = gradient(f, x_even, x_even, + axis=(0,1), edge_order=edge_order) + res3 = gradient(f, x_even, x_even, + axis=None, edge_order=edge_order) + assert_array_equal(res1, res2) + assert_array_equal(res2, res3) + assert_almost_equal(res1[0], exp_res.T) + assert_almost_equal(res1[1], exp_res) + + res1 = gradient(f, 1., axis=0, edge_order=edge_order) + res2 = gradient(f, x_even, axis=0, edge_order=edge_order) + assert_(res1.shape == res2.shape) + assert_almost_equal(res2, exp_res.T) + + res1 = gradient(f, 1., axis=1, edge_order=edge_order) + res2 = gradient(f, x_even, axis=1, edge_order=edge_order) + assert_(res1.shape == res2.shape) + assert_array_equal(res2, exp_res) + + # unevenly spaced + for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: + res1 = gradient(f, x_uneven, x_uneven, + axis=(0,1), edge_order=edge_order) + res2 = gradient(f, x_uneven, x_uneven, + axis=None, edge_order=edge_order) + assert_array_equal(res1, res2) + assert_almost_equal(res1[0], exp_res.T) + assert_almost_equal(res1[1], exp_res) + + res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) + assert_almost_equal(res1, exp_res.T) + + res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) + assert_almost_equal(res1, exp_res) + + # mixed + res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1) + res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1) + assert_array_equal(res1[0], res2[1]) + assert_array_equal(res1[1], res2[0]) + assert_almost_equal(res1[0], fdx_even_ord1.T) + assert_almost_equal(res1[1], fdx_uneven_ord1) + + res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2) + res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2) + assert_array_equal(res1[0], res2[1]) + assert_array_equal(res1[1], res2[0]) + assert_almost_equal(res1[0], fdx_even_ord2.T) + assert_almost_equal(res1[1], fdx_uneven_ord2) + def test_specific_axes(self): # Testing that gradient can work on a given axis only v = [[1, 1], [3, 4]] @@ -802,13 +890,37 @@ class TestGradient(TestCase): assert_almost_equal(gradient(x, axis=None), gradient(x)) # test vararg order - assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0]) + assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), + [dx[1]/2.0, dx[0]/3.0]) # test maximal number of varargs - assert_raises(SyntaxError, gradient, x, 1, 2, axis=1) + assert_raises(TypeError, gradient, x, 1, 2, axis=1) assert_raises(ValueError, gradient, x, axis=3) assert_raises(ValueError, gradient, x, axis=-3) assert_raises(TypeError, gradient, x, axis=[1,]) + + def test_timedelta64(self): + # Make sure gradient() can handle special types like timedelta64 + x = np.array( + [-5, -3, 10, 12, 61, 321, 300], + dtype='timedelta64[D]') + dx = np.array( + [2, 7, 7, 25, 154, 119, -21], + dtype='timedelta64[D]') + assert_array_equal(gradient(x), dx) + assert_(dx.dtype == np.dtype('timedelta64[D]')) + + def test_values(self): + # needs at least 2 points for edge_order ==1 + gradient(np.arange(2), edge_order=1) + # needs at least 3 points for edge_order ==1 + gradient(np.arange(3), edge_order=2) + + assert_raises(ValueError, gradient, np.arange(0), 
edge_order=1) + assert_raises(ValueError, gradient, np.arange(0), edge_order=2) + assert_raises(ValueError, gradient, np.arange(1), edge_order=1) + assert_raises(ValueError, gradient, np.arange(1), edge_order=2) + assert_raises(ValueError, gradient, np.arange(2), edge_order=2) class TestAngle(TestCase): @@ -1753,20 +1865,28 @@ class TestHistogramOptimBinNums(TestCase): completely ignored. All test values have been precomputed and the shouldn't change. """ - # some basic sanity checking, with some fixed data. Checking for the correct number of bins - basic_test = {50: {'fd': 8, 'scott': 8, 'rice': 15, 'sturges': 14, 'auto': 14}, - 500: {'fd': 15, 'scott': 16, 'rice': 32, 'sturges': 20, 'auto': 20}, - 5000: {'fd': 33, 'scott': 33, 'rice': 69, 'sturges': 27, 'auto': 33}} + # some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = { + 50: {'fd': 8, 'scott': 8, 'rice': 15, + 'sturges': 14, 'auto': 14}, + 500: {'fd': 15, 'scott': 16, 'rice': 32, + 'sturges': 20, 'auto': 20}, + 5000: {'fd': 33, 'scott': 33, 'rice': 69, + 'sturges': 27, 'auto': 33} + } for testlen, expectedResults in basic_test.items(): - # create some sort of non uniform data to test with (3 peak uniform mixture) + # create some sort of non uniform data to test with + # (3 peak uniform mixture) x1 = np.linspace(-10, -1, testlen // 5 * 2) x2 = np.linspace(1, 10, testlen // 5 * 3) x3 = np.linspace(-100, -50, testlen) x = np.hstack((x1, x2, x3)) for estimator, numbins in expectedResults.items(): a, b = np.histogram(x, estimator, range = (-20, 20)) - msg = "For the {0} estimator with datasize of {1}".format(estimator, testlen) + msg = "For the {0} estimator".format(estimator) + msg += " with datasize of {0}".format(testlen) assert_equal(len(a), numbins, err_msg=msg) def test_simple_weighted(self): @@ -1775,7 +1895,8 @@ class TestHistogramOptimBinNums(TestCase): """ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] for estimator in estimator_list: - assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) class TestHistogramdd(TestCase): -- cgit v1.2.1 From 1588ae39ffb51ea916f03510671aab711fdfb568 Mon Sep 17 00:00:00 2001 From: Duke Vijitbenjaronk Date: Tue, 7 Mar 2017 11:11:53 -0600 Subject: BUG: Fix np.average with object array weights Fixes #8696 --- numpy/lib/tests/test_function_base.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4fb0dba51..188c1c2ea 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function import operator import warnings import sys +import decimal import numpy as np from numpy.testing import ( @@ -341,6 +342,12 @@ class TestAverage(TestCase): w = np.array([[1,2],[3,4]], dtype=wt) assert_equal(np.average(a, weights=w).dtype, np.dtype(rt)) + def test_object_dtype(self): + a = np.array([decimal.Decimal(x) for x in range(10)]) + w = np.array([decimal.Decimal(1) for _ in range(10)]) + w /= w.sum() + assert_almost_equal(a.mean(0), average(a, weights=w)) + class TestSelect(TestCase): choices = [np.array([1, 2, 3]), np.array([4, 5, 6]), -- cgit v1.2.1 From ec6d4295a80e5df235d3f5445e6425581309c930 Mon Sep 17 00:00:00 2001 From: Antony Lee Date: Mon, 5 Dec 2016 18:20:40 -0800 Subject: ENH: Allow 
bincount(..., minlength=0). --- numpy/lib/tests/test_function_base.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 5c2446e50..3f17a3c32 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2369,11 +2369,16 @@ class TestBincount(TestCase): x = np.array([0, 1, 0, 1, 1]) y = np.bincount(x, minlength=3) assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) def test_with_minlength_smaller_than_maxvalue(self): x = np.array([0, 1, 1, 2, 2, 3, 3]) y = np.bincount(x, minlength=2) assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) def test_with_minlength_and_weights(self): x = np.array([1, 2, 4, 5, 2]) @@ -2397,22 +2402,16 @@ class TestBincount(TestCase): "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, - "must be positive", + "must be non-negative", lambda: np.bincount(x, minlength=-1)) - assert_raises_regex(ValueError, - "must be positive", - lambda: np.bincount(x, minlength=0)) x = np.arange(5) assert_raises_regex(TypeError, "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, - "minlength must be positive", + "minlength must be non-negative", lambda: np.bincount(x, minlength=-1)) - assert_raises_regex(ValueError, - "minlength must be positive", - lambda: np.bincount(x, minlength=0)) @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") def test_dtype_reference_leaks(self): -- cgit v1.2.1 From efa1bd2c0de6cd9c07b410002f2e1c0bae8cf385 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 26 Mar 2017 10:38:28 +0100 Subject: MAINT: Reuse _validate_axis in np.gradient This also means that its axis argument invokes operator.index like others do. _validate_axis currently accepts lists of axes, which is a bug. 
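Sketch of the resulting behaviour (illustrative only, not part of the patch):

    import numpy as np
    from numpy.testing import assert_raises

    x = np.arange(6.).reshape(2, 3)
    np.gradient(x, axis=-1)                               # ints (and anything defining __index__) are accepted
    assert_raises(np.AxisError, np.gradient, x, axis=3)   # out-of-range axes now raise AxisError
    assert_raises(np.AxisError, np.gradient, x, axis=-3)

Because np.AxisError subclasses both ValueError and IndexError, callers that caught either of the old exception types keep working.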
--- numpy/lib/tests/test_function_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 708b20482..6c6ed5941 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -902,9 +902,9 @@ class TestGradient(TestCase): # test maximal number of varargs assert_raises(TypeError, gradient, x, 1, 2, axis=1) - assert_raises(ValueError, gradient, x, axis=3) - assert_raises(ValueError, gradient, x, axis=-3) - assert_raises(TypeError, gradient, x, axis=[1,]) + assert_raises(np.AxisError, gradient, x, axis=3) + assert_raises(np.AxisError, gradient, x, axis=-3) + # assert_raises(TypeError, gradient, x, axis=[1,]) def test_timedelta64(self): # Make sure gradient() can handle special types like timedelta64 -- cgit v1.2.1 From e3ed705e5d91b584e9191a20f3a4780d354271ff Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 26 Mar 2017 11:22:43 +0100 Subject: MAINT: Use _validate_axis inside _ureduce This fixes an omission where duplicate axes would only be detected when positive --- numpy/lib/tests/test_function_base.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 6c6ed5941..7c07606f6 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2973,11 +2973,14 @@ class TestPercentile(TestCase): def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) - assert_raises(IndexError, np.percentile, d, axis=-5, q=25) - assert_raises(IndexError, np.percentile, d, axis=(0, -5), q=25) - assert_raises(IndexError, np.percentile, d, axis=4, q=25) - assert_raises(IndexError, np.percentile, d, axis=(0, 4), q=25) + assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(np.AxisError, np.percentile, d, axis=4, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) def test_keepdims(self): d = np.ones((3, 5, 7, 11)) @@ -3349,10 +3352,10 @@ class TestMedian(TestCase): def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) - assert_raises(IndexError, np.median, d, axis=-5) - assert_raises(IndexError, np.median, d, axis=(0, -5)) - assert_raises(IndexError, np.median, d, axis=4) - assert_raises(IndexError, np.median, d, axis=(0, 4)) + assert_raises(np.AxisError, np.median, d, axis=-5) + assert_raises(np.AxisError, np.median, d, axis=(0, -5)) + assert_raises(np.AxisError, np.median, d, axis=4) + assert_raises(np.AxisError, np.median, d, axis=(0, 4)) assert_raises(ValueError, np.median, d, axis=(1, 1)) def test_keepdims(self): -- cgit v1.2.1 From ef5684564e3074daf614846f30bfdd7f15f5254f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Skytt=C3=A4?= Date: Tue, 9 May 2017 12:16:14 +0300 Subject: ENH: Spelling fixes --- numpy/lib/tests/test_function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 
7c07606f6..4f21e261f 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2787,7 +2787,7 @@ class TestPercentile(TestCase): interpolation="higher").shape, (3, 3, 5, 6)) def test_scalar_q(self): - # test for no empty dimensions for compatiblity with old percentile + # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50), 5.5) self.assertTrue(np.isscalar(np.percentile(x, 50))) @@ -2808,7 +2808,7 @@ class TestPercentile(TestCase): assert_equal(np.percentile(x, 50, axis=1, out=out), r1) assert_equal(out, r1) - # test for no empty dimensions for compatiblity with old percentile + # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) self.assertTrue(np.isscalar(np.percentile(x, 50))) -- cgit v1.2.1 From 1608e53072b035bd40de7a202e75354f0e802120 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 3 Jun 2017 13:41:26 +0100 Subject: BUG: KeyboardInterrupt is swallowed all over the place Bare except is very rarely the right thing --- numpy/lib/tests/test_function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4f21e261f..7479e30b3 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1067,7 +1067,7 @@ class TestVectorize(TestCase): import random try: vectorize(random.randrange) # Should succeed - except: + except Exception: raise AssertionError() def test_keywords2_ticket_2100(self): -- cgit v1.2.1 From 0bf437fb0ae42fefdb0040692b993f17dedb2e3d Mon Sep 17 00:00:00 2001 From: Egor Panfilov Date: Sat, 17 Jun 2017 15:11:06 +0300 Subject: BUG: Switched to xor for bool arrays in diff, added corresponding tests --- numpy/lib/tests/test_function_base.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 7479e30b3..4000b55f5 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -638,6 +638,16 @@ class TestDiff(TestCase): assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, n=3), out3) + x = [1.1, 2.2, 3.0, -0.2, -0.1] + out = np.array([1.1, 0.8, -3.2, 0.1]) + assert_almost_equal(diff(x), out) + + x = [True, True, False, False] + out = np.array([False, True, False]) + out2 = np.array([True, True]) + assert_array_equal(diff(x), out) + assert_array_equal(diff(x, n=2), out2) + def test_nd(self): x = 20 * rand(10, 20, 30) out1 = x[:, :, 1:] - x[:, :, :-1] @@ -927,7 +937,7 @@ class TestGradient(TestCase): assert_raises(ValueError, gradient, np.arange(0), edge_order=2) assert_raises(ValueError, gradient, np.arange(1), edge_order=1) assert_raises(ValueError, gradient, np.arange(1), edge_order=2) - assert_raises(ValueError, gradient, np.arange(2), edge_order=2) + assert_raises(ValueError, gradient, np.arange(2), edge_order=2) class TestAngle(TestCase): -- cgit v1.2.1 From 23c1db87a939dc2f19cfa3744389e3be667215ba Mon Sep 17 00:00:00 2001 From: Brandon Carter Date: Sat, 24 Jun 2017 22:59:14 -0400 Subject: TST: add test for unsigned bins monotonicity check, see #9222 --- numpy/lib/tests/test_function_base.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 
'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4000b55f5..6cd990634 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1771,6 +1771,14 @@ class TestHistogram(TestCase): hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) self.assertEqual(hist[-1], 1) + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + class TestHistogramOptimBinNums(TestCase): """ -- cgit v1.2.1 From ae84af3b6e6d96e4be408e8a56408290ee1879db Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 4 Jul 2017 13:47:45 -0600 Subject: MAINT: Rearrange files in numpy/testing module. The aim here is to separate out the nose dependent files prior to adding pytest support. This could be done by adding new files to the general numpy/testing directory, but I felt that it was to have the relevant files separated out as it makes it easier to completely remove nose dependencies when needed. Many places were accessing submodules in numpy/testing directly, and in some cases incorrectly. That presented a backwards compatibility problem. The solution adapted here is to have "dummy" files whose contents will depend on whether of not pytest is active. That way the module looks the same as before from the outside. In the case of numpy itself, direct accesses have been fixed. Having proper `__all__` lists in the submodules helped in that. --- numpy/lib/tests/test_function_base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4000b55f5..f6d4b6111 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -10,9 +10,8 @@ from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_warns, - assert_raises_regex, dec, suppress_warnings + assert_raises_regex, dec, suppress_warnings, HAS_REFCOUNT, ) -from numpy.testing.utils import HAS_REFCOUNT import numpy.lib.function_base as nfb from numpy.random import rand from numpy.lib import ( -- cgit v1.2.1 From d86e3fee3c9906ce0fad36520de86d9ac306cee8 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 12:40:05 +0100 Subject: BUG: Allow 0d arrays instead of scalars in gradient This fixes gh-8292 --- numpy/lib/tests/test_function_base.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4f21e261f..ee2cc166a 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -742,8 +742,11 @@ class TestGradient(TestCase): # distances must be scalars or have size equal to gradient[axis] gradient(np.arange(5), 3.) 
+ gradient(np.arange(5), np.array(3.)) gradient(np.arange(5), dx) - gradient(f_2d, 1.5) # dy is set equal to dx because scalar + # dy is set equal to dx because scalar + gradient(f_2d, 1.5) + gradient(f_2d, np.array(1.5)) gradient(f_2d, dx_uneven, dx_uneven) # mix between even and uneven spaces and -- cgit v1.2.1 From 2bfec2c0dd92958694e6d736530ae6333d7d62d7 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 12:40:32 +0100 Subject: BUG: Only allow 1d distance arrays 2d arrays would work, but in unpredictable and undocumented ways. This at least makes gh-9401 give a better error message. --- numpy/lib/tests/test_function_base.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ee2cc166a..d7d00758e 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -756,6 +756,10 @@ class TestGradient(TestCase): # 2D but axis specified gradient(f_2d, dx, axis=1) + # 2d coordinate arguments are not yet allowed + assert_raises_regex(ValueError, '.*scalars or 1d', + gradient, f_2d, np.stack([dx]*2, axis=-1), 1) + def test_badargs(self): f_2d = np.arange(25).reshape(5, 5) x = np.cumsum(np.ones(5)) -- cgit v1.2.1 From 575a48e0da892d447a4d884f024df82c567e6f3f Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 15:53:34 +0100 Subject: BUG: float16 is promoted to float64 by gradient This isn't the case for `diff` --- numpy/lib/tests/test_function_base.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f6d4b6111..2f766471f 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -926,6 +926,12 @@ class TestGradient(TestCase): assert_array_equal(gradient(x), dx) assert_(dx.dtype == np.dtype('timedelta64[D]')) + def test_inexact_dtypes(self): + for dt in [np.float16, np.float32, np.float64]: + # dtypes should not be promoted in a different way to what diff does + x = np.array([1, 2, 3], dtype=dt) + assert_equal(gradient(x).dtype, np.diff(x).dtype) + def test_values(self): # needs at least 2 points for edge_order ==1 gradient(np.arange(2), edge_order=1) -- cgit v1.2.1 From 8eee1b06b330f6ff23fd3604444ba20a8d2d6416 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 17 Jul 2017 17:51:33 -0600 Subject: TST: Remove unittest dependencies in numpy/lib/tests. 
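A sketch of the conversion pattern, outside the patch (the class and test names are invented for illustration): test classes drop the TestCase base and the self.assert* methods become the plain functions from numpy.testing.

    from numpy.testing import assert_, assert_equal, assert_raises

    class TestExample(object):                   # was: class TestExample(TestCase)
        def setup(self):                         # was: def setUp(self)
            self.data = [1, 2, 3]

        def test_basic(self):
            assert_equal(len(self.data), 3)      # was: self.assertEqual(...)
            assert_(self.data[0] == 1)           # was: self.assertTrue(...)
            assert_raises(ValueError, int, "x")  # was: self.assertRaises(...)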
--- numpy/lib/tests/test_function_base.py | 174 +++++++++++++++++----------------- 1 file changed, 87 insertions(+), 87 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index f6d4b6111..e26ea9440 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -7,10 +7,10 @@ import decimal import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, - assert_allclose, assert_array_max_ulp, assert_warns, - assert_raises_regex, dec, suppress_warnings, HAS_REFCOUNT, + assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, + dec, suppress_warnings, HAS_REFCOUNT, ) import numpy.lib.function_base as nfb from numpy.random import rand @@ -31,9 +31,9 @@ def get_mat(n): return data -class TestRot90(TestCase): +class TestRot90(object): def test_basic(self): - self.assertRaises(ValueError, rot90, np.ones(4)) + assert_raises(ValueError, rot90, np.ones(4)) assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2)) assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2)) assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1)) @@ -99,12 +99,12 @@ class TestRot90(TestCase): rot90(a_rot90_20, k=k-1, axes=(2, 0))) -class TestFlip(TestCase): +class TestFlip(object): def test_axes(self): - self.assertRaises(ValueError, np.flip, np.ones(4), axis=1) - self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2) - self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3) + assert_raises(ValueError, np.flip, np.ones(4), axis=1) + assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2) + assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3) def test_basic_lr(self): a = get_mat(4) @@ -172,7 +172,7 @@ class TestFlip(TestCase): np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) -class TestAny(TestCase): +class TestAny(object): def test_basic(self): y1 = [0, 0, 1, 0] @@ -189,7 +189,7 @@ class TestAny(TestCase): assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) -class TestAll(TestCase): +class TestAll(object): def test_basic(self): y1 = [0, 1, 1, 0] @@ -207,7 +207,7 @@ class TestAll(TestCase): assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) -class TestCopy(TestCase): +class TestCopy(object): def test_basic(self): a = np.array([[1, 2], [3, 4]]) @@ -235,7 +235,7 @@ class TestCopy(TestCase): assert_(a_fort_copy.flags.f_contiguous) -class TestAverage(TestCase): +class TestAverage(object): def test_basic(self): y1 = np.array([1, 2, 3]) @@ -345,9 +345,9 @@ class TestAverage(TestCase): a = np.array([decimal.Decimal(x) for x in range(10)]) w = np.array([decimal.Decimal(1) for _ in range(10)]) w /= w.sum() - assert_almost_equal(a.mean(0), average(a, weights=w)) + assert_almost_equal(a.mean(0), average(a, weights=w)) -class TestSelect(TestCase): +class TestSelect(object): choices = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])] @@ -419,7 +419,7 @@ class TestSelect(TestCase): select(conditions, choices) -class TestInsert(TestCase): +class TestInsert(object): def test_basic(self): a = [1, 2, 3] @@ -520,7 +520,7 @@ class TestInsert(TestCase): assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype)) -class TestAmax(TestCase): +class TestAmax(object): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -532,7 +532,7 @@ class 
TestAmax(TestCase): assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0]) -class TestAmin(TestCase): +class TestAmin(object): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -544,7 +544,7 @@ class TestAmin(TestCase): assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0]) -class TestPtp(TestCase): +class TestPtp(object): def test_basic(self): a = np.array([3, 4, 5, 10, -3, -5, 6.0]) @@ -556,7 +556,7 @@ class TestPtp(TestCase): assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0]) -class TestCumsum(TestCase): +class TestCumsum(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -579,7 +579,7 @@ class TestCumsum(TestCase): assert_array_equal(np.cumsum(a2, axis=1), tgt) -class TestProd(TestCase): +class TestProd(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -589,8 +589,8 @@ class TestProd(TestCase): a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, np.prod, a) - self.assertRaises(ArithmeticError, np.prod, a2, 1) + assert_raises(ArithmeticError, np.prod, a) + assert_raises(ArithmeticError, np.prod, a2, 1) else: assert_equal(a.prod(axis=0), 26400) assert_array_equal(a2.prod(axis=0), @@ -599,7 +599,7 @@ class TestProd(TestCase): np.array([24, 1890, 600], ctype)) -class TestCumprod(TestCase): +class TestCumprod(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -609,9 +609,9 @@ class TestCumprod(TestCase): a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, np.cumprod, a) - self.assertRaises(ArithmeticError, np.cumprod, a2, 1) - self.assertRaises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, np.cumprod, a2, 1) + assert_raises(ArithmeticError, np.cumprod, a) else: assert_array_equal(np.cumprod(a, axis=-1), np.array([1, 2, 20, 220, @@ -626,7 +626,7 @@ class TestCumprod(TestCase): [10, 30, 120, 600]], ctype)) -class TestDiff(TestCase): +class TestDiff(object): def test_basic(self): x = [1, 4, 6, 7, 12] @@ -659,9 +659,9 @@ class TestDiff(TestCase): assert_array_equal(diff(x, n=2, axis=0), out4) -class TestDelete(TestCase): +class TestDelete(object): - def setUp(self): + def setup(self): self.a = np.arange(5) self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) @@ -734,7 +734,7 @@ class TestDelete(TestCase): assert_equal(m.flags.f_contiguous, k.flags.f_contiguous) -class TestGradient(TestCase): +class TestGradient(object): def test_basic(self): v = [[1, 1], [3, 4]] @@ -744,7 +744,7 @@ class TestGradient(TestCase): assert_array_equal(gradient(x), dx) assert_array_equal(gradient(v), dx) - def test_args(self): + def test_args(self): dx = np.cumsum(np.ones(5)) dx_uneven = [1., 2., 5., 9., 11.] f_2d = np.arange(25).reshape(5, 5) @@ -827,15 +827,15 @@ class TestGradient(TestCase): def test_spacing(self): f = np.array([0, 2., 3., 4., 5., 5.]) - f = np.tile(f, (6,1)) + f.reshape(-1, 1) + f = np.tile(f, (6,1)) + f.reshape(-1, 1) x_uneven = np.array([0., 0.5, 1., 3., 5., 7.]) x_even = np.arange(6.) 
- + fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1)) fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1)) fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1)) fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1)) - + # evenly spaced for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order) @@ -845,19 +845,19 @@ class TestGradient(TestCase): axis=None, edge_order=edge_order) assert_array_equal(res1, res2) assert_array_equal(res2, res3) - assert_almost_equal(res1[0], exp_res.T) - assert_almost_equal(res1[1], exp_res) - + assert_almost_equal(res1[0], exp_res.T) + assert_almost_equal(res1[1], exp_res) + res1 = gradient(f, 1., axis=0, edge_order=edge_order) res2 = gradient(f, x_even, axis=0, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_almost_equal(res2, exp_res.T) - + res1 = gradient(f, 1., axis=1, edge_order=edge_order) res2 = gradient(f, x_even, axis=1, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_array_equal(res2, exp_res) - + # unevenly spaced for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: res1 = gradient(f, x_uneven, x_uneven, @@ -867,13 +867,13 @@ class TestGradient(TestCase): assert_array_equal(res1, res2) assert_almost_equal(res1[0], exp_res.T) assert_almost_equal(res1[1], exp_res) - + res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) assert_almost_equal(res1, exp_res.T) - + res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) assert_almost_equal(res1, exp_res) - + # mixed res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1) res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1) @@ -881,14 +881,14 @@ class TestGradient(TestCase): assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord1.T) assert_almost_equal(res1[1], fdx_uneven_ord1) - + res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2) res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2) assert_array_equal(res1[0], res2[1]) assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord2.T) assert_almost_equal(res1[1], fdx_uneven_ord2) - + def test_specific_axes(self): # Testing that gradient can work on a given axis only v = [[1, 1], [3, 4]] @@ -914,7 +914,7 @@ class TestGradient(TestCase): assert_raises(np.AxisError, gradient, x, axis=3) assert_raises(np.AxisError, gradient, x, axis=-3) # assert_raises(TypeError, gradient, x, axis=[1,]) - + def test_timedelta64(self): # Make sure gradient() can handle special types like timedelta64 x = np.array( @@ -931,7 +931,7 @@ class TestGradient(TestCase): gradient(np.arange(2), edge_order=1) # needs at least 3 points for edge_order ==1 gradient(np.arange(3), edge_order=2) - + assert_raises(ValueError, gradient, np.arange(0), edge_order=1) assert_raises(ValueError, gradient, np.arange(0), edge_order=2) assert_raises(ValueError, gradient, np.arange(1), edge_order=1) @@ -939,7 +939,7 @@ class TestGradient(TestCase): assert_raises(ValueError, gradient, np.arange(2), edge_order=2) -class TestAngle(TestCase): +class TestAngle(object): def test_basic(self): x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2, @@ -955,7 +955,7 @@ class TestAngle(TestCase): assert_array_almost_equal(z, zo, 11) -class TestTrimZeros(TestCase): +class TestTrimZeros(object): """ Only testing for integer splits. 
@@ -978,7 +978,7 @@ class TestTrimZeros(TestCase): assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4])) -class TestExtins(TestCase): +class TestExtins(object): def test_basic(self): a = np.array([1, 3, 2, 1, 2, 3, 3]) @@ -1017,7 +1017,7 @@ class TestExtins(TestCase): assert_array_equal(a, ac) -class TestVectorize(TestCase): +class TestVectorize(object): def test_simple(self): def addsubtract(a, b): @@ -1349,7 +1349,7 @@ class TestVectorize(TestCase): f(x) -class TestDigitize(TestCase): +class TestDigitize(object): def test_forward(self): x = np.arange(-6, 5) @@ -1422,7 +1422,7 @@ class TestDigitize(TestCase): assert_(not isinstance(digitize(b, a, True), A)) -class TestUnwrap(TestCase): +class TestUnwrap(object): def test_simple(self): # check that unwrap removes jumps greather that 2*pi @@ -1431,7 +1431,7 @@ class TestUnwrap(TestCase): assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) -class TestFilterwindows(TestCase): +class TestFilterwindows(object): def test_hanning(self): # check symmetry @@ -1462,7 +1462,7 @@ class TestFilterwindows(TestCase): assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) -class TestTrapz(TestCase): +class TestTrapz(object): def test_simple(self): x = np.arange(-10, 10, .1) @@ -1534,7 +1534,7 @@ class TestTrapz(TestCase): assert_almost_equal(mr, r) -class TestSinc(TestCase): +class TestSinc(object): def test_simple(self): assert_(sinc(0) == 1) @@ -1551,12 +1551,12 @@ class TestSinc(TestCase): assert_array_equal(y1, y3) -class TestHistogram(TestCase): +class TestHistogram(object): - def setUp(self): + def setup(self): pass - def tearDown(self): + def teardown(self): pass def test_simple(self): @@ -1762,16 +1762,16 @@ class TestHistogram(TestCase): left_edges = edges[:-1][mask] right_edges = edges[1:][mask] for x, left, right in zip(arr, left_edges, right_edges): - self.assertGreaterEqual(x, left) - self.assertLess(x, right) + assert_(x >= left) + assert_(x < right) def test_last_bin_inclusive_range(self): arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) - self.assertEqual(hist[-1], 1) + assert_equal(hist[-1], 1) -class TestHistogramOptimBinNums(TestCase): +class TestHistogramOptimBinNums(object): """ Provide test coverage when using provided estimators for optimal number of bins @@ -1881,7 +1881,7 @@ class TestHistogramOptimBinNums(TestCase): completely ignored. All test values have been precomputed and the shouldn't change. """ - # some basic sanity checking, with some fixed data. + # some basic sanity checking, with some fixed data. 
# Checking for the correct number of bins basic_test = { 50: {'fd': 8, 'scott': 8, 'rice': 15, @@ -1893,7 +1893,7 @@ class TestHistogramOptimBinNums(TestCase): } for testlen, expectedResults in basic_test.items(): - # create some sort of non uniform data to test with + # create some sort of non uniform data to test with # (3 peak uniform mixture) x1 = np.linspace(-10, -1, testlen // 5 * 2) x2 = np.linspace(1, 10, testlen // 5 * 3) @@ -1911,11 +1911,11 @@ class TestHistogramOptimBinNums(TestCase): """ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] for estimator in estimator_list: - assert_raises(TypeError, histogram, [1, 2, 3], + assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) -class TestHistogramdd(TestCase): +class TestHistogramdd(object): def test_simple(self): x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], @@ -2055,7 +2055,7 @@ class TestHistogramdd(TestCase): range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) -class TestUnique(TestCase): +class TestUnique(object): def test_simple(self): x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) @@ -2067,7 +2067,7 @@ class TestUnique(TestCase): assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) -class TestCheckFinite(TestCase): +class TestCheckFinite(object): def test_simple(self): a = [1, 2, 3] @@ -2084,7 +2084,7 @@ class TestCheckFinite(TestCase): assert_(a.dtype == np.float64) -class TestCorrCoef(TestCase): +class TestCorrCoef(object): A = np.array( [[0.15391142, 0.18045767, 0.14197213], [0.70461506, 0.96474128, 0.27906989], @@ -2169,7 +2169,7 @@ class TestCorrCoef(TestCase): assert_(np.all(np.abs(c) <= 1.0)) -class TestCov(TestCase): +class TestCov(object): x1 = np.array([[0, 2], [1, 1], [2, 0]]).T res1 = np.array([[1., -1.], [-1., 1.]]) x2 = np.array([0.0, 1.0, 2.0], ndmin=2) @@ -2267,7 +2267,7 @@ class TestCov(TestCase): self.res1) -class Test_I0(TestCase): +class Test_I0(object): def test_simple(self): assert_almost_equal( @@ -2293,7 +2293,7 @@ class Test_I0(TestCase): [1.05884290, 1.06432317]])) -class TestKaiser(TestCase): +class TestKaiser(object): def test_simple(self): assert_(np.isfinite(kaiser(1, 1.0))) @@ -2312,7 +2312,7 @@ class TestKaiser(TestCase): kaiser(3, 4) -class TestMsort(TestCase): +class TestMsort(object): def test_simple(self): A = np.array([[0.44567325, 0.79115165, 0.54900530], @@ -2325,7 +2325,7 @@ class TestMsort(TestCase): [0.64864341, 0.79115165, 0.96098397]])) -class TestMeshgrid(TestCase): +class TestMeshgrid(object): def test_simple(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) @@ -2414,7 +2414,7 @@ class TestMeshgrid(TestCase): assert_equal(x[1, :], X) -class TestPiecewise(TestCase): +class TestPiecewise(object): def test_simple(self): # Condition is single bool list @@ -2490,7 +2490,7 @@ class TestPiecewise(TestCase): [3., 3., 1.]])) -class TestBincount(TestCase): +class TestBincount(object): def test_simple(self): y = np.bincount(np.arange(4)) @@ -2577,7 +2577,7 @@ class TestBincount(TestCase): assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) -class TestInterp(TestCase): +class TestInterp(object): def test_exceptions(self): assert_raises(ValueError, interp, 0, [], []) @@ -2695,7 +2695,7 @@ def compare_results(res, desired): assert_array_equal(res[i], desired[i]) -class TestPercentile(TestCase): +class TestPercentile(object): def test_basic(self): x = np.arange(8) * 0.5 @@ -2799,7 +2799,7 @@ class TestPercentile(TestCase): # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) 
assert_equal(np.percentile(x, 50), 5.5) - self.assertTrue(np.isscalar(np.percentile(x, 50))) + assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) assert_equal(np.percentile(x, 50, axis=0), r0) assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) @@ -2820,7 +2820,7 @@ class TestPercentile(TestCase): # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) - self.assertTrue(np.isscalar(np.percentile(x, 50))) + assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) c0 = np.percentile(x, 50, interpolation='lower', axis=0) assert_equal(c0, r0) @@ -3126,7 +3126,7 @@ class TestPercentile(TestCase): a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) -class TestMedian(TestCase): +class TestMedian(object): def test_basic(self): a0 = np.array(1) @@ -3383,7 +3383,7 @@ class TestMedian(TestCase): (1, 1, 7, 1)) -class TestAdd_newdoc_ufunc(TestCase): +class TestAdd_newdoc_ufunc(object): def test_ufunc_arg(self): assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") @@ -3393,15 +3393,15 @@ class TestAdd_newdoc_ufunc(TestCase): assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) -class TestAdd_newdoc(TestCase): +class TestAdd_newdoc(object): @dec.skipif(sys.flags.optimize == 2) def test_add_doc(self): # test np.add_newdoc tgt = "Current flat index into the array." - self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt) - self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300) - self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300) + assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) + assert_(len(np.core.ufunc.identity.__doc__) > 300) + assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) if __name__ == "__main__": -- cgit v1.2.1 From 01fd010c6fd29a38b8013ba87c2fb89e8c4c7706 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Thu, 27 Jul 2017 17:26:11 -0400 Subject: MAINT: Changed diff to use iterative instead of recursive approach TST: Added tests for `n` parameter Added test for `datetime64` type change Added tests for axis normalization Added test for subtype handling DOC: Minor updates to docs: Added explanation for `n==0` Added documentation describing `datetime64` handling Updated formatting Added call to normalize_axis_index --- numpy/lib/tests/test_function_base.py | 53 +++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 099a2d407..4ecb02821 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -6,6 +6,7 @@ import sys import decimal import numpy as np +from numpy import ma from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, @@ -647,6 +648,19 @@ class TestDiff(object): assert_array_equal(diff(x), out) assert_array_equal(diff(x, n=2), out2) + def test_axis(self): + x = np.zeros((10, 20, 30)) + x[:, 1::2, :] = 1 + exp = np.ones((10, 19, 30)) + exp[:, 1::2, :] = -1 + assert_array_equal(diff(x), np.zeros((10, 20, 29))) + assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29))) + assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30))) + assert_array_equal(diff(x, axis=1), exp) + assert_array_equal(diff(x, axis=-2), exp) + assert_raises(np.AxisError, diff, x, axis=3) + assert_raises(np.AxisError, 
diff, x, axis=-4) + def test_nd(self): x = 20 * rand(10, 20, 30) out1 = x[:, :, 1:] - x[:, :, :-1] @@ -658,6 +672,45 @@ class TestDiff(object): assert_array_equal(diff(x, axis=0), out3) assert_array_equal(diff(x, n=2, axis=0), out4) + def test_n(self): + x = list(range(3)) + assert_raises(ValueError, diff, x, n=-1) + output = [diff(x, n=n) for n in range(1, 5)] + expected = [[1, 1], [0], [], []] + assert_(diff(x, n=0) is x) + for n, (expected, out) in enumerate(zip(expected, output), start=1): + assert_(type(out) is np.ndarray) + assert_array_equal(out, expected) + assert_equal(out.dtype, np.int_) + assert_equal(len(out), max(0, len(x) - n)) + + def test_times(self): + x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + expected = [ + np.array([1, 1], dtype='timedelta64[D]'), + np.array([0], dtype='timedelta64[D]'), + ] + expected.extend([np.array([], dtype='timedelta64[D]')] * 3) + for n, exp in enumerate(expected, start=1): + out = diff(x, n=n) + assert_array_equal(out, exp) + assert_equal(out.dtype, exp.dtype) + + def test_subclass(self): + x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], + mask=[[False, False], [True, False], + [False, True], [True, True], [False, False]]) + out = diff(x) + assert_array_equal(out.data, [[1], [1], [1], [1], [1]]) + assert_array_equal(out.mask, [[False], [True], + [True], [True], [False]]) + assert_(type(out) is type(x)) + + out3 = diff(x, n=3) + assert_array_equal(out3.data, [[], [], [], [], []]) + assert_array_equal(out3.mask, [[], [], [], [], []]) + assert_(type(out3) is type(x)) + class TestDelete(object): -- cgit v1.2.1 From 2b781f8967488dc007f8f0a1e6a7f49208788d12 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Tue, 1 Aug 2017 20:29:36 +0000 Subject: MAINT/DOC: Use builtin when np.{x} is builtins.{x}. This is the case for x in {int, bool, str, float, complex, object}. Using the np.{x} version is deceptive as it suggests that there is a difference. This change doesn't affect any external behaviour. 
The `long` type is missing in python 3, so np.long is still useful --- numpy/lib/tests/test_function_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4ecb02821..ad840f8ef 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2663,28 +2663,28 @@ class TestInterp(object): incres = interp(incpts, xp, yp) decres = interp(decpts, xp, yp) - inctgt = np.array([1, 1, 1, 1], dtype=np.float) + inctgt = np.array([1, 1, 1, 1], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, left=0) decres = interp(decpts, xp, yp, left=0) - inctgt = np.array([0, 1, 1, 1], dtype=np.float) + inctgt = np.array([0, 1, 1, 1], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, right=2) decres = interp(decpts, xp, yp, right=2) - inctgt = np.array([1, 1, 1, 2], dtype=np.float) + inctgt = np.array([1, 1, 1, 2], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, left=0, right=2) decres = interp(decpts, xp, yp, left=0, right=2) - inctgt = np.array([0, 1, 1, 2], dtype=np.float) + inctgt = np.array([0, 1, 1, 2], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) -- cgit v1.2.1 From fb004f8bc610b0cf52342d35314a9e0a0434e745 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Tue, 1 Aug 2017 17:40:56 +0000 Subject: MAINT: Stop using the undocumented coercion-then-downcast feature of subdtype --- numpy/lib/tests/test_function_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ad840f8ef..151c20a4b 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1711,16 +1711,16 @@ class TestHistogram(object): # Check the type of the returned histogram a = np.arange(10) + .5 h, b = histogram(a) - assert_(np.issubdtype(h.dtype, int)) + assert_(np.issubdtype(h.dtype, np.integer)) h, b = histogram(a, normed=True) - assert_(np.issubdtype(h.dtype, float)) + assert_(np.issubdtype(h.dtype, np.floating)) h, b = histogram(a, weights=np.ones(10, int)) - assert_(np.issubdtype(h.dtype, int)) + assert_(np.issubdtype(h.dtype, np.integer)) h, b = histogram(a, weights=np.ones(10, float)) - assert_(np.issubdtype(h.dtype, float)) + assert_(np.issubdtype(h.dtype, np.floating)) def test_f32_rounding(self): # gh-4799, check that the rounding of the edges works with float32 -- cgit v1.2.1 From 029863eae86b9df2de4b9a9843ca8f88c99130df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nico=20Schl=C3=B6mer?= Date: Fri, 11 Aug 2017 03:01:06 +0200 Subject: MAINT: Use moveaxis instead of rollaxis internally (#9475) Also add a hint to the documentation advising the use of moveaxis over rollaxis. Tests for rollaxis are left alone. 
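For reference, a small sketch of the two calls the patch swaps (illustrative only; the shape is made up):

    import numpy as np

    x = np.zeros((3, 4, 5))
    np.moveaxis(x, -1, 0).shape   # (5, 3, 4) -- source and destination positions are explicit
    np.rollaxis(x, -1, 0).shape   # (5, 3, 4) -- older interface the docs now advise against

Both calls give the same result here; moveaxis is preferred because its two integer arguments simply mean "move this axis to that position".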
--- numpy/lib/tests/test_function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 151c20a4b..4c90abbf6 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -3011,7 +3011,7 @@ class TestPercentile(object): o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) - x = np.rollaxis(x, -1, 0) + x = np.moveaxis(x, -1, 0) assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) x = x.swapaxes(0, 1).copy() assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) @@ -3392,7 +3392,7 @@ class TestMedian(object): o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.median(x, axis=(0, 1)), np.median(o)) - x = np.rollaxis(x, -1, 0) + x = np.moveaxis(x, -1, 0) assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) x = x.swapaxes(0, 1).copy() assert_equal(np.median(x, axis=(0, -1)), np.median(o)) -- cgit v1.2.1 From f9b99715be1844ce4c612dc66275f1bc662a0af8 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 1 Jul 2017 23:25:37 +0100 Subject: MAINT: improve wording of error messages --- numpy/lib/tests/test_function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 10440d97c..39edc18b4 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2623,7 +2623,7 @@ class TestBincount(object): "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, - "must be non-negative", + "must not be negative", lambda: np.bincount(x, minlength=-1)) x = np.arange(5) @@ -2631,7 +2631,7 @@ class TestBincount(object): "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, - "minlength must be non-negative", + "must not be negative", lambda: np.bincount(x, minlength=-1)) @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") -- cgit v1.2.1 From 4712875840b5413a47bd8dcbed9bedeaefe829cd Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 21 Oct 2017 14:39:49 -0700 Subject: MAINT/BUG: Remove special-casing for 0d arrays, now that indexing with a single boolean is ok Also fix the test added in gh-4792, which didn't make sense, but passed anyway --- numpy/lib/tests/test_function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 39edc18b4..2a42c44e6 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2538,7 +2538,7 @@ class TestPiecewise(object): assert_(y == 0) x = 5 - y = piecewise(x, [[True], [False]], [1, 0]) + y = piecewise(x, [True, False], [1, 0]) assert_(y.ndim == 0) assert_(y == 1) -- cgit v1.2.1 From 303941c929ee2938dc55ad633c9fc063610941b9 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 22 Oct 2017 16:17:51 -0700 Subject: TST: Add test for 0d conditions in np.piecewise --- numpy/lib/tests/test_function_base.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git 
a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 2a42c44e6..e25962da9 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2556,6 +2556,12 @@ class TestPiecewise(object): y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) assert_array_equal(y, 2) + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + def test_multidimensional_extrafunc(self): x = np.array([[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) -- cgit v1.2.1 From c875b1391286a982c958d349d58eb720eb081069 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 22 Oct 2017 16:45:05 -0700 Subject: BUG: Throw an error if too many functions are given to piecewise Especially necessary given the strange heuristics that decay the number of conditions to 1 --- numpy/lib/tests/test_function_base.py | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index e25962da9..8381c2465 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2514,6 +2514,11 @@ class TestPiecewise(object): x = piecewise([0, 0], [[False, True]], [lambda x:-1]) assert_array_equal(x, [0, -1]) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + def test_two_conditions(self): x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) assert_array_equal(x, [3, 4]) @@ -2556,6 +2561,11 @@ class TestPiecewise(object): y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) assert_array_equal(y, 2) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + def test_0d_0d_condition(self): x = np.array(3) c = np.array(x > 3) -- cgit v1.2.1 From 34c9950ccdb8a2f64916903b09c2c39233be0adf Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 9 Dec 2017 13:58:42 -0800 Subject: MAINT: Move histogram and histogramdd into their own module 800 self-contained lines are easily enough to go in their own file, as are the 500 lines of tests. 
For compatibility, the names are still available through `np.lib.function_base.histogram` and `from np.lib.function_base import *` For simplicity of imports, all of the unqualified `np.` names are now qualified --- numpy/lib/tests/test_function_base.py | 512 ---------------------------------- 1 file changed, 512 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 8381c2465..dbab69436 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1617,518 +1617,6 @@ class TestSinc(object): assert_array_equal(y1, y3) -class TestHistogram(object): - - def setup(self): - pass - - def teardown(self): - pass - - def test_simple(self): - n = 100 - v = rand(n) - (a, b) = histogram(v) - # check if the sum of the bins equals the number of samples - assert_equal(np.sum(a, axis=0), n) - # check that the bin counts are evenly spaced when the data is from - # a linear function - (a, b) = histogram(np.linspace(0, 10, 100)) - assert_array_equal(a, 10) - - def test_one_bin(self): - # Ticket 632 - hist, edges = histogram([1, 2, 3, 4], [1, 2]) - assert_array_equal(hist, [2, ]) - assert_array_equal(edges, [1, 2]) - assert_raises(ValueError, histogram, [1, 2], bins=0) - h, e = histogram([1, 2], bins=1) - assert_equal(h, np.array([2])) - assert_allclose(e, np.array([1., 2.])) - - def test_normed(self): - # Check that the integral of the density equals 1. - n = 100 - v = rand(n) - a, b = histogram(v, normed=True) - area = np.sum(a * diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths (buggy but backwards - # compatible) - v = np.arange(10) - bins = [0, 1, 5, 9, 10] - a, b = histogram(v, bins, normed=True) - area = np.sum(a * diff(b)) - assert_almost_equal(area, 1) - - def test_density(self): - # Check that the integral of the density equals 1. - n = 100 - v = rand(n) - a, b = histogram(v, density=True) - area = np.sum(a * diff(b)) - assert_almost_equal(area, 1) - - # Check with non-constant bin widths - v = np.arange(10) - bins = [0, 1, 3, 6, 10] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, .1) - assert_equal(np.sum(a * diff(b)), 1) - - # Variale bin widths are especially useful to deal with - # infinities. - v = np.arange(10) - bins = [0, 1, 3, 6, np.inf] - a, b = histogram(v, bins, density=True) - assert_array_equal(a, [.1, .1, .1, 0.]) - - # Taken from a bug report from N. Becker on the numpy-discussion - # mailing list Aug. 6, 2010. 
- counts, dmy = np.histogram( - [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True) - assert_equal(counts, [.25, 0]) - - def test_outliers(self): - # Check that outliers are not tallied - a = np.arange(10) + .5 - - # Lower outliers - h, b = histogram(a, range=[0, 9]) - assert_equal(h.sum(), 9) - - # Upper outliers - h, b = histogram(a, range=[1, 10]) - assert_equal(h.sum(), 9) - - # Normalization - h, b = histogram(a, range=[1, 9], normed=True) - assert_almost_equal((h * diff(b)).sum(), 1, decimal=15) - - # Weights - w = np.arange(10) + .5 - h, b = histogram(a, range=[1, 9], weights=w, normed=True) - assert_equal((h * diff(b)).sum(), 1) - - h, b = histogram(a, bins=8, range=[1, 9], weights=w) - assert_equal(h, w[1:-1]) - - def test_type(self): - # Check the type of the returned histogram - a = np.arange(10) + .5 - h, b = histogram(a) - assert_(np.issubdtype(h.dtype, np.integer)) - - h, b = histogram(a, normed=True) - assert_(np.issubdtype(h.dtype, np.floating)) - - h, b = histogram(a, weights=np.ones(10, int)) - assert_(np.issubdtype(h.dtype, np.integer)) - - h, b = histogram(a, weights=np.ones(10, float)) - assert_(np.issubdtype(h.dtype, np.floating)) - - def test_f32_rounding(self): - # gh-4799, check that the rounding of the edges works with float32 - x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32) - y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32) - counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100) - assert_equal(counts_hist.sum(), 3.) - - def test_weights(self): - v = rand(100) - w = np.ones(100) * 5 - a, b = histogram(v) - na, nb = histogram(v, normed=True) - wa, wb = histogram(v, weights=w) - nwa, nwb = histogram(v, weights=w, normed=True) - assert_array_almost_equal(a * 5, wa) - assert_array_almost_equal(na, nwa) - - # Check weights are properly applied. - v = np.linspace(0, 10, 10) - w = np.concatenate((np.zeros(5), np.ones(5))) - wa, wb = histogram(v, bins=np.arange(11), weights=w) - assert_array_almost_equal(wa, w) - - # Check with integer weights - wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) - assert_array_equal(wa, [4, 5, 0, 1]) - wa, wb = histogram( - [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True) - assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4) - - # Check weights with non-uniform bin widths - a, b = histogram( - np.arange(9), [0, 1, 3, 6, 10], - weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True) - assert_almost_equal(a, [.2, .1, .1, .075]) - - def test_exotic_weights(self): - - # Test the use of weights that are not integer or floats, but e.g. - # complex numbers or object types. 
- - # Complex weights - values = np.array([1.3, 2.5, 2.3]) - weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2]) - - # Check with custom bins - wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) - assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) - - # Check with even bins - wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) - assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3])) - - # Decimal weights - from decimal import Decimal - values = np.array([1.3, 2.5, 2.3]) - weights = np.array([Decimal(1), Decimal(2), Decimal(3)]) - - # Check with custom bins - wa, wb = histogram(values, bins=[0, 2, 3], weights=weights) - assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) - - # Check with even bins - wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights) - assert_array_almost_equal(wa, [Decimal(1), Decimal(5)]) - - def test_no_side_effects(self): - # This is a regression test that ensures that values passed to - # ``histogram`` are unchanged. - values = np.array([1.3, 2.5, 2.3]) - np.histogram(values, range=[-10, 10], bins=100) - assert_array_almost_equal(values, [1.3, 2.5, 2.3]) - - def test_empty(self): - a, b = histogram([], bins=([0, 1])) - assert_array_equal(a, np.array([0])) - assert_array_equal(b, np.array([0, 1])) - - def test_error_binnum_type (self): - # Tests if right Error is raised if bins argument is float - vals = np.linspace(0.0, 1.0, num=100) - histogram(vals, 5) - assert_raises(TypeError, histogram, vals, 2.4) - - def test_finite_range(self): - # Normal ranges should be fine - vals = np.linspace(0.0, 1.0, num=100) - histogram(vals, range=[0.25,0.75]) - assert_raises(ValueError, histogram, vals, range=[np.nan,0.75]) - assert_raises(ValueError, histogram, vals, range=[0.25,np.inf]) - - def test_bin_edge_cases(self): - # Ensure that floating-point computations correctly place edge cases. - arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) - hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) - mask = hist > 0 - left_edges = edges[:-1][mask] - right_edges = edges[1:][mask] - for x, left, right in zip(arr, left_edges, right_edges): - assert_(x >= left) - assert_(x < right) - - def test_last_bin_inclusive_range(self): - arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) - hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) - assert_equal(hist[-1], 1) - - def test_unsigned_monotonicity_check(self): - # Ensures ValueError is raised if bins not increasing monotonically - # when bins contain unsigned values (see #9222) - arr = np.array([2]) - bins = np.array([1, 3, 1], dtype='uint64') - with assert_raises(ValueError): - hist, edges = np.histogram(arr, bins=bins) - - -class TestHistogramOptimBinNums(object): - """ - Provide test coverage when using provided estimators for optimal number of - bins - """ - - def test_empty(self): - estimator_list = ['fd', 'scott', 'rice', 'sturges', - 'doane', 'sqrt', 'auto'] - # check it can deal with empty data - for estimator in estimator_list: - a, b = histogram([], bins=estimator) - assert_array_equal(a, np.array([0])) - assert_array_equal(b, np.array([0, 1])) - - def test_simple(self): - """ - Straightforward testing with a mixture of linspace data (for - consistency). All test values have been precomputed and the values - shouldn't change - """ - # Some basic sanity checking, with some fixed data. 
- # Checking for the correct number of bins - basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, - 'doane': 8, 'sqrt': 8, 'auto': 7}, - 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, - 'doane': 12, 'sqrt': 23, 'auto': 10}, - 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, - 'doane': 17, 'sqrt': 71, 'auto': 17}} - - for testlen, expectedResults in basic_test.items(): - # Create some sort of non uniform data to test with - # (2 peak uniform mixture) - x1 = np.linspace(-10, -1, testlen // 5 * 2) - x2 = np.linspace(1, 10, testlen // 5 * 3) - x = np.concatenate((x1, x2)) - for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator) - assert_equal(len(a), numbins, err_msg="For the {0} estimator " - "with datasize of {1}".format(estimator, testlen)) - - def test_small(self): - """ - Smaller datasets have the potential to cause issues with the data - adaptive methods, especially the FD method. All bin numbers have been - precalculated. - """ - small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, - 'doane': 1, 'sqrt': 1}, - 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, - 'doane': 1, 'sqrt': 2}, - 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, - 'doane': 3, 'sqrt': 2}} - - for testlen, expectedResults in small_dat.items(): - testdat = np.arange(testlen) - for estimator, expbins in expectedResults.items(): - a, b = np.histogram(testdat, estimator) - assert_equal(len(a), expbins, err_msg="For the {0} estimator " - "with datasize of {1}".format(estimator, testlen)) - - def test_incorrect_methods(self): - """ - Check a Value Error is thrown when an unknown string is passed in - """ - check_list = ['mad', 'freeman', 'histograms', 'IQR'] - for estimator in check_list: - assert_raises(ValueError, histogram, [1, 2, 3], estimator) - - def test_novariance(self): - """ - Check that methods handle no variance in data - Primarily for Scott and FD as the SD and IQR are both 0 in this case - """ - novar_dataset = np.ones(100) - novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, - 'doane': 1, 'sqrt': 1, 'auto': 1} - - for estimator, numbins in novar_resultdict.items(): - a, b = np.histogram(novar_dataset, estimator) - assert_equal(len(a), numbins, err_msg="{0} estimator, " - "No Variance test".format(estimator)) - - def test_outlier(self): - """ - Check the FD, Scott and Doane with outliers. - - The FD estimates a smaller binwidth since it's less affected by - outliers. Since the range is so (artificially) large, this means more - bins, most of which will be empty, but the data of interest usually is - unaffected. The Scott estimator is more affected and returns fewer bins, - despite most of the variance being in one area of the data. The Doane - estimator lies somewhere between the other two. - """ - xcenter = np.linspace(-10, 10, 50) - outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) - - outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11} - - for estimator, numbins in outlier_resultdict.items(): - a, b = np.histogram(outlier_dataset, estimator) - assert_equal(len(a), numbins) - - def test_simple_range(self): - """ - Straightforward testing with a mixture of linspace data (for - consistency). Adding in a 3rd mixture that will then be - completely ignored. All test values have been precomputed and - the shouldn't change. - """ - # some basic sanity checking, with some fixed data. 
- # Checking for the correct number of bins - basic_test = { - 50: {'fd': 8, 'scott': 8, 'rice': 15, - 'sturges': 14, 'auto': 14}, - 500: {'fd': 15, 'scott': 16, 'rice': 32, - 'sturges': 20, 'auto': 20}, - 5000: {'fd': 33, 'scott': 33, 'rice': 69, - 'sturges': 27, 'auto': 33} - } - - for testlen, expectedResults in basic_test.items(): - # create some sort of non uniform data to test with - # (3 peak uniform mixture) - x1 = np.linspace(-10, -1, testlen // 5 * 2) - x2 = np.linspace(1, 10, testlen // 5 * 3) - x3 = np.linspace(-100, -50, testlen) - x = np.hstack((x1, x2, x3)) - for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator, range = (-20, 20)) - msg = "For the {0} estimator".format(estimator) - msg += " with datasize of {0}".format(testlen) - assert_equal(len(a), numbins, err_msg=msg) - - def test_simple_weighted(self): - """ - Check that weighted data raises a TypeError - """ - estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] - for estimator in estimator_list: - assert_raises(TypeError, histogram, [1, 2, 3], - estimator, weights=[1, 2, 3]) - - -class TestHistogramdd(object): - - def test_simple(self): - x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], - [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) - H, edges = histogramdd(x, (2, 3, 3), - range=[[-1, 1], [0, 3], [0, 3]]) - answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], - [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) - assert_array_equal(H, answer) - - # Check normalization - ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] - H, edges = histogramdd(x, bins=ed, normed=True) - assert_(np.all(H == answer / 12.)) - - # Check that H has the correct shape. - H, edges = histogramdd(x, (2, 3, 4), - range=[[-1, 1], [0, 3], [0, 4]], - normed=True) - answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], - [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) - assert_array_almost_equal(H, answer / 6., 4) - # Check that a sequence of arrays is accepted and H has the correct - # shape. - z = [np.squeeze(y) for y in split(x, 3, axis=1)] - H, edges = histogramdd( - z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) - answer = np.array([[[0, 0], [0, 0], [0, 0]], - [[0, 1], [0, 0], [1, 0]], - [[0, 1], [0, 0], [0, 0]], - [[0, 0], [0, 0], [0, 0]]]) - assert_array_equal(H, answer) - - Z = np.zeros((5, 5, 5)) - Z[list(range(5)), list(range(5)), list(range(5))] = 1. - H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) - assert_array_equal(H, Z) - - def test_shape_3d(self): - # All possible permutations for bins of different lengths in 3D. - bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), - (4, 5, 6)) - r = rand(10, 3) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_shape_4d(self): - # All possible permutations for bins of different lengths in 4D. 
- bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), - (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), - (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), - (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), - (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), - (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) - - r = rand(10, 4) - for b in bins: - H, edges = histogramdd(r, b) - assert_(H.shape == b) - - def test_weights(self): - v = rand(100, 2) - hist, edges = histogramdd(v) - n_hist, edges = histogramdd(v, normed=True) - w_hist, edges = histogramdd(v, weights=np.ones(100)) - assert_array_equal(w_hist, hist) - w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True) - assert_array_equal(w_hist, n_hist) - w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) - assert_array_equal(w_hist, 2 * hist) - - def test_identical_samples(self): - x = np.zeros((10, 2), int) - hist, edges = histogramdd(x, bins=2) - assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) - - def test_empty(self): - a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) - assert_array_max_ulp(a, np.array([[0.]])) - a, b = np.histogramdd([[], [], []], bins=2) - assert_array_max_ulp(a, np.zeros((2, 2, 2))) - - def test_bins_errors(self): - # There are two ways to specify bins. Check for the right errors - # when mixing those. - x = np.arange(8).reshape(2, 4) - assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) - assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) - assert_raises( - ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]]) - assert_raises( - ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) - assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) - - def test_inf_edges(self): - # Test using +/-inf bin edges works. See #1788. - with np.errstate(invalid='ignore'): - x = np.arange(6).reshape(3, 2) - expected = np.array([[1, 0], [0, 1], [0, 1]]) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) - assert_allclose(h, expected) - h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) - assert_allclose(h, expected) - - def test_rightmost_binedge(self): - # Test event very close to rightmost binedge. See Github issue #4266 - x = [0.9999999995] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) - x = [1.0] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) - x = [1.0000000001] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 1.) 
- x = [1.0001] - bins = [[0., 0.5, 1.0]] - hist, _ = histogramdd(x, bins=bins) - assert_(hist[0] == 0.0) - assert_(hist[1] == 0.0) - - def test_finite_range(self): - vals = np.random.random((100, 3)) - histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) - assert_raises(ValueError, histogramdd, vals, - range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) - assert_raises(ValueError, histogramdd, vals, - range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) - - class TestUnique(object): def test_simple(self): -- cgit v1.2.1 From e772a1a9bb9626490316e6c9ae8d57cd25cff33c Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 28 Dec 2017 07:40:57 +0000 Subject: ENH: Allow ptp to take an axis tuple and keepdims --- numpy/lib/tests/test_function_base.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index dbab69436..dc5fe3397 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -556,6 +556,9 @@ class TestPtp(object): assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0]) assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0]) + assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]]) + assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]]) + class TestCumsum(object): -- cgit v1.2.1 From 8eccbf7df2f564e23c1bf7c9f5ee08e4b0dc6a36 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 25 Feb 2018 11:28:31 -0800 Subject: BUG/MAINT: Remove special handling of 0d arrays and scalars in interp These are now handled generically by the underlying C function This fixes the period argument for 0d arrays. Now never returns a pure-python scalar, which matches the behaviour of most of numpy. Rework of b66a200a4a1e98f1955c8a774e4ebfb4588dab5b --- numpy/lib/tests/test_function_base.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index dc5fe3397..49b450175 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2252,8 +2252,17 @@ class TestInterp(object): y = np.linspace(0, 1, 5) x0 = np.array(.3) assert_almost_equal(np.interp(x0, x, y), x0) - x0 = np.array(.3, dtype=object) - assert_almost_equal(np.interp(x0, x, y), .3) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) def test_if_len_x_is_small(self): xp = np.arange(0, 10, 0.0001) -- cgit v1.2.1 From 018dfbcaa69d8bcda4d909be11a65a188cf5d1dd Mon Sep 17 00:00:00 2001 From: David Freese Date: Sun, 25 Feb 2018 20:09:15 -0800 Subject: BUG: fix complex casting error in cov with aweights When using cov with a complex input and with aweights specified, cov will error as a result of trying to cast a complex value into a float64. This comes about since average is used to calculate the sum of the weights from aweights. average returns the sum of weights as the same type as its result, not the weights type. For a complex input m, and any type for aweights, this would result in a complex value for fact. It appears the primary purpose of np.float64(fact) is to provide a NaN value from the divide when fact is an integer zero. 
This has been replaced by using numpy.divide to replicate the same behavior, but to also handle complex types. --- numpy/lib/tests/test_function_base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index dc5fe3397..89bc02fb7 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1752,7 +1752,9 @@ class TestCov(object): def test_complex(self): x = np.array([[1, 2, 3], [1j, 2j, 3j]]) - assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]])) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) def test_xy(self): x = np.array([[1, 2, 3]]) -- cgit v1.2.1 From e97de95d4cae6805ed6c258655e7492a5f2ce863 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Mon, 12 Mar 2018 22:47:12 +0000 Subject: Fix low-hanging Pypy compatibility issues (#10737) * TST: skip refcount-requiring tests if sys.refcount is missing * ENH: io: add refcheck=False to a safe .resize() call The array is allocated immediately above, and the resize always succeeds so it is not necessary to check it. Fixes Pypy compatibility. * TST: remove unused code * TST: factor skipif(not HAS_REFCOUNT) into a separate decorator --- numpy/lib/tests/test_function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 49b450175..2b1ac1cc0 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -11,7 +11,7 @@ from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, - dec, suppress_warnings, HAS_REFCOUNT, + dec, suppress_warnings, ) import numpy.lib.function_base as nfb from numpy.random import rand @@ -2141,7 +2141,7 @@ class TestBincount(object): "must not be negative", lambda: np.bincount(x, minlength=-1)) - @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") + @dec._needs_refcount def test_dtype_reference_leaks(self): # gh-6805 intp_refcount = sys.getrefcount(np.dtype(np.intp)) -- cgit v1.2.1 From 7e5a41de9fab731e27a761c01302a0a93e2d1070 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 25 Mar 2018 12:34:16 -0600 Subject: TST: Switch to using pytest markers Use standard pytest markers everywhere in the numpy tests. At this point there should be no nose dependency. However, nose is required to test the legacy decorators if so desired. At this point, numpy test cannot be run in the way with runtests, rather installed numpy can be tested with `pytest --pyargs numpy` as long as that is not run from the repo. Run it from the tools directory or some such. 
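For reference, a minimal sketch of the decorator translation this commit applies throughout the file (the test name is invented for illustration; the old nose-style form appears only in the comment, and HAS_REFCOUNT is imported from numpy.testing exactly as in the diff below):

    import sys
    import pytest
    from numpy.testing import HAS_REFCOUNT

    # old (nose):   @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
    # new (pytest):
    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_refcount_sketch():
        assert sys.getrefcount(object()) > 0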
--- numpy/lib/tests/test_function_base.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index c28257c6d..2ea2dbc56 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4,6 +4,7 @@ import operator import warnings import sys import decimal +import pytest import numpy as np from numpy import ma @@ -11,7 +12,7 @@ from numpy.testing import ( run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, - dec, suppress_warnings, + suppress_warnings, HAS_REFCOUNT, ) import numpy.lib.function_base as nfb from numpy.random import rand @@ -2143,7 +2144,7 @@ class TestBincount(object): "must not be negative", lambda: np.bincount(x, minlength=-1)) - @dec._needs_refcount + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_dtype_reference_leaks(self): # gh-6805 intp_refcount = sys.getrefcount(np.dtype(np.intp)) @@ -2987,7 +2988,7 @@ class TestAdd_newdoc_ufunc(object): class TestAdd_newdoc(object): - @dec.skipif(sys.flags.optimize == 2) + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") def test_add_doc(self): # test np.add_newdoc tgt = "Current flat index into the array." -- cgit v1.2.1 From 7bf0564f87511d9e7b6287b3eec0857d0d7742df Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 4 Apr 2018 10:33:07 -0600 Subject: MAINT: Remove all uses of run_module_suite. That function is nose specific and has not worked since `__init__` files were added to the tests directories. --- numpy/lib/tests/test_function_base.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 2ea2dbc56..0a4c7c370 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -9,11 +9,11 @@ import pytest import numpy as np from numpy import ma from numpy.testing import ( - run_module_suite, assert_, assert_equal, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_raises, - assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, - suppress_warnings, HAS_REFCOUNT, -) + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose, + assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings, + HAS_REFCOUNT, + ) import numpy.lib.function_base as nfb from numpy.random import rand from numpy.lib import ( @@ -22,7 +22,7 @@ from numpy.lib import ( histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros, unwrap, unique, vectorize -) + ) from numpy.compat import long @@ -2995,7 +2995,3 @@ class TestAdd_newdoc(object): assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) assert_(len(np.core.ufunc.identity.__doc__) > 300) assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) - - -if __name__ == "__main__": - run_module_suite() -- cgit v1.2.1 From 5d5c379ed5af0df19eec68fdde1f87ad994ce6b1 Mon Sep 17 00:00:00 2001 From: Chun-Wei Yuan Date: Tue, 16 Jan 2018 15:11:42 -0800 Subject: ENH: Adding np.quantile() and np.nanquantile(). 
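A short usage sketch of the new function (not part of the patch): np.quantile(a, q) matches np.percentile(a, 100 * q), taking q in [0, 1] instead of [0, 100].

    import numpy as np

    x = np.arange(8) * 0.5                       # 0.0, 0.5, ..., 3.5
    assert np.quantile(x, 0.5) == 1.75
    assert np.quantile(x, 0.5) == np.percentile(x, 50)
    assert np.quantile(x, 1.0) == x.max() == 3.5

This mirrors the basic checks in the TestQuantile class added by the diff that follows.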
#10199 --- numpy/lib/tests/test_function_base.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 0a4c7c370..280715bc3 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2719,6 +2719,28 @@ class TestPercentile(object): a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) +class TestQuantile(object): + # most of this is already tested by TestPercentile + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_no_p_overwrite(self): + # this is worth retesting, beause quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, interpolation="midpoint") + assert_array_equal(p, p0) + + class TestMedian(object): def test_basic(self): -- cgit v1.2.1 From 9d1d44fc120c451a4925720027e85c70e13c26f2 Mon Sep 17 00:00:00 2001 From: Junjie Bai Date: Tue, 10 Apr 2018 01:24:35 -0700 Subject: ENH: Extend np.flip to work over multiple axes Closes #10847 --- numpy/lib/tests/test_function_base.py | 36 ++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 0a4c7c370..6653b5ba1 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -104,9 +104,10 @@ class TestRot90(object): class TestFlip(object): def test_axes(self): - assert_raises(ValueError, np.flip, np.ones(4), axis=1) - assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2) - assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3) + assert_raises(np.AxisError, np.flip, np.ones(4), axis=1) + assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2) + assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3) + assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3)) def test_basic_lr(self): a = get_mat(4) @@ -173,6 +174,35 @@ class TestFlip(object): assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) + def test_default_axis(self): + a = np.array([[1, 2, 3], + [4, 5, 6]]) + b = np.array([[6, 5, 4], + [3, 2, 1]]) + assert_equal(np.flip(a), b) + + def test_multiple_axes(self): + a = np.array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + assert_equal(np.flip(a, axis=()), a) + + b = np.array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + + assert_equal(np.flip(a, axis=(0, 2)), b) + + c = np.array([[[3, 2], + [1, 0]], + [[7, 6], + [5, 4]]]) + + assert_equal(np.flip(a, axis=(1, 2)), c) + class TestAny(object): -- cgit v1.2.1 From 1394d0a723832d22ca749e402975786c7707fe02 Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Sun, 29 Apr 2018 17:13:49 -0400 Subject: MAINT: move matrix tests in lib to matrixlib. 
--- numpy/lib/tests/test_function_base.py | 21 --------------------- 1 file changed, 21 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 43d62a7ff..6c4f58424 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -287,9 +287,6 @@ class TestAverage(object): assert_almost_equal(y5.mean(0), average(y5, 0)) assert_almost_equal(y5.mean(1), average(y5, 1)) - y6 = np.matrix(rand(5, 5)) - assert_array_equal(y6.mean(0), average(y6, 0)) - def test_weights(self): y = np.arange(10) w = np.arange(10) @@ -357,14 +354,6 @@ class TestAverage(object): assert_equal(type(np.average(a)), subclass) assert_equal(type(np.average(a, weights=w)), subclass) - # also test matrices - a = np.matrix([[1,2],[3,4]]) - w = np.matrix([[1,2],[3,4]]) - - r = np.average(a, axis=0, weights=w) - assert_equal(type(r), np.matrix) - assert_equal(r, [[2.5, 10.0/3]]) - def test_upcasting(self): types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')] @@ -1623,16 +1612,6 @@ class TestTrapz(object): xm = np.ma.array(x, mask=mask) assert_almost_equal(trapz(y, xm), r) - def test_matrix(self): - # Test to make sure matrices give the same answer as ndarrays - x = np.linspace(0, 5) - y = x * x - r = trapz(y, x) - mx = np.matrix(x) - my = np.matrix(y) - mr = trapz(my, mx) - assert_almost_equal(mr, r) - class TestSinc(object): -- cgit v1.2.1 From 820765d762513510a8e46f108e8bc8b366127f8f Mon Sep 17 00:00:00 2001 From: luzpaz Date: Tue, 1 May 2018 04:28:18 +0000 Subject: MAINT: Misc. typos (#11005) User- and non-user-facing typos. Some source typos fixes as well. Found via `codespell`. --- numpy/lib/tests/test_function_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 43d62a7ff..5dc96775b 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1525,9 +1525,9 @@ class TestDigitize(object): class TestUnwrap(object): def test_simple(self): - # check that unwrap removes jumps greather that 2*pi + # check that unwrap removes jumps greater that 2*pi assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) - # check that unwrap maintans continuity + # check that unwrap maintains continuity assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) @@ -2759,7 +2759,7 @@ class TestQuantile(object): assert_equal(np.quantile(x, 0.5), 1.75) def test_no_p_overwrite(self): - # this is worth retesting, beause quantile does not make a copy + # this is worth retesting, because quantile does not make a copy p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) p = p0.copy() np.quantile(np.arange(100.), p, interpolation="midpoint") -- cgit v1.2.1 From fad8c8f9f572db79391e6819f3880026cf9bb3f5 Mon Sep 17 00:00:00 2001 From: Jack Vreeken Date: Thu, 28 Jun 2018 15:30:47 +0200 Subject: BUG: fix interpolation with inf and NaN present Values like NaN and inf would result in wrong interpolated values on exactly matching sampling points. To produce the correct behavior, we add an additional check to avoid interpolation when handling such a point. 
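A small illustration of the guaranteed behaviour, mirroring the test added below (a sketch, not part of the patch):

    import numpy as np

    xp = [1, 2, 3, 4]
    fp = [1, 2, np.inf, 4]

    # x == 2 and x == 4 hit sample points exactly; with the extra check the
    # tabulated values come back verbatim instead of being polluted by the
    # neighbouring inf (e.g. inf * 0 -> nan inside the interpolation formula).
    result = np.interp([1, 2, 2.5, 3, 4], xp, fp)   # -> [1., 2., inf, inf, 4.]
    assert np.array_equal(result, [1, 2, np.inf, np.inf, 4])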
Closes #11439 --- numpy/lib/tests/test_function_base.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 4103a9eb3..d2a9181db 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2237,6 +2237,14 @@ class TestInterp(object): x0 = np.nan assert_almost_equal(np.interp(x0, x, y), x0) + def test_non_finite_behavior(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + def test_complex_interp(self): # test complex interpolation x = np.linspace(0, 1, 5) @@ -2251,6 +2259,12 @@ class TestInterp(object): x0 = 2.0 right = 2 + 3.0j assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2+1j, np.inf, 4] + y = [1, 2+1j, np.inf+0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) # test complex periodic x = [-180, -170, -185, 185, -10, -5, 0, 365] xp = [190, -190, 350, -350] -- cgit v1.2.1 From 307dd76cb0fb770e07524e4cd29cd08e17edc2c3 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Fri, 29 Jun 2018 23:56:38 -0700 Subject: BUG: Don't convert inputs to `np.float64` in digitize This converts digitize to a pure-python function that falls back on searchsorted. Performance doesn't really matter here anyway - if you care about performance, then you should just call searchsorted directly, rather than checking the order of the bins. Partially fixes gh-11022 --- numpy/lib/tests/test_function_base.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'numpy/lib/tests/test_function_base.py') diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index d2a9181db..ba5b90e8c 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1510,6 +1510,18 @@ class TestDigitize(object): assert_(not isinstance(digitize(b, a, False), A)) assert_(not isinstance(digitize(b, a, True), A)) + def test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np.core.multiarray._monoticity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + class TestUnwrap(object): -- cgit v1.2.1
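To make the "falls back on searchsorted" remark above concrete, here is a rough sketch of the idea (the function name is invented for illustration; the real np.digitize additionally validates that the bins are monotonic and raises ValueError otherwise):

    import numpy as np

    def digitize_sketch(x, bins, right=False):
        bins = np.asarray(bins)
        side = 'left' if right else 'right'
        if bins.size > 1 and bins[0] > bins[-1]:
            # decreasing bins: search the reversed array and flip the index
            return bins.size - np.searchsorted(bins[::-1], x, side=side)
        return np.searchsorted(bins, x, side=side)

    x = np.array([0.2, 6.4, 3.0, 1.6])
    bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    assert np.array_equal(digitize_sketch(x, bins), np.digitize(x, bins))  # [1 4 3 2]

    # Nothing is cast to float64 along the way, so exact integers such as 2**54
    # keep their precision -- the property the new increasing-bins test checks.
    assert digitize_sketch(2**54, [2**54 - 1, 2**54 + 1]) == 1

Callers who know their bins are sorted and increasing can skip the wrapper entirely and call np.searchsorted directly, which is the performance advice given in the commit message.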