Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/function_base.py             319
-rw-r--r--  numpy/lib/tests/test_function_base.py  205
2 files changed, 398 insertions, 126 deletions
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 4d1ffbccc..fc49a6fd7 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -128,7 +128,8 @@ def rot90(m, k=1, axes=(0,1)):
return flip(flip(m, axes[0]), axes[1])
axes_list = arange(0, m.ndim)
- axes_list[axes[0]], axes_list[axes[1]] = axes_list[axes[1]], axes_list[axes[0]]
+ (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
+ axes_list[axes[0]])
if k == 1:
return transpose(flip(m,axes[1]), axes_list)
@@ -332,7 +333,7 @@ def _hist_bin_doane(x):
Improved version of Sturges' formula which works better for
non-normal data. See
- http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+ stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
@@ -638,7 +639,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
- >>> plt.hist(a, bins='auto') # plt.hist passes its arguments to np.histogram
+ >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
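The comment rewrite above is accurate: `plt.hist` forwards its `bins` argument to `np.histogram`, so the estimator can be exercised without matplotlib at all. A minimal sketch (illustrative, not from the source):

    import numpy as np

    rng = np.random.RandomState(10)
    a = np.hstack((rng.normal(size=1000),
                   rng.normal(loc=5, scale=2, size=1000)))
    # 'auto' picks between the 'fd' and 'sturges' estimators
    counts, edges = np.histogram(a, bins='auto')
    assert len(edges) == len(counts) + 1   # edges bracket the bins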
@@ -764,15 +765,19 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
decrement = tmp_a_data < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
- increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)
+ increment = ((tmp_a_data >= bin_edges[indices + 1])
+ & (indices != bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
- n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
- n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
+ n.real += np.bincount(indices, weights=tmp_w.real,
+ minlength=bins)
+ n.imag += np.bincount(indices, weights=tmp_w.imag,
+ minlength=bins)
else:
- n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
+ n += np.bincount(indices, weights=tmp_w,
+ minlength=bins).astype(ntype)
# Rename the bin edges for return.
bins = bin_edges
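The bincount step above is the core of the counting loop: once each sample is mapped to a bin index, `np.bincount` with `minlength` produces the per-bin totals (optionally weighted, and applied separately to the real and imaginary parts for complex weights). A standalone sketch of the idea, using `np.digitize` for the index computation instead of the block-wise arithmetic in the patch:

    import numpy as np

    data = np.array([0.1, 0.4, 0.5, 0.9, 1.0])
    bin_edges = np.linspace(0.0, 1.0, 5)        # 4 equal-width bins
    nbins = len(bin_edges) - 1
    indices = np.digitize(data, bin_edges) - 1  # bin index per sample
    indices[indices == nbins] = nbins - 1       # last bin includes right edge
    counts = np.bincount(indices, minlength=nbins)
    print(counts)                               # [1 1 1 2]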
@@ -1499,19 +1504,29 @@ def gradient(f, *varargs, **kwargs):
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
- in the interior and either first differences or second order accurate
- one-sides (forward or backwards) differences at the boundaries. The
- returned gradient hence has the same shape as the input array.
+ in the interior points and either first or second order accurate one-sided
+ (forward or backwards) differences at the boundaries.
+ The returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
- varargs : scalar or list of scalar, optional
- N scalars specifying the sample distances for each dimension,
- i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
- single scalar specifies sample distance for all dimensions.
- if `axis` is given, the number of varargs must equal the number of axes.
+ varargs : list of scalar or array, optional
+ Spacing between f values. Default unitary spacing for all dimensions.
+ Spacing can be specified using:
+
+ 1. single scalar to specify a sample distance for all dimensions.
+ 2. N scalars to specify a constant sample distance for each dimension,
+ i.e. `dx`, `dy`, `dz`, ...
+ 3. N arrays to specify the coordinates of the values along each
+ dimension of f. The length of the array must match the size of
+ the corresponding dimension.
+ 4. Any combination of N scalars/arrays with the meaning of 2. and 3.
+
+ If `axis` is given, the number of varargs must equal the number of axes.
+ Default: 1.
+
edge_order : {1, 2}, optional
Gradient is calculated using N-th order accurate differences
at the boundaries. Default: 1.
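The new `varargs` forms described in the docstring above can be exercised as follows; a minimal sketch, assuming a NumPy new enough to include this change:

    import numpy as np

    f = np.array([1., 2., 4., 7., 11., 16.])
    x = np.array([0., 1., 1.5, 3.5, 4., 6.])

    np.gradient(f)           # default unitary spacing
    np.gradient(f, 2.)       # form 1: one scalar for all dimensions
    np.gradient(f, x)        # form 3: coordinate array, possibly non-uniform

    f2 = np.arange(25.).reshape(5, 5)
    y = np.array([0., 1., 3., 3.5, 5.])
    np.gradient(f2, 2., 3.)  # form 2: one scalar per dimension (dx, dy)
    np.gradient(f2, 2., y)   # form 4: scalar for axis 0, coordinates for axis 1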
@@ -1520,8 +1535,9 @@ def gradient(f, *varargs, **kwargs):
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
- The default (axis = None) is to calculate the gradient for all the axes of the input array.
- axis may be negative, in which case it counts from the last to the first axis.
+ The default (axis = None) is to calculate the gradient for all the axes
+ of the input array. axis may be negative, in which case it counts from
+ the last to the first axis.
.. versionadded:: 1.11.0
@@ -1529,17 +1545,31 @@ def gradient(f, *varargs, **kwargs):
-------
gradient : ndarray or list of ndarray
A set of ndarrays (or a single ndarray if there is only one dimension)
- correposnding to the derivatives of f with respect to each dimension.
+ corresponding to the derivatives of f with respect to each dimension.
Each derivative has the same shape as f.
-
+
Examples
--------
- >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
- >>> np.gradient(x)
+ >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
+ >>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
- >>> np.gradient(x, 2)
+ >>> np.gradient(f, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+
+ Spacing can also be specified with an array that represents the coordinates
+ of the values of f along the dimensions.
+ For instance, a uniform spacing:
+
+ >>> x = np.arange(f.size)
+ >>> np.gradient(f, x)
+ array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+
+ Or a non-uniform one:
+
+ >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
+ >>> np.gradient(f, x)
+ array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
+
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
@@ -1549,15 +1579,99 @@ def gradient(f, *varargs, **kwargs):
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
+ In this example the spacing is also specified:
+ uniform for axis=0 and non-uniform for axis=1
+
+ >>> dx = 2.
+ >>> y = [1., 1.5, 3.5]
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y)
+ [array([[ 1. , 1. , -0.5],
+ [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
+ [ 2. , 1.7, 0.5]])]
+
+ It is possible to specify how boundaries are treated using `edge_order`
+
>>> x = np.array([0, 1, 2, 3, 4])
- >>> y = x**2
- >>> np.gradient(y, edge_order=2)
+ >>> f = x**2
+ >>> np.gradient(f, edge_order=1)
+ array([ 1., 2., 4., 6., 7.])
+ >>> np.gradient(f, edge_order=2)
array([-0., 2., 4., 6., 8.])
- The axis keyword can be used to specify a subset of axes of which the gradient is calculated
+ The `axis` keyword can be used to specify a subset of axes along which
+ the gradient is calculated
+
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
+
+ Notes
+ -----
+ Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
+ derivatives) and letting :math:`h_{*}` be a non-homogeneous stepsize, the
+ finite difference coefficients are computed by minimising the consistency
+ error :math:`\\eta_{i}`:
+
+ .. math::
+
+ \\eta_{i} = f_{i}^{\\left(1\\right)} -
+ \\left[ \\alpha f\\left(x_{i}\\right) +
+ \\beta f\\left(x_{i} + h_{d}\\right) +
+ \\gamma f\\left(x_{i}-h_{s}\\right)
+ \\right]
+
+ By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
+ with their Taylor series expansion, this translates into solving
+ the following linear system:
+
+ .. math::
+
+ \\left\\{
+ \\begin{array}{r}
+ \\alpha+\\beta+\\gamma=0 \\\\
+ -\\beta h_{d}+\\gamma h_{s}=1 \\\\
+ \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
+ \\end{array}
+ \\right.
+
+ The resulting approximation of :math:`f_{i}^{(1)}` is the following:
+
+ .. math::
+
+ \\hat f_{i}^{(1)} =
+ \\frac{
+ h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
+ - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
+ { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ + h_{s}h_{d}^{2}}{h_{d}
+ + h_{s}}\\right)
+
+ It is worth noting that if :math:`h_{s}=h_{d}`
+ (i.e., data are evenly spaced)
+ we find the standard second order approximation:
+
+ .. math::
+
+ \\hat f_{i}^{(1)}=
+ \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ + \\mathcal{O}\\left(h^{2}\\right)
+
+ With a similar procedure the forward/backward approximations used for
+ boundaries can be derived.
+
+ References
+ ----------
+ .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
+ (Texts in Applied Mathematics). New York: Springer.
+ .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
+ in Geophysical Fluid Dynamics. New York: Springer.
+ .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
+ Arbitrarily Spaced Grids,
+ Mathematics of Computation 51, no. 184 : 699-706.
+ `PDF <http://www.ams.org/journals/mcom/1988-51-184/
+ S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
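As a sanity check on the interior formula derived in the Notes above: the second-order scheme is exact for quadratics, which a minimal sketch (not part of the patch) can confirm on a single non-uniform stencil:

    import numpy as np

    x = np.array([0.0, 1.0, 3.5])   # h_s = 1.0, h_d = 2.5 around x[1]
    f = x ** 2                      # exact derivative at x[1] is 2.0
    hs, hd = x[1] - x[0], x[2] - x[1]
    fhat = (hs**2 * f[2] + (hd**2 - hs**2) * f[1]
            - hd**2 * f[0]) / (hs * hd * (hd + hs))
    print(fhat)                     # 2.0, and np.gradient(f, x)[1] agrees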
@@ -1576,22 +1690,32 @@ def gradient(f, *varargs, **kwargs):
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
- if len(set(axes)) != len(axes):
+ len_axes = len(axes)
+ if len(set(axes)) != len_axes:
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
- dx = [1.0]*N
- elif n == 1:
- dx = [varargs[0]]*N
- elif n == len(axes):
+ dx = [1.0] * len_axes
+ elif n == len_axes or (n == 1 and np.isscalar(varargs[0])):
dx = list(varargs)
+ for i, distances in enumerate(dx):
+ if np.isscalar(distances):
+ continue
+ if len(distances) != f.shape[axes[i]]:
+ raise ValueError("distances must be either scalars or match "
+ "the length of the corresponding dimension")
+ diffx = np.diff(dx[i])
+ # if distances are constant reduce to the scalar case
+ # since it brings a consistent speedup
+ if (diffx == diffx[0]).all():
+ diffx = diffx[0]
+ dx[i] = diffx
+ if len(dx) == 1:
+ dx *= len_axes
else:
- raise SyntaxError(
- "invalid number of arguments")
- if any([not np.isscalar(dxi) for dxi in dx]):
- raise ValueError("distances must be scalars")
-
+ raise TypeError("invalid number of arguments")
+
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
@@ -1631,62 +1755,88 @@ def gradient(f, *varargs, **kwargs):
y = f
for i, axis in enumerate(axes):
-
- if y.shape[axis] < 2:
+ if y.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
- "at least two elements are required.")
-
- # Numerical differentiation: 1st order edges, 2nd order interior
- if y.shape[axis] == 2 or edge_order == 1:
- # Use first order differences for time data
- out = np.empty_like(y, dtype=otype)
-
- slice1[axis] = slice(1, -1)
- slice2[axis] = slice(2, None)
- slice3[axis] = slice(None, -2)
- # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
- out[slice1] = (y[slice2] - y[slice3])/2.0
-
+ "at least (edge_order + 1) elements are required.")
+ # result allocation
+ out = np.empty_like(y, dtype=otype)
+
+ uniform_spacing = np.isscalar(dx[i])
+
+ # Numerical differentiation: 2nd order interior
+ slice1[axis] = slice(1, -1)
+ slice2[axis] = slice(None, -2)
+ slice3[axis] = slice(1, -1)
+ slice4[axis] = slice(2, None)
+
+ if uniform_spacing:
+ out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i])
+ else:
+ dx1 = dx[i][0:-1]
+ dx2 = dx[i][1:]
+ a = -(dx2)/(dx1 * (dx1 + dx2))
+ b = (dx2 - dx1) / (dx1 * dx2)
+ c = dx1 / (dx2 * (dx1 + dx2))
+ # fix the shape for broadcasting
+ shape = np.ones(N, dtype=int)
+ shape[axis] = -1
+ a.shape = b.shape = c.shape = shape
+ # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
+ out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
+
+ # Numerical differentiation: 1st order edges
+ if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
- # 1D equivalent -- out[0] = (y[1] - y[0])
- out[slice1] = (y[slice2] - y[slice3])
-
+ dx_0 = dx[i] if uniform_spacing else dx[i][0]
+ # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
+ out[slice1] = (y[slice2] - y[slice3]) / dx_0
+
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
- # 1D equivalent -- out[-1] = (y[-1] - y[-2])
- out[slice1] = (y[slice2] - y[slice3])
-
- # Numerical differentiation: 2st order edges, 2nd order interior
+ dx_n = dx[i] if uniform_spacing else dx[i][-1]
+ # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
+ out[slice1] = (y[slice2] - y[slice3]) / dx_n
+
+ # Numerical differentiation: 2nd order edges
else:
- # Use second order differences where possible
- out = np.empty_like(y, dtype=otype)
-
- slice1[axis] = slice(1, -1)
- slice2[axis] = slice(2, None)
- slice3[axis] = slice(None, -2)
- # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
- out[slice1] = (y[slice2] - y[slice3])/2.0
-
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
- # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
- out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
-
+ if uniform_spacing:
+ a = -1.5 / dx[i]
+ b = 2. / dx[i]
+ c = -0.5 / dx[i]
+ else:
+ dx1 = dx[i][0]
+ dx2 = dx[i][1]
+ a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
+ b = (dx1 + dx2) / (dx1 * dx2)
+ c = - dx1 / (dx2 * (dx1 + dx2))
+ # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
+ out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+
slice1[axis] = -1
- slice2[axis] = -1
+ slice2[axis] = -3
slice3[axis] = -2
- slice4[axis] = -3
- # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
- out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
-
- # divide by step size
- out /= dx[i]
+ slice4[axis] = -1
+ if uniform_spacing:
+ a = 0.5 / dx[i]
+ b = -2. / dx[i]
+ c = 1.5 / dx[i]
+ else:
+ dx1 = dx[i][-2]
+ dx2 = dx[i][-1]
+ a = (dx2) / (dx1 * (dx1 + dx2))
+ b = - (dx2 + dx1) / (dx1 * dx2)
+ c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
+ # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
+ out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+
outvals.append(out)
# reset the slice object in this dimension to ":"
@@ -1695,7 +1845,7 @@ def gradient(f, *varargs, **kwargs):
slice3[axis] = slice(None)
slice4[axis] = slice(None)
- if len(axes) == 1:
+ if len_axes == 1:
return outvals[0]
else:
return outvals
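The `shape[axis] = -1` reshape used for the non-uniform coefficients above is a general broadcasting trick: it turns a 1-D coefficient vector into an array that multiplies an N-D array along one chosen axis. A standalone sketch of the technique:

    import numpy as np

    f = np.arange(12.).reshape(3, 4)
    coef = np.array([10., 20., 30., 40.])   # one coefficient per column
    shape = np.ones(f.ndim, dtype=int)
    shape[1] = -1                           # broadcast along axis 1
    scaled = coef.reshape(shape) * f        # coef becomes shape (1, 4)
    print(scaled.shape)                     # (3, 4)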
@@ -2746,9 +2896,9 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
- number of observations given (unbiased estimate). If `bias` is True, then
- normalization is by ``N``. These values can be overridden by using the
- keyword ``ddof`` in numpy versions >= 1.5.
+ number of observations given (unbiased estimate). If `bias` is True,
+ then normalization is by ``N``. These values can be overridden by using
+ the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
@@ -2912,7 +3062,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
- warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2)
+ warnings.warn("Degrees of freedom <= 0 for slice",
+ RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
@@ -4665,9 +4816,9 @@ def delete(arr, obj, axis=None):
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
- warnings.warn(
- "in the future insert will treat boolean arrays and array-likes "
- "as boolean index instead of casting it to integer", FutureWarning, stacklevel=2)
+ warnings.warn("in the future insert will treat boolean arrays and "
+ "array-likes as boolean index instead of casting it "
+ "to integer", FutureWarning, stacklevel=2)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index d914260ad..4fb0dba51 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -96,7 +96,7 @@ class TestRot90(TestCase):
for k in range(1,5):
assert_equal(rot90(a, k=k, axes=(2, 0)),
- rot90(a_rot90_20, k=k-1, axes=(2, 0)))
+ rot90(a_rot90_20, k=k-1, axes=(2, 0)))
class TestFlip(TestCase):
@@ -168,7 +168,8 @@ class TestFlip(TestCase):
def test_4d(self):
a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
for i in range(a.ndim):
- assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+ assert_equal(np.flip(a, i),
+ np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
class TestAny(TestCase):
@@ -219,7 +220,7 @@ class TestCopy(TestCase):
def test_order(self):
# It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn:
- # https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783
+ # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa
a = np.array([[1, 2], [3, 4]])
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
@@ -555,7 +556,8 @@ class TestCumsum(TestCase):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
- np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
+ np.uint32, np.float32, np.float64, np.complex64,
+ np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
@@ -726,14 +728,51 @@ class TestGradient(TestCase):
assert_array_equal(gradient(x), dx)
assert_array_equal(gradient(v), dx)
+ def test_args(self):
+ dx = np.cumsum(np.ones(5))
+ dx_uneven = [1., 2., 5., 9., 11.]
+ f_2d = np.arange(25).reshape(5, 5)
+
+ # distances must be scalars or have size equal to gradient[axis]
+ gradient(np.arange(5), 3.)
+ gradient(np.arange(5), dx)
+ gradient(f_2d, 1.5) # dy is set equal to dx because scalar
+
+ gradient(f_2d, dx_uneven, dx_uneven)
+ # mix between even and uneven spaces and
+ # mix between scalar and vector
+ gradient(f_2d, dx, 2)
+
+ # 2D but axis specified
+ gradient(f_2d, dx, axis=1)
+
def test_badargs(self):
- # for 2D array, gradient can take 0, 1, or 2 extra args
- x = np.array([[1, 1], [3, 4]])
- assert_raises(SyntaxError, gradient, x, np.array([1., 1.]),
- np.array([1., 1.]), np.array([1., 1.]))
+ f_2d = np.arange(25).reshape(5, 5)
+ x = np.cumsum(np.ones(5))
+
+ # wrong sizes
+ assert_raises(ValueError, gradient, f_2d, x, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))
+ # wrong number of arguments
+ assert_raises(TypeError, gradient, f_2d, x)
+ assert_raises(TypeError, gradient, f_2d, x, axis=(0,1))
+ assert_raises(TypeError, gradient, f_2d, x, x, x)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, 1)
+ assert_raises(TypeError, gradient, f_2d, x, x, axis=1)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)
- # disallow arrays as distances, see gh-6847
- assert_raises(ValueError, gradient, np.arange(5), np.ones(5))
+ def test_datetime64(self):
+ # Make sure gradient() can handle special types like datetime64
+ x = np.array(
+ ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
+ '1910-10-12', '1910-12-12', '1912-12-12'],
+ dtype='datetime64[D]')
+ dx = np.array(
+ [-5, -3, 0, 31, 61, 396, 731],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
def test_masked(self):
# Make sure that gradient supports subclasses like masked arrays
@@ -750,29 +789,6 @@ class TestGradient(TestCase):
np.gradient(x2, edge_order=2)
assert_array_equal(x2.mask, [False, False, True, False, False])
- def test_datetime64(self):
- # Make sure gradient() can handle special types like datetime64
- x = np.array(
- ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
- '1910-10-12', '1910-12-12', '1912-12-12'],
- dtype='datetime64[D]')
- dx = np.array(
- [-5, -3, 0, 31, 61, 396, 731],
- dtype='timedelta64[D]')
- assert_array_equal(gradient(x), dx)
- assert_(dx.dtype == np.dtype('timedelta64[D]'))
-
- def test_timedelta64(self):
- # Make sure gradient() can handle special types like timedelta64
- x = np.array(
- [-5, -3, 10, 12, 61, 321, 300],
- dtype='timedelta64[D]')
- dx = np.array(
- [2, 7, 7, 25, 154, 119, -21],
- dtype='timedelta64[D]')
- assert_array_equal(gradient(x), dx)
- assert_(dx.dtype == np.dtype('timedelta64[D]'))
-
def test_second_order_accurate(self):
# Testing that the relative numerical error is less than 3% for
# this example problem. This corresponds to second order
@@ -785,6 +801,78 @@ class TestGradient(TestCase):
num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
+ # test with unevenly spaced
+ np.random.seed(0)
+ x = np.sort(np.random.random(10))
+ y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
+ analytical = 6 * x ** 2 + 8 * x + 2
+ num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)
+ assert_(np.all(num_error < 0.03) == True)
+
+ def test_spacing(self):
+ f = np.array([0, 2., 3., 4., 5., 5.])
+ f = np.tile(f, (6,1)) + f.reshape(-1, 1)
+ x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
+ x_even = np.arange(6.)
+
+ fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
+ fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
+ fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
+ fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))
+
+ # evenly spaced
+ for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
+ res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
+ res2 = gradient(f, x_even, x_even,
+ axis=(0,1), edge_order=edge_order)
+ res3 = gradient(f, x_even, x_even,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_array_equal(res2, res3)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, 1., axis=0, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_almost_equal(res2, exp_res.T)
+
+ res1 = gradient(f, 1., axis=1, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_array_equal(res2, exp_res)
+
+ # unevenly spaced
+ for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
+ res1 = gradient(f, x_uneven, x_uneven,
+ axis=(0,1), edge_order=edge_order)
+ res2 = gradient(f, x_uneven, x_uneven,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res.T)
+
+ res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res)
+
+ # mixed
+ res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
+ res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord1.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord1)
+
+ res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
+ res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord2.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord2)
+
def test_specific_axes(self):
# Testing that gradient can work on a given axis only
v = [[1, 1], [3, 4]]
@@ -802,13 +890,37 @@ class TestGradient(TestCase):
assert_almost_equal(gradient(x, axis=None), gradient(x))
# test vararg order
- assert_array_equal(gradient(x, 2, 3, axis=(1, 0)), [dx[1]/2.0, dx[0]/3.0])
+ assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),
+ [dx[1]/2.0, dx[0]/3.0])
# test maximal number of varargs
- assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)
+ assert_raises(TypeError, gradient, x, 1, 2, axis=1)
assert_raises(ValueError, gradient, x, axis=3)
assert_raises(ValueError, gradient, x, axis=-3)
assert_raises(TypeError, gradient, x, axis=[1,])
+
+ def test_timedelta64(self):
+ # Make sure gradient() can handle special types like timedelta64
+ x = np.array(
+ [-5, -3, 10, 12, 61, 321, 300],
+ dtype='timedelta64[D]')
+ dx = np.array(
+ [2, 7, 7, 25, 154, 119, -21],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
+
+ def test_values(self):
+ # needs at least 2 points for edge_order ==1
+ gradient(np.arange(2), edge_order=1)
+ # needs at least 3 points for edge_order == 2
+ gradient(np.arange(3), edge_order=2)
+
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
class TestAngle(TestCase):
@@ -1753,20 +1865,28 @@ class TestHistogramOptimBinNums(TestCase):
completely ignored. All test values have been precomputed and
they shouldn't change.
"""
- # some basic sanity checking, with some fixed data. Checking for the correct number of bins
- basic_test = {50: {'fd': 8, 'scott': 8, 'rice': 15, 'sturges': 14, 'auto': 14},
- 500: {'fd': 15, 'scott': 16, 'rice': 32, 'sturges': 20, 'auto': 20},
- 5000: {'fd': 33, 'scott': 33, 'rice': 69, 'sturges': 27, 'auto': 33}}
+ # some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {
+ 50: {'fd': 8, 'scott': 8, 'rice': 15,
+ 'sturges': 14, 'auto': 14},
+ 500: {'fd': 15, 'scott': 16, 'rice': 32,
+ 'sturges': 20, 'auto': 20},
+ 5000: {'fd': 33, 'scott': 33, 'rice': 69,
+ 'sturges': 27, 'auto': 33}
+ }
for testlen, expectedResults in basic_test.items():
- # create some sort of non uniform data to test with (3 peak uniform mixture)
+ # create some sort of non-uniform data to test with
+ # (3-peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
x3 = np.linspace(-100, -50, testlen)
x = np.hstack((x1, x2, x3))
for estimator, numbins in expectedResults.items():
a, b = np.histogram(x, estimator, range = (-20, 20))
- msg = "For the {0} estimator with datasize of {1}".format(estimator, testlen)
+ msg = "For the {0} estimator".format(estimator)
+ msg += " with datasize of {0}".format(testlen)
assert_equal(len(a), numbins, err_msg=msg)
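For reference, each estimator name in `basic_test` is a valid `bins` argument to `np.histogram`; a minimal sketch of how they are exercised (illustrative data, not the test's):

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.normal(size=500)
    for estimator in ['fd', 'scott', 'rice', 'sturges', 'auto']:
        counts, _ = np.histogram(x, bins=estimator)
        print(estimator, len(counts))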
def test_simple_weighted(self):
@@ -1775,7 +1895,8 @@ class TestHistogramOptimBinNums(TestCase):
"""
estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
for estimator in estimator_list:
- assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3])
+ assert_raises(TypeError, histogram, [1, 2, 3],
+ estimator, weights=[1, 2, 3])
class TestHistogramdd(TestCase):