From 370b6506f128460371484a50c813d66e64582f44 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Wed, 8 Feb 2017 22:05:11 +0000 Subject: MAINT: Use normalize_axis_index in all python axis checking As a result, some exceptions change from ValueError to IndexError This also changes the exception types raised in places where normalize_axis_index is not quite appropriate --- numpy/lib/function_base.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index ae1420b72..4d1ffbccc 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -25,7 +25,7 @@ from numpy.core.numerictypes import typecodes, number from numpy.lib.twodim_base import diag from .utils import deprecate from numpy.core.multiarray import ( - _insert, add_docstring, digitize, bincount, + _insert, add_docstring, digitize, bincount, normalize_axis_index, interp as compiled_interp, interp_complex as compiled_interp_complex ) from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc @@ -4828,14 +4828,7 @@ def insert(arr, obj, values, axis=None): arr = arr.ravel() ndim = arr.ndim axis = ndim - 1 - else: - if ndim > 0 and (axis < -ndim or axis >= ndim): - raise IndexError( - "axis %i is out of bounds for an array of " - "dimension %i" % (axis, ndim)) - if (axis < 0): - axis += ndim - if (ndim == 0): + elif ndim == 0: # 2013-09-24, 1.9 warnings.warn( "in the future the special handling of scalars will be removed " @@ -4846,6 +4839,8 @@ def insert(arr, obj, values, axis=None): return wrap(arr) else: return arr + else: + axis = normalize_axis_index(axis, ndim) slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) -- cgit v1.2.1 From 9520de90837d0afaac3d1612047f4b952563b3d5 Mon Sep 17 00:00:00 2001 From: Alessandro Pietro Bardelli Date: Wed, 4 Jan 2017 00:58:44 +0100 Subject: ENH: gradient support for unevenly spaced data This somehow reverts #7618 and solves #6847, #7548 by implementing support for unevenly spaced data. Now the behaviour is similar to that of Matlab/Octave function. As argument it can take: 1. A single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... 3. N arrays to specify the coordinates of the values along each dimension of F. The length of the array must match the size of the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. --- numpy/lib/function_base.py | 319 +++++++++++++++++++++++++++++++++------------ 1 file changed, 235 insertions(+), 84 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 4d1ffbccc..fc49a6fd7 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -128,7 +128,8 @@ def rot90(m, k=1, axes=(0,1)): return flip(flip(m, axes[0]), axes[1]) axes_list = arange(0, m.ndim) - axes_list[axes[0]], axes_list[axes[1]] = axes_list[axes[1]], axes_list[axes[0]] + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) if k == 1: return transpose(flip(m,axes[1]), axes_list) @@ -332,7 +333,7 @@ def _hist_bin_doane(x): Improved version of Sturges' formula which works better for non-normal data. 
See - http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning Parameters ---------- @@ -638,7 +639,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) - >>> plt.hist(a, bins='auto') # plt.hist passes its arguments to np.histogram + >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") >>> plt.show() @@ -764,15 +765,19 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, decrement = tmp_a_data < bin_edges[indices] indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. - increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1) + increment = ((tmp_a_data >= bin_edges[indices + 1]) + & (indices != bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': - n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins) - n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins) + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=bins) else: - n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype) + n += np.bincount(indices, weights=tmp_w, + minlength=bins).astype(ntype) # Rename the bin edges for return. bins = bin_edges @@ -1499,19 +1504,29 @@ def gradient(f, *varargs, **kwargs): Return the gradient of an N-dimensional array. The gradient is computed using second order accurate central differences - in the interior and either first differences or second order accurate - one-sides (forward or backwards) differences at the boundaries. The - returned gradient hence has the same shape as the input array. + in the interior points and either first or second order accurate one-sides + (forward or backwards) differences at the boundaries. + The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. - varargs : scalar or list of scalar, optional - N scalars specifying the sample distances for each dimension, - i.e. `dx`, `dy`, `dz`, ... Default distance: 1. - single scalar specifies sample distance for all dimensions. - if `axis` is given, the number of varargs must equal the number of axes. + varargs : list of scalar or array, optional + Spacing between f values. Default unitary spacing for all dimensions. + Spacing can be specified using: + + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + i.e. `dx`, `dy`, `dz`, ... + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of + the corresponding dimension + 4. Any combination of N scalars/arrays with the meaning of 2. and 3. + + If `axis` is given, the number of varargs must equal the number of axes. + Default: 1. + edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. 
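For reference, a minimal sketch of the spacing conventions enumerated in the new `varargs` documentation above (it assumes a NumPy with this patch applied; the sample values are arbitrary):

    import numpy as np

    f = np.array([[1., 2., 6.], [3., 4., 5.]])
    y = np.array([0., 2.])        # coordinates along axis 0; len(y) == f.shape[0]
    x = np.array([0., 1., 3.5])   # coordinates along axis 1; len(x) == f.shape[1]

    g0 = np.gradient(f)           # case 1: no spacing given -> unit spacing on every axis
    g1 = np.gradient(f, 2.)       # case 1: a single scalar applies to all axes
    g2 = np.gradient(f, 2., 0.5)  # case 2: one scalar per axis (dx=2, dy=0.5)
    g3 = np.gradient(f, y, x)     # case 3: one coordinate array per axis
    g4 = np.gradient(f, 2., x)    # case 4: any mix of scalars and arrays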
@@ -1520,8 +1535,9 @@ def gradient(f, *varargs, **kwargs): axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes - The default (axis = None) is to calculate the gradient for all the axes of the input array. - axis may be negative, in which case it counts from the last to the first axis. + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from + the last to the first axis. .. versionadded:: 1.11.0 @@ -1529,17 +1545,31 @@ def gradient(f, *varargs, **kwargs): ------- gradient : ndarray or list of ndarray A set of ndarrays (or a single ndarray if there is only one dimension) - correposnding to the derivatives of f with respect to each dimension. + corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. - + Examples -------- - >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) - >>> np.gradient(x) + >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) + >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - >>> np.gradient(x, 2) + >>> np.gradient(f, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + Spacing can be also specified with an array that represents the coordinates + of the values F along the dimensions. + For instance a uniform spacing: + + >>> x = np.arange(f.size) + >>> np.gradient(f, x) + array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + Or a non uniform one: + + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float) + >>> np.gradient(f, x) + array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) + For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: @@ -1549,15 +1579,99 @@ def gradient(f, *varargs, **kwargs): [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] + In this example the spacing is also specified: + uniform for axis=0 and non uniform for axis=1 + + >>> dx = 2. + >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y) + [array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], + [ 2. , 1.7, 0.5]])] + + It is possible to specify how boundaries are treated using `edge_order` + >>> x = np.array([0, 1, 2, 3, 4]) - >>> y = x**2 - >>> np.gradient(y, edge_order=2) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([ 1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) array([-0., 2., 4., 6., 8.]) - The axis keyword can be used to specify a subset of axes of which the gradient is calculated + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) + + Notes + ----- + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous + derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the + spacing the finite difference coefficients are computed by minimising + the consistency error :math:`\\eta_{i}`: + + .. math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + + \\beta f\\left(x_{i} + h_{d}\\right) + + \\gamma f\\left(x_{i}-h_{s}\\right) + \\right] + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving + the following the linear system: + + .. 
math:: + + \\left\\{ + \\begin{array}{r} + \\alpha+\\beta+\\gamma=0 \\\\ + -\\beta h_{d}+\\gamma h_{s}=1 \\\\ + \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 + \\end{array} + \\right. + + The resulting approximation of :math:`f_{i}^{(1)}` is the following: + + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} + { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + + \mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + + h_{s}}\\right) + + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) + we find the standard second order approximation: + + .. math:: + + \\hat f_{i}^{(1)}= + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + + \mathcal{O}\\left(h^{2}\\right) + + With a similar procedure the forward/backward approximations used for + boundaries can be derived. + + References + ---------- + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + (Texts in Applied Mathematics). New York: Springer. + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + in Geophysical Fluid Dynamics. New York: Springer. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. + `PDF `_. """ f = np.asanyarray(f) N = len(f.shape) # number of dimensions @@ -1576,22 +1690,32 @@ def gradient(f, *varargs, **kwargs): if max(axes) >= N or min(axes) < 0: raise ValueError("'axis' entry is out of bounds") - if len(set(axes)) != len(axes): + len_axes = len(axes) + if len(set(axes)) != len_axes: raise ValueError("duplicate value in 'axis'") n = len(varargs) if n == 0: - dx = [1.0]*N - elif n == 1: - dx = [varargs[0]]*N - elif n == len(axes): + dx = [1.0] * len_axes + elif n == len_axes or (n == 1 and np.isscalar(varargs[0])): dx = list(varargs) + for i, distances in enumerate(dx): + if np.isscalar(distances): + continue + if len(distances) != f.shape[axes[i]]: + raise ValueError("distances must be either scalars or match " + "the length of the corresponding dimension") + diffx = np.diff(dx[i]) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + if len(dx) == 1: + dx *= len_axes else: - raise SyntaxError( - "invalid number of arguments") - if any([not np.isscalar(dxi) for dxi in dx]): - raise ValueError("distances must be scalars") - + raise TypeError("invalid number of arguments") + edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( @@ -1631,62 +1755,88 @@ def gradient(f, *varargs, **kwargs): y = f for i, axis in enumerate(axes): - - if y.shape[axis] < 2: + if y.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " - "at least two elements are required.") - - # Numerical differentiation: 1st order edges, 2nd order interior - if y.shape[axis] == 2 or edge_order == 1: - # Use first order differences for time data - out = np.empty_like(y, dtype=otype) - - slice1[axis] = slice(1, -1) - slice2[axis] = slice(2, None) - slice3[axis] = slice(None, -2) - # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 - out[slice1] = (y[slice2] - y[slice3])/2.0 - + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(y, dtype=otype) + + uniform_spacing = np.isscalar(dx[i]) 
+ + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) + else: + dx1 = dx[i][0:-1] + dx2 = dx[i][1:] + a = -(dx2)/(dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + a.shape = b.shape = c.shape = shape + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] + + # Numerical differentiation: 1st order edges + if edge_order == 1: slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 - # 1D equivalent -- out[0] = (y[1] - y[0]) - out[slice1] = (y[slice2] - y[slice3]) - + dx_0 = dx[i] if uniform_spacing else dx[i][0] + # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0]) + out[slice1] = (y[slice2] - y[slice3]) / dx_0 + slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 - # 1D equivalent -- out[-1] = (y[-1] - y[-2]) - out[slice1] = (y[slice2] - y[slice3]) - - # Numerical differentiation: 2st order edges, 2nd order interior + dx_n = dx[i] if uniform_spacing else dx[i][-1] + # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2]) + out[slice1] = (y[slice2] - y[slice3]) / dx_n + + # Numerical differentiation: 2nd order edges else: - # Use second order differences where possible - out = np.empty_like(y, dtype=otype) - - slice1[axis] = slice(1, -1) - slice2[axis] = slice(2, None) - slice3[axis] = slice(None, -2) - # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 - out[slice1] = (y[slice2] - y[slice3])/2.0 - slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 - # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 - out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 - + if uniform_spacing: + a = -1.5 / dx[i] + b = 2. / dx[i] + c = -0.5 / dx[i] + else: + dx1 = dx[i][0] + dx2 = dx[i][1] + a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2] + out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + slice1[axis] = -1 - slice2[axis] = -1 + slice2[axis] = -3 slice3[axis] = -2 - slice4[axis] = -3 - # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) - out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 - - # divide by step size - out /= dx[i] + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / dx[i] + b = -2. / dx[i] + c = 1.5 / dx[i] + else: + dx1 = dx[i][-2] + dx2 = dx[i][-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + outvals.append(out) # reset the slice object in this dimension to ":" @@ -1695,7 +1845,7 @@ def gradient(f, *varargs, **kwargs): slice3[axis] = slice(None) slice4[axis] = slice(None) - if len(axes) == 1: + if len_axes == 1: return outvals[0] else: return outvals @@ -2746,9 +2896,9 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, contain observations. bias : bool, optional Default normalization (False) is by ``(N - 1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, then - normalization is by ``N``. 
These values can be overridden by using the - keyword ``ddof`` in numpy versions >= 1.5. + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. Note that ``ddof=1`` will return the unbiased estimate, even if both @@ -2912,7 +3062,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: - warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) fact = 0.0 X -= avg[:, None] @@ -4665,9 +4816,9 @@ def delete(arr, obj, axis=None): # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: - warnings.warn( - "in the future insert will treat boolean arrays and array-likes " - "as boolean index instead of casting it to integer", FutureWarning, stacklevel=2) + warnings.warn("in the future insert will treat boolean arrays and " + "array-likes as boolean index instead of casting it " + "to integer", FutureWarning, stacklevel=2) obj = obj.astype(intp) if isinstance(_obj, (int, long, integer)): # optimization for a single value -- cgit v1.2.1 From 48783e5ceb7f60c33db81ab72e5024f42b220990 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Fri, 24 Feb 2017 16:46:58 +0000 Subject: MAINT: replace len(x.shape) with x.ndim --- numpy/lib/function_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index fc49a6fd7..1cf57d617 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1674,7 +1674,7 @@ def gradient(f, *varargs, **kwargs): S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. """ f = np.asanyarray(f) - N = len(f.shape) # number of dimensions + N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) if axes is None: @@ -1900,7 +1900,7 @@ def diff(a, n=1, axis=-1): raise ValueError( "order must be non-negative but got " + repr(n)) a = asanyarray(a) - nd = len(a.shape) + nd = a.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) @@ -2144,7 +2144,7 @@ def unwrap(p, discont=pi, axis=-1): """ p = asarray(p) - nd = len(p.shape) + nd = p.ndim dd = diff(p, axis=axis) slice1 = [slice(None, None)]*nd # full slices slice1[axis] = slice(1, None) @@ -4488,7 +4488,7 @@ def trapz(y, x=None, dx=1.0, axis=-1): d = d.reshape(shape) else: d = diff(x, axis=axis) - nd = len(y.shape) + nd = y.ndim slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd slice1[axis] = slice(1, None) -- cgit v1.2.1 From d7d3e212ce1ff176c718485aed889c77a48ef88f Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 24 Feb 2017 13:58:48 -0700 Subject: MAINT: Fix use of Python 2.6 deprecated escape sequences. Closes #8687. 
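A small illustrative snippet (not part of the patch) of the problem being fixed: in a regular string literal a sequence such as `\m` is not a recognized escape, and CPython deprecates such usage, so LaTeX markup in a docstring must either double the backslash or use a raw string:

    # "\m" is not a valid escape sequence, so LaTeX in a plain
    # (non-raw) string must spell the backslash explicitly:
    s = "\\mathcal{O}\\left(h^{2}\\right)"
    r = r"\mathcal{O}\left(h^{2}\right)"   # a raw string is equivalent
    assert s == r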
--- numpy/lib/function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index fc49a6fd7..bee60fa20 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1644,7 +1644,7 @@ def gradient(f, *varargs, **kwargs): + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} - + \mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + h_{s}h_{d}^{2}}{h_{d} + h_{s}}\\right) @@ -1656,7 +1656,7 @@ def gradient(f, *varargs, **kwargs): \\hat f_{i}^{(1)}= \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} - + \mathcal{O}\\left(h^{2}\\right) + + \\mathcal{O}\\left(h^{2}\\right) With a similar procedure the forward/backward approximations used for boundaries can be derived. -- cgit v1.2.1 From 2cd13fec69551b1718763a03c180b85bf820f139 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Fri, 24 Feb 2017 17:08:56 -0500 Subject: DOC: Added note to np.diff Also noted that type is preserved. [ci skip] --- numpy/lib/function_base.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index fc49a6fd7..a00ee8338 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1872,12 +1872,19 @@ def diff(a, n=1, axis=-1): ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` - except along `axis` where the dimension is smaller by `n`. + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as that of the input. See Also -------- gradient, ediff1d, cumsum + Notes + ----- + For boolean arrays, the preservation of type means that the result + will contain `False` when consecutive elements are the same and + `True` when they differ. + Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) -- cgit v1.2.1 From 4f8e1a36d099c8ed901645daca3e99976835cd95 Mon Sep 17 00:00:00 2001 From: Michael Seifert Date: Sun, 26 Feb 2017 16:53:20 +0100 Subject: DOC: Fixed small mistakes in numpy.copy documentation. Including missing backticks around link, included a missing colon in example and removed excessive indentation before "doctest skip directive". [skip ci] --- numpy/lib/function_base.py | 136 ++++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 68 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index d01a4a6cd..c54512c21 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1462,7 +1462,7 @@ def copy(a, order='K'): Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :meth:ndarray.copy are very + as possible. (Note that this function and :meth:`ndarray.copy` are very similar, but have different default values for their order= arguments.) 
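The note just above about the differing `order=` defaults can be checked directly; a minimal sketch (this is standard public-API behaviour, not something introduced by the patch):

    import numpy as np

    a = np.arange(6).reshape(2, 3).T     # a Fortran-contiguous array
    np.copy(a).flags['F_CONTIGUOUS']     # True: np.copy defaults to order='K',
                                         # which preserves the source layout
    a.copy().flags['C_CONTIGUOUS']       # True: ndarray.copy defaults to order='C'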
@@ -1473,9 +1473,9 @@ def copy(a, order='K'): Notes ----- - This is equivalent to + This is equivalent to: - >>> np.array(a, copy=True) #doctest: +SKIP + >>> np.array(a, copy=True) #doctest: +SKIP Examples -------- @@ -1515,18 +1515,18 @@ def gradient(f, *varargs, **kwargs): varargs : list of scalar or array, optional Spacing between f values. Default unitary spacing for all dimensions. Spacing can be specified using: - + 1. single scalar to specify a sample distance for all dimensions. 2. N scalars to specify a constant sample distance for each dimension. i.e. `dx`, `dy`, `dz`, ... - 3. N arrays to specify the coordinates of the values along each - dimension of F. The length of the array must match the size of + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. - + If `axis` is given, the number of varargs must equal the number of axes. Default: 1. - + edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. @@ -1535,8 +1535,8 @@ def gradient(f, *varargs, **kwargs): axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes - The default (axis = None) is to calculate the gradient for all the axes - of the input array. axis may be negative, in which case it counts from + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.11.0 @@ -1547,7 +1547,7 @@ def gradient(f, *varargs, **kwargs): A set of ndarrays (or a single ndarray if there is only one dimension) corresponding to the derivatives of f with respect to each dimension. Each derivative has the same shape as f. - + Examples -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) @@ -1555,7 +1555,7 @@ def gradient(f, *varargs, **kwargs): array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) - + Spacing can be also specified with an array that represents the coordinates of the values F along the dimensions. For instance a uniform spacing: @@ -1564,12 +1564,12 @@ def gradient(f, *varargs, **kwargs): >>> np.gradient(f, x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) - Or a non uniform one: - + Or a non uniform one: + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) - + For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in rows and the second one in columns direction: @@ -1579,7 +1579,7 @@ def gradient(f, *varargs, **kwargs): [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] - In this example the spacing is also specified: + In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 >>> dx = 2. @@ -1590,7 +1590,7 @@ def gradient(f, *varargs, **kwargs): [ 2. 
, 1.7, 0.5]])] It is possible to specify how boundaries are treated using `edge_order` - + >>> x = np.array([0, 1, 2, 3, 4]) >>> f = x**2 >>> np.gradient(f, edge_order=1) @@ -1598,35 +1598,35 @@ def gradient(f, *varargs, **kwargs): >>> np.gradient(f, edge_order=2) array([-0., 2., 4., 6., 8.]) - The `axis` keyword can be used to specify a subset of axes of which the + The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated - + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) Notes ----- - Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous - derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the - spacing the finite difference coefficients are computed by minimising + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous + derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the + spacing the finite difference coefficients are computed by minimising the consistency error :math:`\\eta_{i}`: - - .. math:: - - \\eta_{i} = f_{i}^{\\left(1\\right)} - - \\left[ \\alpha f\\left(x_{i}\\right) + + + .. math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + \\beta f\\left(x_{i} + h_{d}\\right) + \\gamma f\\left(x_{i}-h_{s}\\right) \\right] - - By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` - with their Taylor series expansion, this translates into solving + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving the following the linear system: - .. math:: + .. math:: - \\left\\{ + \\left\\{ \\begin{array}{r} \\alpha+\\beta+\\gamma=0 \\\\ -\\beta h_{d}+\\gamma h_{s}=1 \\\\ @@ -1636,40 +1636,40 @@ def gradient(f, *varargs, **kwargs): The resulting approximation of :math:`f_{i}^{(1)}` is the following: - .. math:: - - \\hat f_{i}^{(1)} = - \\frac{ - h_{s}^{2}f\\left(x_{i} + h_{d}\\right) - + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} - + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} - + h_{s}h_{d}^{2}}{h_{d} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + h_{s}}\\right) - It is worth noting that if :math:`h_{s}=h_{d}` - (i.e., data are evenly spaced) + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) we find the standard second order approximation: - - .. math:: - + + .. math:: + \\hat f_{i}^{(1)}= - \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + \\mathcal{O}\\left(h^{2}\\right) - With a similar procedure the forward/backward approximations used for + With a similar procedure the forward/backward approximations used for boundaries can be derived. - + References ---------- - .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics (Texts in Applied Mathematics). New York: Springer. - .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations in Geophysical Fluid Dynamics. New York: Springer. - .. [3] Fornberg B. 
(1988) Generation of Finite Difference Formulas on - Arbitrarily Spaced Grids, - Mathematics of Computation 51, no. 184 : 699-706. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. `PDF `_. """ @@ -1707,15 +1707,15 @@ def gradient(f, *varargs, **kwargs): "the length of the corresponding dimension") diffx = np.diff(dx[i]) # if distances are constant reduce to the scalar case - # since it brings a consistent speedup - if (diffx == diffx[0]).all(): + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): diffx = diffx[0] dx[i] = diffx if len(dx) == 1: dx *= len_axes else: raise TypeError("invalid number of arguments") - + edge_order = kwargs.pop('edge_order', 1) if kwargs: raise TypeError('"{}" are not valid keyword arguments.'.format( @@ -1762,14 +1762,14 @@ def gradient(f, *varargs, **kwargs): # result allocation out = np.empty_like(y, dtype=otype) - uniform_spacing = np.isscalar(dx[i]) + uniform_spacing = np.isscalar(dx[i]) # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) slice2[axis] = slice(None, -2) slice3[axis] = slice(1, -1) slice4[axis] = slice(2, None) - + if uniform_spacing: out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) else: @@ -1784,7 +1784,7 @@ def gradient(f, *varargs, **kwargs): a.shape = b.shape = c.shape = shape # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] - + # Numerical differentiation: 1st order edges if edge_order == 1: slice1[axis] = 0 @@ -1793,14 +1793,14 @@ def gradient(f, *varargs, **kwargs): dx_0 = dx[i] if uniform_spacing else dx[i][0] # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0]) out[slice1] = (y[slice2] - y[slice3]) / dx_0 - + slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = dx[i] if uniform_spacing else dx[i][-1] # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2]) out[slice1] = (y[slice2] - y[slice3]) / dx_n - + # Numerical differentiation: 2nd order edges else: slice1[axis] = 0 @@ -1819,7 +1819,7 @@ def gradient(f, *varargs, **kwargs): c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2] out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] - + slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 @@ -1835,8 +1835,8 @@ def gradient(f, *varargs, **kwargs): b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] - + out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + outvals.append(out) # reset the slice object in this dimension to ":" @@ -2903,8 +2903,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, contain observations. bias : bool, optional Default normalization (False) is by ``(N - 1)``, where ``N`` is the - number of observations given (unbiased estimate). If `bias` is True, - then normalization is by ``N``. These values can be overridden by using + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using the keyword ``ddof`` in numpy versions >= 1.5. ddof : int, optional If not ``None`` the default value implied by `bias` is overridden. 
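A short sketch of how `bias` and `ddof` interact in `np.cov`, using values small enough to check by hand (standard behaviour, not introduced by this patch):

    import numpy as np

    x = np.array([0., 1., 2., 3.])    # mean 1.5, sum of squared deviations 5.0
    np.cov(x)                         # -> 5/3 ~= 1.667, normalized by N - 1
    np.cov(x, bias=True)              # -> 5/4  = 1.25,  normalized by N
    np.cov(x, bias=True, ddof=1)      # -> 5/3 again: ddof overrides bias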
@@ -3069,7 +3069,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, fact = w_sum - ddof*sum(w*aweights)/w_sum if fact <= 0: - warnings.warn("Degrees of freedom <= 0 for slice", + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2) fact = 0.0 @@ -4823,7 +4823,7 @@ def delete(arr, obj, axis=None): # After removing the special handling of booleans and out of # bounds values, the conversion to the array can be removed. if obj.dtype == bool: - warnings.warn("in the future insert will treat boolean arrays and " + warnings.warn("in the future insert will treat boolean arrays and " "array-likes as boolean index instead of casting it " "to integer", FutureWarning, stacklevel=2) obj = obj.astype(intp) -- cgit v1.2.1 From 1588ae39ffb51ea916f03510671aab711fdfb568 Mon Sep 17 00:00:00 2001 From: Duke Vijitbenjaronk Date: Tue, 7 Mar 2017 11:11:53 -0600 Subject: BUG: Fix np.average with object array weights Fixes #8696 --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c54512c21..0903790bd 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1135,7 +1135,7 @@ def average(a, axis=None, weights=None, returned=False): wgt = wgt.swapaxes(-1, axis) scl = wgt.sum(axis=axis, dtype=result_dtype) - if (scl == 0.0).any(): + if np.any(scl == 0.0): raise ZeroDivisionError( "Weights sum to zero, can't be normalized") -- cgit v1.2.1 From 9f7a9e00eec6f0a795c859ff8328a6b82603e506 Mon Sep 17 00:00:00 2001 From: Egor Klenin Date: Mon, 20 Mar 2017 16:26:59 +0300 Subject: DOC: Include np. prefix in meshgrid examples Add "np." prefix to meshgrid calls for consistency --- numpy/lib/function_base.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 0903790bd..cc0e8feeb 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4601,12 +4601,12 @@ def meshgrid(*xi, **kwargs): 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is illustrated by the following code snippet:: - xv, yv = meshgrid(x, y, sparse=False, indexing='ij') + xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') for i in range(nx): for j in range(ny): # treat xv[i,j], yv[i,j] - xv, yv = meshgrid(x, y, sparse=False, indexing='xy') + xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy') for i in range(nx): for j in range(ny): # treat xv[j,i], yv[j,i] @@ -4625,14 +4625,14 @@ def meshgrid(*xi, **kwargs): >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) - >>> xv, yv = meshgrid(x, y) + >>> xv, yv = np.meshgrid(x, y) >>> xv array([[ 0. , 0.5, 1. ], [ 0. , 0.5, 1. ]]) >>> yv array([[ 0., 0., 0.], [ 1., 1., 1.]]) - >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays + >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv array([[ 0. , 0.5, 1. 
]]) >>> yv @@ -4643,7 +4643,7 @@ def meshgrid(*xi, **kwargs): >>> x = np.arange(-5, 5, 0.1) >>> y = np.arange(-5, 5, 0.1) - >>> xx, yy = meshgrid(x, y, sparse=True) + >>> xx, yy = np.meshgrid(x, y, sparse=True) >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) >>> h = plt.contourf(x,y,z) -- cgit v1.2.1 From efa1bd2c0de6cd9c07b410002f2e1c0bae8cf385 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 26 Mar 2017 10:38:28 +0100 Subject: MAINT: Reuse _validate_axis in np.gradient This also means that its axis argument invokes operator.index like others do. _validate_axis currently accepts lists of axes, which is a bug. --- numpy/lib/function_base.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index cc0e8feeb..2a8a13caa 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1679,21 +1679,10 @@ def gradient(f, *varargs, **kwargs): axes = kwargs.pop('axis', None) if axes is None: axes = tuple(range(N)) - # check axes to have correct type and no duplicate entries - if isinstance(axes, int): - axes = (axes,) - if not isinstance(axes, tuple): - raise TypeError("A tuple of integers or a single integer is required") - - # normalize axis values: - axes = tuple(x + N if x < 0 else x for x in axes) - if max(axes) >= N or min(axes) < 0: - raise ValueError("'axis' entry is out of bounds") + else: + axes = _nx._validate_axis(axes, N) len_axes = len(axes) - if len(set(axes)) != len_axes: - raise ValueError("duplicate value in 'axis'") - n = len(varargs) if n == 0: dx = [1.0] * len_axes -- cgit v1.2.1 From e3ed705e5d91b584e9191a20f3a4780d354271ff Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 26 Mar 2017 11:22:43 +0100 Subject: MAINT: Use _validate_axis inside _ureduce This fixes an omission where duplicate axes would only be detected when positive --- numpy/lib/function_base.py | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 2a8a13caa..5b3af311b 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -12,7 +12,7 @@ from numpy.core import linspace, atleast_1d, atleast_2d, transpose from numpy.core.numeric import ( ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, empty_like, ndarray, around, floor, ceil, take, dot, where, intp, - integer, isscalar, absolute + integer, isscalar, absolute, AxisError ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, @@ -3972,21 +3972,15 @@ def _ureduce(a, func, **kwargs): if axis is not None: keepdim = list(a.shape) nd = a.ndim - try: - axis = operator.index(axis) - if axis >= nd or axis < -nd: - raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) - keepdim[axis] = 1 - except TypeError: - sax = set() - for x in axis: - if x >= nd or x < -nd: - raise IndexError("axis %d out of bounds (%d)" % (x, nd)) - if x in sax: - raise ValueError("duplicate value in axis") - sax.add(x % nd) - keepdim[x] = 1 - keep = sax.symmetric_difference(frozenset(range(nd))) + axis = _nx._validate_axis(axis, nd) + + for ax in axis: + keepdim[ax] = 1 + + if len(axis) == 1: + kwargs['axis'] = axis[0] + else: + keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): @@ -4742,7 +4736,8 @@ def delete(arr, obj, 
axis=None): if ndim != 1: arr = arr.ravel() ndim = arr.ndim - axis = ndim - 1 + axis = -1 + if ndim == 0: # 2013-09-24, 1.9 warnings.warn( @@ -4753,6 +4748,8 @@ def delete(arr, obj, axis=None): else: return arr.copy(order=arrorder) + axis = normalize_axis_index(axis, ndim) + slobj = [slice(None)]*ndim N = arr.shape[axis] newshape = list(arr.shape) -- cgit v1.2.1 From 17466ad1839718c091c629bb647e881b7922a148 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sun, 26 Mar 2017 11:30:23 +0100 Subject: MAINT: Rename _validate_axis, and document it --- numpy/lib/function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 5b3af311b..2e6de9ec7 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1680,7 +1680,7 @@ def gradient(f, *varargs, **kwargs): if axes is None: axes = tuple(range(N)) else: - axes = _nx._validate_axis(axes, N) + axes = _nx.normalize_axis_tuple(axes, N) len_axes = len(axes) n = len(varargs) @@ -3972,7 +3972,7 @@ def _ureduce(a, func, **kwargs): if axis is not None: keepdim = list(a.shape) nd = a.ndim - axis = _nx._validate_axis(axis, nd) + axis = _nx.normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 -- cgit v1.2.1 From eb12b7106fb2962167627af21bc0bef24f6f650d Mon Sep 17 00:00:00 2001 From: Baurzhan Muftakhidinov Date: Thu, 6 Apr 2017 15:12:23 +0500 Subject: DOC: Fix typos in percentile (#8900) --- numpy/lib/function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 2e6de9ec7..4a07815e8 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4209,8 +4209,8 @@ def percentile(a, q, axis=None, out=None, Notes ----- Given a vector ``V`` of length ``N``, the ``q``-th percentile of - ``V`` is the value ``q/100`` of the way from the mimumum to the - maximum in in a sorted copy of ``V``. The values and distances of + ``V`` is the value ``q/100`` of the way from the minimum to the + maximum in a sorted copy of ``V``. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of ``q`` exactly. This function is the same as -- cgit v1.2.1 From ae3d1fa3e0f4a048618cb4e7c8fd694185df69b6 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Tue, 25 Apr 2017 20:24:57 +0100 Subject: DOC: Explain the behavior of diff on unsigned types Fixes #2522 --- numpy/lib/function_base.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 4a07815e8..3c39d1a7b 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1874,6 +1874,23 @@ def diff(a, n=1, axis=-1): will contain `False` when consecutive elements are the same and `True` when they differ. + For unsigned integer arrays, the results will also be unsigned. This should + not be surprising, as the result is consistent with calculating the + difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] 
+ array(255, np.uint8) + + If this is not desirable, then the array should be cast to a larger integer + type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) -- cgit v1.2.1 From ef5684564e3074daf614846f30bfdd7f15f5254f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Skytt=C3=A4?= Date: Tue, 9 May 2017 12:16:14 +0300 Subject: ENH: Spelling fixes --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3c39d1a7b..8ee6a54dd 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1607,7 +1607,7 @@ def gradient(f, *varargs, **kwargs): Notes ----- - Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the spacing the finite difference coefficients are computed by minimising the consistency error :math:`\\eta_{i}`: -- cgit v1.2.1 From ecab1ff2b77b06e0fcbbfafd26d80aef486f74b8 Mon Sep 17 00:00:00 2001 From: Joseph Paul Cohen Date: Fri, 2 Jun 2017 13:20:36 -0400 Subject: MAINT: Use np.concatenate instead of np.vstack (#8934) The np.vstack function is maintained for backward compatibility, it's use in new code is discouraged. * MAINT: Replace internal uses of vstack vstack is supported only for backward compatibility We should use concatenate or stack instead * MAINT: Remove 1d special casing in piecewise * STY: Fix missing blank line in ma.tests.test_extras.py --- numpy/lib/function_base.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 8ee6a54dd..1418baa77 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1321,16 +1321,8 @@ def piecewise(x, condlist, funclist, *args, **kw): x = x[None] zerod = True if n == n2 - 1: # compute the "otherwise" condition. 
- totlist = np.logical_or.reduce(condlist, axis=0) - # Only able to stack vertically if the array is 1d or less - if x.ndim <= 1: - condlist = np.vstack([condlist, ~totlist]) - else: - condlist = [asarray(c, dtype=bool) for c in condlist] - totlist = condlist[0] - for k in range(1, n): - totlist |= condlist[k] - condlist.append(~totlist) + condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) n += 1 y = zeros(x.shape, x.dtype) @@ -2982,7 +2974,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] - >>> X = np.vstack((x,y)) + >>> X = np.stack((x, y), axis=0) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] @@ -3020,7 +3012,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, y = array(y, copy=False, ndmin=2, dtype=dtype) if not rowvar and y.shape[0] != 1: y = y.T - X = np.vstack((X, y)) + X = np.concatenate((X, y), axis=0) if ddof is None: if bias == 0: -- cgit v1.2.1 From ad1e3c118f378fd62883655a9a983dd0c682f968 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 3 Jun 2017 11:37:30 +0100 Subject: MAINT: Combine similar branches --- numpy/lib/function_base.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 8ee6a54dd..b22fe5112 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4005,8 +4005,9 @@ def _ureduce(a, func, **kwargs): # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 + keepdim = tuple(keepdim) else: - keepdim = [1] * a.ndim + keepdim = (1,) * a.ndim r = func(a, **kwargs) return r, keepdim @@ -4268,10 +4269,7 @@ def percentile(a, q, axis=None, out=None, overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: - if q.ndim == 0: - return r.reshape(k) - else: - return r.reshape([len(q)] + k) + return r.reshape(q.shape + k) else: return r -- cgit v1.2.1 From cbaa809820656bf39f4ec78a9bec1426239ce440 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 3 Jun 2017 13:23:46 +0100 Subject: MAINT: Don't internally use the one-argument where nonzero is a clearer spelling --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 6dddbc11b..ac2cc0a6c 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -974,7 +974,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. 
- Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 + Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays -- cgit v1.2.1 From 1608e53072b035bd40de7a202e75354f0e802120 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 3 Jun 2017 13:41:26 +0100 Subject: BUG: KeyboardInterrupt is swallowed all over the place Bare except is very rarely the right thing --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 6dddbc11b..1e6223e7c 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4531,7 +4531,7 @@ def add_newdoc(place, obj, doc): elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) - except: + except Exception: pass -- cgit v1.2.1 From 34075a54e7aa10e80d41ef33ec7102292ecff0d5 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Sat, 10 Jun 2017 15:56:14 +1200 Subject: DOC: BLD: fix lots of Sphinx warnings/errors. --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index a127c366b..4739b2176 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -627,7 +627,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 - >>> np.sum(hist*np.diff(bin_edges)) + >>> np.sum(hist * np.diff(bin_edges)) 1.0 .. versionadded:: 1.11.0 -- cgit v1.2.1 From 0bf437fb0ae42fefdb0040692b993f17dedb2e3d Mon Sep 17 00:00:00 2001 From: Egor Panfilov Date: Sat, 17 Jun 2017 15:11:06 +0300 Subject: BUG: Switched to xor for bool arrays in diff, added corresponding tests --- numpy/lib/function_base.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 4739b2176..f37d79663 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1,7 +1,6 @@ from __future__ import division, absolute_import, print_function import collections -import operator import re import sys import warnings @@ -1912,10 +1911,16 @@ def diff(a, n=1, axis=-1): slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) + + if a.dtype == np.bool_: + da = a[slice1] ^ a[slice2] + else: + da = a[slice1] - a[slice2] + if n > 1: - return diff(a[slice1]-a[slice2], n-1, axis=axis) + return diff(da, n-1, axis=axis) else: - return a[slice1]-a[slice2] + return da def interp(x, xp, fp, left=None, right=None, period=None): @@ -2061,6 +2066,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): else: return interp_func(x, xp, fp, left, right).item() + def angle(z, deg=0): """ Return the angle of the complex argument. 
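For boolean input, the change to `diff` above stops relying on subtraction; a minimal sketch of the behaviour it produces:

    import numpy as np

    b = np.array([True, True, False, False, True])
    np.diff(b)        # -> array([False,  True, False,  True])
    b[1:] ^ b[:-1]    # the elementwise form used above; for booleans,
                      # XOR gives the same result as b[1:] != b[:-1]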
@@ -2083,8 +2089,6 @@ def angle(z, deg=0): arctan2 absolute - - Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians -- cgit v1.2.1 From 47242a25920e72366e906a7ffaa2fb41144623d3 Mon Sep 17 00:00:00 2001 From: Egor Panfilov Date: Sat, 17 Jun 2017 23:31:54 +0300 Subject: MAINT: Use neq instead of xor in diff --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index f37d79663..a3db3494c 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1913,7 +1913,7 @@ def diff(a, n=1, axis=-1): slice2 = tuple(slice2) if a.dtype == np.bool_: - da = a[slice1] ^ a[slice2] + da = a[slice1] != a[slice2] else: da = a[slice1] - a[slice2] -- cgit v1.2.1 From ea7fac99e65037699eefe97b714808f7c73bf147 Mon Sep 17 00:00:00 2001 From: Brandon Carter Date: Sat, 24 Jun 2017 23:13:38 -0400 Subject: BUG: fixes unsigned bins monotonicity check, see #9222 --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index a3db3494c..8bde521bf 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -782,7 +782,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, bins = bin_edges else: bins = asarray(bins) - if (np.diff(bins) < 0).any(): + if not np.all(bins[1:] > bins[:-1]): raise ValueError( 'bins must increase monotonically.') -- cgit v1.2.1 From e12f16df86301881ac72f6bcab8831e9f3e23a24 Mon Sep 17 00:00:00 2001 From: Brandon Carter Date: Sun, 25 Jun 2017 23:04:51 -0400 Subject: minor change to the logic --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 8bde521bf..48922721f 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -782,7 +782,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, bins = bin_edges else: bins = asarray(bins) - if not np.all(bins[1:] > bins[:-1]): + if np.any(bins[1:] <= bins[:-1]): raise ValueError( 'bins must increase monotonically.') -- cgit v1.2.1 From d86e3fee3c9906ce0fad36520de86d9ac306cee8 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 12:40:05 +0100 Subject: BUG: Allow 0d arrays instead of scalars in gradient This fixes gh-8292 --- numpy/lib/function_base.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 8ee6a54dd..989030e03 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1674,6 +1674,7 @@ def gradient(f, *varargs, **kwargs): S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. 
""" f = np.asanyarray(f) + varargs = [asanyarray(d) for d in varargs] N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) @@ -1685,11 +1686,16 @@ def gradient(f, *varargs, **kwargs): len_axes = len(axes) n = len(varargs) if n == 0: - dx = [1.0] * len_axes - elif n == len_axes or (n == 1 and np.isscalar(varargs[0])): - dx = list(varargs) + # no spacing argument - use 1 in all axes + dx = [np.array(1.0)] * len_axes + elif n == 1 and varargs[0].ndim == 0: + # single scalar for all axes + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = varargs[:] for i, distances in enumerate(dx): - if np.isscalar(distances): + if distances.ndim == 0: continue if len(distances) != f.shape[axes[i]]: raise ValueError("distances must be either scalars or match " @@ -1700,8 +1706,6 @@ def gradient(f, *varargs, **kwargs): if (diffx == diffx[0]).all(): diffx = diffx[0] dx[i] = diffx - if len(dx) == 1: - dx *= len_axes else: raise TypeError("invalid number of arguments") @@ -1751,7 +1755,7 @@ def gradient(f, *varargs, **kwargs): # result allocation out = np.empty_like(y, dtype=otype) - uniform_spacing = np.isscalar(dx[i]) + uniform_spacing = dx[i].ndim == 0 # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) -- cgit v1.2.1 From 2bfec2c0dd92958694e6d736530ae6333d7d62d7 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 12:40:32 +0100 Subject: BUG: Only allow 1d distance arrays 2d arrays would work, but in unpredictable and undocumented ways. This at least makes gh-9401 give a better error message. --- numpy/lib/function_base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 989030e03..91e7dc616 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1697,8 +1697,10 @@ def gradient(f, *varargs, **kwargs): for i, distances in enumerate(dx): if distances.ndim == 0: continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") if len(distances) != f.shape[axes[i]]: - raise ValueError("distances must be either scalars or match " + raise ValueError("when 1d, distances must match " "the length of the corresponding dimension") diffx = np.diff(dx[i]) # if distances are constant reduce to the scalar case -- cgit v1.2.1 From 24508c1220a608df116db9035df30418bfc74a9b Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 13:08:46 +0100 Subject: BUG: Use np.ndim not asarray, to allow duck-types --- numpy/lib/function_base.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 91e7dc616..b616011d1 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1674,7 +1674,6 @@ def gradient(f, *varargs, **kwargs): S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. 
""" f = np.asanyarray(f) - varargs = [asanyarray(d) for d in varargs] N = f.ndim # number of dimensions axes = kwargs.pop('axis', None) @@ -1687,17 +1686,17 @@ def gradient(f, *varargs, **kwargs): n = len(varargs) if n == 0: # no spacing argument - use 1 in all axes - dx = [np.array(1.0)] * len_axes - elif n == 1 and varargs[0].ndim == 0: + dx = [1.0] * len_axes + elif n == 1 and np.ndim(varargs[0]) == 0: # single scalar for all axes dx = varargs * len_axes elif n == len_axes: # scalar or 1d array for each axis - dx = varargs[:] + dx = list(varargs) for i, distances in enumerate(dx): - if distances.ndim == 0: + if np.ndim(distances) == 0: continue - elif distances.ndim != 1: + elif np.ndim(distances) != 1: raise ValueError("distances must be either scalars or 1d") if len(distances) != f.shape[axes[i]]: raise ValueError("when 1d, distances must match " @@ -1757,7 +1756,7 @@ def gradient(f, *varargs, **kwargs): # result allocation out = np.empty_like(y, dtype=otype) - uniform_spacing = dx[i].ndim == 0 + uniform_spacing = np.ndim(dx[i]) == 0 # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) -- cgit v1.2.1 From 1f4ba5bee1240cc7a888087dc06acb7709f12870 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 13:10:59 +0100 Subject: MAINT: Use clearer variable --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index b616011d1..32c999dfc 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1701,7 +1701,7 @@ def gradient(f, *varargs, **kwargs): if len(distances) != f.shape[axes[i]]: raise ValueError("when 1d, distances must match " "the length of the corresponding dimension") - diffx = np.diff(dx[i]) + diffx = np.diff(distances) # if distances are constant reduce to the scalar case # since it brings a consistent speedup if (diffx == diffx[0]).all(): -- cgit v1.2.1 From 575a48e0da892d447a4d884f024df82c567e6f3f Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 15:53:34 +0100 Subject: BUG: float16 is promoted to float64 by gradient This isn't the case for `diff` --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index a3db3494c..c291cf587 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1715,7 +1715,7 @@ def gradient(f, *varargs, **kwargs): slice4 = [slice(None)]*N otype = f.dtype.char - if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: + if otype not in ['f', 'd', 'F', 'D', 'm', 'M', 'e']: otype = 'd' # Difference of datetime64 elements results in timedelta64 -- cgit v1.2.1 From e7c83f0115f33378af94f74dfbd8196726734ad5 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 15:55:08 +0100 Subject: MAINT: Use dtypes, not typecodes, and remove special casing for timedelta --- numpy/lib/function_base.py | 48 ++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 27 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c291cf587..dda3e021b 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1714,33 +1714,27 @@ def gradient(f, *varargs, **kwargs): slice3 = [slice(None)]*N slice4 = [slice(None)]*N - otype = f.dtype.char - if otype not in ['f', 'd', 'F', 'D', 'm', 'M', 'e']: - otype = 'd' - 
- # Difference of datetime64 elements results in timedelta64 - if otype == 'M': - # Need to use the full dtype name because it contains unit information - otype = f.dtype.name.replace('datetime', 'timedelta') - elif otype == 'm': - # Needs to keep the specific units, can't be a general unit - otype = f.dtype - - # Convert datetime64 data into ints. Make dummy variable `y` - # that is a view of ints if the data is datetime64, otherwise - # just set y equal to the array `f`. - if f.dtype.char in ["M", "m"]: - y = f.view('int64') + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass else: - y = f + # all other types convert to floating point + otype = np.double for i, axis in enumerate(axes): - if y.shape[axis] < edge_order + 1: + if f.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required.") # result allocation - out = np.empty_like(y, dtype=otype) + out = np.empty_like(f, dtype=otype) uniform_spacing = np.isscalar(dx[i]) @@ -1771,15 +1765,15 @@ def gradient(f, *varargs, **kwargs): slice2[axis] = 1 slice3[axis] = 0 dx_0 = dx[i] if uniform_spacing else dx[i][0] - # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0]) - out[slice1] = (y[slice2] - y[slice3]) / dx_0 + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[slice1] = (f[slice2] - f[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = dx[i] if uniform_spacing else dx[i][-1] - # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2]) - out[slice1] = (y[slice2] - y[slice3]) / dx_n + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[slice1] = (f[slice2] - f[slice3]) / dx_n # Numerical differentiation: 2nd order edges else: @@ -1797,8 +1791,8 @@ def gradient(f, *varargs, **kwargs): a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) - # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2] - out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] slice1[axis] = -1 slice2[axis] = -3 @@ -1815,7 +1809,7 @@ def gradient(f, *varargs, **kwargs): b = - (dx2 + dx1) / (dx1 * dx2) c = (2. 
* dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] outvals.append(out) -- cgit v1.2.1 From 01fd010c6fd29a38b8013ba87c2fb89e8c4c7706 Mon Sep 17 00:00:00 2001 From: Joseph Fox-Rabinovitz Date: Thu, 27 Jul 2017 17:26:11 -0400 Subject: MAINT: Changed diff to use iterative instead of recursive approach TST: Added tests for `n` parameter Added test for `datetime64` type change Added tests for axis normalization Added test for subtype handling DOC: Minor updates to docs: Added explanation for `n==0` Added documentation describing `datetime64` handling Updated formatting Added call to normalize_axis_index --- numpy/lib/function_base.py | 54 ++++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 23 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index dda3e021b..f5e9ff2a5 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -15,7 +15,7 @@ from numpy.core.numeric import ( ) from numpy.core.umath import ( pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, - mod, exp, log10 + mod, exp, log10, not_equal, subtract ) from numpy.core.fromnumeric import ( ravel, nonzero, sort, partition, mean, any, sum @@ -1827,7 +1827,7 @@ def gradient(f, *varargs, **kwargs): def diff(a, n=1, axis=-1): """ - Calculate the n-th discrete difference along given axis. + Calculate the n-th discrete difference along the given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` @@ -1838,16 +1838,21 @@ def diff(a, n=1, axis=-1): a : array_like Input array n : int, optional - The number of times values are differenced. + The number of times values are differenced. If zero, the input + is returned as-is. axis : int, optional - The axis along which the difference is taken, default is the last axis. + The axis along which the difference is taken, default is the + last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. The - type of the output is the same as that of the input. + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. See Also -------- @@ -1855,13 +1860,13 @@ def diff(a, n=1, axis=-1): Notes ----- - For boolean arrays, the preservation of type means that the result - will contain `False` when consecutive elements are the same and - `True` when they differ. + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. - For unsigned integer arrays, the results will also be unsigned. This should - not be surprising, as the result is consistent with calculating the - difference directly: + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: >>> u8_arr = np.array([1, 0], dtype=np.uint8) >>> np.diff(u8_arr) @@ -1869,8 +1874,8 @@ def diff(a, n=1, axis=-1): >>> u8_arr[1,...] - u8_arr[0,...] 
array(255, np.uint8) - If this is not desirable, then the array should be cast to a larger integer - type first: + If this is not desirable, then the array should be cast to a larger + integer type first: >>> i16_arr = u8_arr.astype(np.int16) >>> np.diff(i16_arr) @@ -1891,30 +1896,33 @@ def diff(a, n=1, axis=-1): >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) + a = asanyarray(a) nd = a.ndim - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd + axis = normalize_axis_index(axis, nd) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) - if a.dtype == np.bool_: - da = a[slice1] != a[slice2] - else: - da = a[slice1] - a[slice2] + op = not_equal if a.dtype == np.bool_ else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) - if n > 1: - return diff(da, n-1, axis=axis) - else: - return da + return a def interp(x, xp, fp, left=None, right=None, period=None): -- cgit v1.2.1 From 2b781f8967488dc007f8f0a1e6a7f49208788d12 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Tue, 1 Aug 2017 20:29:36 +0000 Subject: MAINT/DOC: Use builtin when np.{x} is builtins.{x}. This is the case for x in {int, bool, str, float, complex, object}. Using the np.{x} version is deceptive as it suggests that there is a difference. This change doesn't affect any external behaviour. The `long` type is missing in python 3, so np.long is still useful --- numpy/lib/function_base.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index f5e9ff2a5..c185f9639 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -717,7 +717,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, # At this point, if the weights are not integer, floating point, or # complex, we have to use the slow algorithm. if weights is not None and not (np.can_cast(weights.dtype, np.double) or - np.can_cast(weights.dtype, np.complex)): + np.can_cast(weights.dtype, complex)): bins = linspace(mn, mx, bins + 1, endpoint=True) if not iterable(bins): @@ -1541,7 +1541,7 @@ def gradient(f, *varargs, **kwargs): Examples -------- - >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) + >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) @@ -1557,7 +1557,7 @@ def gradient(f, *varargs, **kwargs): Or a non uniform one: - >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float) + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) @@ -1565,7 +1565,7 @@ def gradient(f, *varargs, **kwargs): axis. In this example the first array stands for the gradient in rows and the second one in columns direction: - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] @@ -1575,7 +1575,7 @@ def gradient(f, *varargs, **kwargs): >>> dx = 2. 
>>> y = [1., 1.5, 3.5] - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], [ 2. , 1.7, 0.5]])] @@ -1592,7 +1592,7 @@ def gradient(f, *varargs, **kwargs): The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) @@ -2600,7 +2600,7 @@ class vectorize(object): >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) - >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) + >>> vfunc = np.vectorize(myfunc, otypes=[float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) @@ -3029,7 +3029,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, # Get the product of frequencies and weights w = None if fweights is not None: - fweights = np.asarray(fweights, dtype=np.float) + fweights = np.asarray(fweights, dtype=float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") @@ -3044,7 +3044,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, "fweights cannot be negative") w = fweights if aweights is not None: - aweights = np.asarray(aweights, dtype=np.float) + aweights = np.asarray(aweights, dtype=float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") -- cgit v1.2.1 From 029863eae86b9df2de4b9a9843ca8f88c99130df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nico=20Schl=C3=B6mer?= Date: Fri, 11 Aug 2017 03:01:06 +0200 Subject: MAINT: Use moveaxis instead of rollaxis internally (#9475) Also add a hint to the documentation advising the use of moveaxis over rollaxis. Tests for rollaxis are left alone. --- numpy/lib/function_base.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c185f9639..b1ebcad67 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4336,7 +4336,7 @@ def _percentile(a, q, axis=None, out=None, ap.partition(indices, axis=axis) # ensure axis with qth is first - ap = np.rollaxis(ap, axis, 0) + ap = np.moveaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's @@ -4369,9 +4369,9 @@ def _percentile(a, q, axis=None, out=None, ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first - ap = np.rollaxis(ap, axis, 0) - weights_below = np.rollaxis(weights_below, axis, 0) - weights_above = np.rollaxis(weights_above, axis, 0) + ap = np.moveaxis(ap, axis, 0) + weights_below = np.moveaxis(weights_below, axis, 0) + weights_above = np.moveaxis(weights_above, axis, 0) axis = 0 # Check if the array contains any nan's @@ -4383,8 +4383,8 @@ def _percentile(a, q, axis=None, out=None, x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first - x1 = np.rollaxis(x1, axis, 0) - x2 = np.rollaxis(x2, axis, 0) + x1 = np.moveaxis(x1, axis, 0) + x2 = np.moveaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) @@ -5040,7 +5040,7 @@ def insert(arr, obj, values, axis=None): # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. 
(here a[:,0:1,:]) - values = np.rollaxis(values, 0, (axis % values.ndim) + 1) + values = np.moveaxis(values, 0, axis) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) -- cgit v1.2.1 From 1a1db29588055021e4fde133ff0d65b3d881ff3d Mon Sep 17 00:00:00 2001 From: Brandon Carter Date: Sat, 19 Aug 2017 21:12:03 -0400 Subject: allow non-strictly increasing bins --- numpy/lib/function_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 48922721f..5b039ca23 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -782,7 +782,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, bins = bin_edges else: bins = asarray(bins) - if np.any(bins[1:] <= bins[:-1]): + if np.any(bins[:-1] > bins[1:]): raise ValueError( 'bins must increase monotonically.') -- cgit v1.2.1 From 9fdf66066ce5084faa493559576fe6d23bf098f6 Mon Sep 17 00:00:00 2001 From: Michael Seifert Date: Fri, 25 Aug 2017 01:04:46 +0200 Subject: DOC: rot90 wrongly positioned versionadded directive. [skip ci] --- numpy/lib/function_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index b1ebcad67..93cbd69dd 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -56,8 +56,6 @@ def rot90(m, k=1, axes=(0,1)): Rotation direction is from the first towards the second axis. - .. versionadded:: 1.12.0 - Parameters ---------- m : array_like @@ -68,6 +66,8 @@ def rot90(m, k=1, axes=(0,1)): The array is rotated in the plane defined by the axes. Axes must be different. + .. versionadded:: 1.12.0 + Returns ------- y : ndarray -- cgit v1.2.1 From fb8d809d8b6773d26ec43775fbee7b23294f125a Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 13 Jul 2017 19:10:09 +0100 Subject: MAINT: Use zip, not enumerate Double-indexing is just a little harder to read --- numpy/lib/function_base.py | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 905e60512..706e9726f 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1733,7 +1733,7 @@ def gradient(f, *varargs, **kwargs): # all other types convert to floating point otype = np.double - for i, axis in enumerate(axes): + for axis, ax_dx in zip(axes, dx): if f.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " @@ -1741,7 +1741,8 @@ def gradient(f, *varargs, **kwargs): # result allocation out = np.empty_like(f, dtype=otype) - uniform_spacing = np.ndim(dx[i]) == 0 + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) @@ -1750,10 +1751,10 @@ def gradient(f, *varargs, **kwargs): slice4[axis] = slice(2, None) if uniform_spacing: - out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) + out[slice1] = (f[slice4] - f[slice2]) / (2. 
* ax_dx) else: - dx1 = dx[i][0:-1] - dx2 = dx[i][1:] + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] a = -(dx2)/(dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) @@ -1769,14 +1770,14 @@ def gradient(f, *varargs, **kwargs): slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 - dx_0 = dx[i] if uniform_spacing else dx[i][0] + dx_0 = ax_dx if uniform_spacing else ax_dx[0] # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) out[slice1] = (f[slice2] - f[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 - dx_n = dx[i] if uniform_spacing else dx[i][-1] + dx_n = ax_dx if uniform_spacing else ax_dx[-1] # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) out[slice1] = (f[slice2] - f[slice3]) / dx_n @@ -1787,12 +1788,12 @@ def gradient(f, *varargs, **kwargs): slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: - a = -1.5 / dx[i] - b = 2. / dx[i] - c = -0.5 / dx[i] + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx else: - dx1 = dx[i][0] - dx2 = dx[i][1] + dx1 = ax_dx[0] + dx2 = ax_dx[1] a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) @@ -1804,12 +1805,12 @@ def gradient(f, *varargs, **kwargs): slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: - a = 0.5 / dx[i] - b = -2. / dx[i] - c = 1.5 / dx[i] + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx else: - dx1 = dx[i][-2] - dx2 = dx[i][-1] + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) -- cgit v1.2.1 From 57d225c11bb253981639d52442384eb3e43bb5f6 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Wed, 18 Oct 2017 23:46:39 -0700 Subject: MAINT: Tidy np.histogram, and improve error messages Split up the overloaded `bins` variable into separate names depending on its meaning Helpful errors are now emitted for: * non-integer bin counts (fixes gh-8072) * non-1d bin edges Removes another use of `np.isscalar`... 
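Illustrative sessions for the new messages (a sketch; tracebacks elided,
wording taken from the hunks below):

    >>> np.histogram([1, 2, 3], bins=8.5)
    TypeError: `bins` must be an integer, a string, or an array
    >>> np.histogram([1, 2, 3], bins=[1, 3, 2])
    ValueError: `bins` must increase monotonically, when an array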
--- numpy/lib/function_base.py | 136 +++++++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 59 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 2745b49d1..1de4e906c 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -4,6 +4,7 @@ import collections import re import sys import warnings +import operator import numpy as np import numpy.core.numeric as _nx @@ -646,7 +647,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, a = asarray(a) if weights is not None: weights = asarray(weights) - if np.any(weights.shape != a.shape): + if weights.shape != a.shape: raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() @@ -671,11 +672,21 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, mn -= 0.5 mx += 0.5 + # density overrides the normed keyword + if density is not None: + normed = False + + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None + if isinstance(bins, basestring): + bin_name = bins # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated - if bins not in _hist_bin_selectors: - raise ValueError("{0} not a valid estimator for bins".format(bins)) + if bin_name not in _hist_bin_selectors: + raise ValueError( + "{!r} is not a valid estimator for `bins`".format(bin_name)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") @@ -689,16 +700,40 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, b = a[keep] if b.size == 0: - bins = 1 + n_equal_bins = 1 else: # Do not call selectors on empty arrays - width = _hist_bin_selectors[bins](b) + width = _hist_bin_selectors[bin_name](b) if width: - bins = int(np.ceil((mx - mn) / width)) + n_equal_bins = int(np.ceil((mx - mn) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. - bins = 1 + n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError: + raise TypeError( + '`bins` must be an integer, a string, or an array') + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + del bins + + # compute the bins if only the count was specified + if n_equal_bins is not None: + bin_edges = linspace(mn, mx, n_equal_bins + 1, endpoint=True) # Histogram is an integer or a float array depending on the weights. if weights is None: @@ -710,27 +745,24 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, # computing histograms, to minimize memory usage. BLOCK = 65536 - if not iterable(bins): - if np.isscalar(bins) and bins < 1: - raise ValueError( - '`bins` should be a positive integer.') - # At this point, if the weights are not integer, floating point, or - # complex, we have to use the slow algorithm. 
- if weights is not None and not (np.can_cast(weights.dtype, np.double) or - np.can_cast(weights.dtype, complex)): - bins = linspace(mn, mx, bins + 1, endpoint=True) - - if not iterable(bins): + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) + + if n_equal_bins is not None and simple_weights: + # Fast algorithm for equal bins # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). # Initialize empty histogram - n = np.zeros(bins, ntype) - # Pre-compute histogram scaling factor - norm = bins / (mx - mn) + n = np.zeros(n_equal_bins, ntype) - # Compute the bin edges for potential correction. - bin_edges = linspace(mn, mx, bins + 1, endpoint=True) + # Pre-compute histogram scaling factor + norm = n_equal_bins / (mx - mn) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it @@ -757,7 +789,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, # Compute the bin indices, and for values that lie exactly on mx we # need to subtract one indices = tmp_a.astype(np.intp) - indices[indices == bins] -= 1 + indices[indices == n_equal_bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. @@ -765,35 +797,26 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = ((tmp_a_data >= bin_edges[indices + 1]) - & (indices != bins - 1)) + & (indices != n_equal_bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, - minlength=bins) + minlength=n_equal_bins) n.imag += np.bincount(indices, weights=tmp_w.imag, - minlength=bins) + minlength=n_equal_bins) else: n += np.bincount(indices, weights=tmp_w, - minlength=bins).astype(ntype) - - # Rename the bin edges for return. 
- bins = bin_edges + minlength=n_equal_bins).astype(ntype) else: - bins = asarray(bins) - if np.any(bins[:-1] > bins[1:]): - raise ValueError( - 'bins must increase monotonically.') - - # Initialize empty histogram - n = np.zeros(bins.shape, ntype) - + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) - n += np.r_[sa.searchsorted(bins[:-1], 'left'), - sa.searchsorted(bins[-1], 'right')] + cum_n += np.r_[sa.searchsorted(bin_edges[:-1], 'left'), + sa.searchsorted(bin_edges[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): @@ -802,27 +825,22 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] - cw = np.concatenate(([zero, ], sw.cumsum())) - bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), - sa.searchsorted(bins[-1], 'right')] - n += cw[bin_index] - + cw = np.concatenate(([zero], sw.cumsum())) + bin_index = np.r_[sa.searchsorted(bin_edges[:-1], 'left'), + sa.searchsorted(bin_edges[-1], 'right')] + cum_n += cw[bin_index] - n = np.diff(n) + n = np.diff(cum_n) - if density is not None: - if density: - db = array(np.diff(bins), float) - return n/db/n.sum(), bins - else: - return n, bins - else: + if density: + db = array(np.diff(bin_edges), float) + return n/db/n.sum(), bin_edges + elif normed: # deprecated, buggy behavior. Remove for NumPy 2.0.0 - if normed: - db = array(np.diff(bins), float) - return n/(n*db).sum(), bins - else: - return n, bins + db = array(np.diff(bin_edges), float) + return n/(n*db).sum(), bin_edges + else: + return n, bin_edges def histogramdd(sample, bins=10, range=None, normed=False, weights=None): -- cgit v1.2.1 From 5b7b87e4ebd91b1f0e17db05113a94e6776a5701 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 19 Oct 2017 23:04:52 -0700 Subject: MAINT: rename mn and mx to first_edge and last_edge --- numpy/lib/function_base.py | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 1de4e906c..3a73409fc 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -657,20 +657,20 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, if range is None: if a.size == 0: # handle empty arrays. Can't determine range, so use 0-1. 
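# (Illustrative aside, worked by hand: with no data and no explicit range,
#  the edges default to linspace(0, 1, bins + 1), so np.histogram([])
#  should give ten zero counts over the interval [0, 1].)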
- mn, mx = 0.0, 1.0 + first_edge, last_edge = 0.0, 1.0 else: - mn, mx = a.min() + 0.0, a.max() + 0.0 + first_edge, last_edge = a.min() + 0.0, a.max() + 0.0 else: - mn, mx = [mi + 0.0 for mi in range] - if mn > mx: + first_edge, last_edge = [mi + 0.0 for mi in range] + if first_edge > last_edge: raise ValueError( 'max must be larger than min in range parameter.') - if not np.all(np.isfinite([mn, mx])): + if not np.all(np.isfinite([first_edge, last_edge])): raise ValueError( 'range parameter must be finite.') - if mn == mx: - mn -= 0.5 - mx += 0.5 + if first_edge == last_edge: + first_edge -= 0.5 + last_edge += 0.5 # density overrides the normed keyword if density is not None: @@ -694,8 +694,8 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, b = a # Update the reference if the range needs truncation if range is not None: - keep = (a >= mn) - keep &= (a <= mx) + keep = (a >= first_edge) + keep &= (a <= last_edge) if not np.logical_and.reduce(keep): b = a[keep] @@ -705,7 +705,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, # Do not call selectors on empty arrays width = _hist_bin_selectors[bin_name](b) if width: - n_equal_bins = int(np.ceil((mx - mn) / width)) + n_equal_bins = int(np.ceil((last_edge - first_edge) / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. @@ -733,7 +733,8 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, # compute the bins if only the count was specified if n_equal_bins is not None: - bin_edges = linspace(mn, mx, n_equal_bins + 1, endpoint=True) + bin_edges = linspace( + first_edge, last_edge, n_equal_bins + 1, endpoint=True) # Histogram is an integer or a float array depending on the weights. if weights is None: @@ -762,7 +763,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, n = np.zeros(n_equal_bins, ntype) # Pre-compute histogram scaling factor - norm = n_equal_bins / (mx - mn) + norm = n_equal_bins / (last_edge - first_edge) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it @@ -776,18 +777,18 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, tmp_w = weights[i:i + BLOCK] # Only include values in the right range - keep = (tmp_a >= mn) - keep &= (tmp_a <= mx) + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) - tmp_a = tmp_a_data - mn + tmp_a = tmp_a_data - first_edge tmp_a *= norm - # Compute the bin indices, and for values that lie exactly on mx we - # need to subtract one + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one indices = tmp_a.astype(np.intp) indices[indices == n_equal_bins] -= 1 -- cgit v1.2.1 From 4712875840b5413a47bd8dcbed9bedeaefe829cd Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 21 Oct 2017 14:39:49 -0700 Subject: MAINT/BUG: Remove special-casing for 0d arrays, now that indexing with a single boolean is ok Also fix the test added in gh-4792, which didn't make sense, but passed anyway --- numpy/lib/function_base.py | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 3a73409fc..573516f3e 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -1323,21 
+1323,15 @@ def piecewise(x, condlist, funclist, *args, **kw):
     """
     x = asanyarray(x)
     n2 = len(funclist)
-    if (isscalar(condlist) or not (isinstance(condlist[0], list) or
-                                   isinstance(condlist[0], ndarray))):
-        if not isscalar(condlist) and x.size == 1 and x.ndim == 0:
-            condlist = [[c] for c in condlist]
-        else:
-            condlist = [condlist]
+
+    # undocumented: single condition is promoted to a list of one condition
+    if isscalar(condlist) or (
+            not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
+        condlist = [condlist]
+
     condlist = array(condlist, dtype=bool)
     n = len(condlist)
-    # This is a hack to work around problems with NumPy's
-    # handling of 0-d arrays and boolean indexing with
-    # numpy.bool_ scalars
-    zerod = False
-    if x.ndim == 0:
-        x = x[None]
-        zerod = True
+
     if n == n2 - 1:  # compute the "otherwise" condition.
         condelse = ~np.any(condlist, axis=0, keepdims=True)
         condlist = np.concatenate([condlist, condelse], axis=0)
@@ -1352,8 +1346,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
         vals = x[condlist[k]]
         if vals.size > 0:
             y[condlist[k]] = item(vals, *args, **kw)
-    if zerod:
-        y = y.squeeze()
+
     return y
-- cgit v1.2.1


From 4729550aa44c97b11d4c9bd39af9bc093c2ce0f8 Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Sat, 21 Oct 2017 14:55:13 -0700
Subject: DOC: piecewise callables take 1d arrays

---
 numpy/lib/function_base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 573516f3e..0f221fe05 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1258,8 +1258,8 @@ def piecewise(x, condlist, funclist, *args, **kw):
         is the default value, used wherever all conditions are false.
     funclist : list of callables, f(x,*args,**kw), or scalars
         Each function is evaluated over `x` wherever its corresponding
-        condition is True. It should take an array as input and give an array
-        or a scalar value as output. If, instead of a callable,
+        condition is True. It should take a 1d array as input and give a 1d
+        array or a scalar value as output. If, instead of a callable,
         a scalar is provided then a constant function (``lambda x: scalar``)
         is assumed.
     args : tuple, optional
-- cgit v1.2.1


From c875b1391286a982c958d349d58eb720eb081069 Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Sun, 22 Oct 2017 16:45:05 -0700
Subject: BUG: Throw an error if too many functions are given to piecewise

Especially necessary given the strange heuristics that decay the number
of conditions to 1
---
 numpy/lib/function_base.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 0f221fe05..498853d32 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1254,7 +1254,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
 
     The length of `condlist` must correspond to that of `funclist`.
     If one extra function is given, i.e. if
-    ``len(funclist) - len(condlist) == 1``, then that extra function
+    ``len(funclist) == len(condlist) + 1``, then that extra function
     is the default value, used wherever all conditions are false.
     funclist : list of callables, f(x,*args,**kw), or scalars
         Each function is evaluated over `x` wherever its corresponding
@@ -1336,6 +1336,11 @@ def piecewise(x, condlist, funclist, *args, **kw):
         condelse = ~np.any(condlist, axis=0, keepdims=True)
         condlist = np.concatenate([condlist, condelse], axis=0)
         n += 1
+    elif n != n2:
+        raise ValueError(
+            "with {} condition(s), either {} or {} functions are expected"
+            .format(n, n, n+1)
+        )
 
     y = zeros(x.shape, x.dtype)
     for k in range(n):
-- cgit v1.2.1


From ac6b1a902b99e340cf7eeeeb7392c91e38db9dd8 Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Mon, 13 Nov 2017 23:45:45 -0800
Subject: ENH: don't show boolean dtype, as it is implied

---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 498853d32..c9a23350d 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2333,7 +2333,7 @@ def extract(condition, arr):
     >>> condition
     array([[ True, False, False,  True],
            [False, False,  True, False],
-           [False,  True, False, False]], dtype=bool)
+           [False,  True, False, False]])
     >>> np.extract(condition, arr)
     array([0, 3, 6, 9])
 
-- cgit v1.2.1


From f2947e5917a2b2a8cbc6582f09944d5064321c8d Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Sun, 10 Dec 2017 10:59:21 -0800
Subject: DOC: Fixup percentile docstring, from review in gh-9213

Updates the two docstrings to match.
---
 numpy/lib/function_base.py | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index c9a23350d..fb37801b9 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -4188,27 +4188,24 @@ def percentile(a, q, axis=None, out=None,
     ----------
     a : array_like
         Input array or object that can be converted to an array.
-    q : float in range of [0,100] (or sequence of floats)
-        Percentile to compute, which must be between 0 and 100 inclusive.
-    axis : {int, sequence of int, None}, optional
+    q : array_like of float
+        Percentile or sequence of percentiles to compute, which must be between
+        0 and 100 inclusive.
+    axis : {int, tuple of int, None}, optional
         Axis or axes along which the percentiles are computed. The
         default is to compute the percentile(s) along a flattened
-        version of the array. A sequence of axes is supported since
-        version 1.9.0.
+        version of the array.
+
+        .. versionchanged:: 1.9.0
+            A tuple of axes is supported
     out : ndarray, optional
         Alternative output array in which to place the result. It must
         have the same shape and buffer length as the expected output,
         but the type (of the output) will be cast if necessary.
     overwrite_input : bool, optional
-        If True, then allow use of memory of input array `a`
-        calculations. The input array will be modified by the call to
-        `percentile`. This will save memory when you do not need to
-        preserve the contents of the input array. In this case you
-        should not make any assumptions about the contents of the input
-        `a` after this function completes -- treat it as undefined.
-        Default is False. If `a` is not already an array, this parameter
-        will have no effect as `a` will be converted to an array
-        internally regardless of the value of this parameter.
+        If True, then allow the input array `a` to be modified by intermediate
+        calculations, to save memory.
In this case, the contents of the input + `a` after this function completes is undefined. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points @@ -4243,7 +4240,9 @@ def percentile(a, q, axis=None, out=None, See Also -------- - mean, median, nanpercentile + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile Notes ----- -- cgit v1.2.1 From 34c9950ccdb8a2f64916903b09c2c39233be0adf Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Sat, 9 Dec 2017 13:58:42 -0800 Subject: MAINT: Move histogram and histogramdd into their own module 800 self-contained lines are easily enough to go in their own file, as are the 500 lines of tests. For compatibility, the names are still available through `np.lib.function_base.histogram` and `from np.lib.function_base import *` For simplicity of imports, all of the unqualified `np.` names are now qualified --- numpy/lib/function_base.py | 804 +-------------------------------------------- 1 file changed, 3 insertions(+), 801 deletions(-) (limited to 'numpy/lib/function_base.py') diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index c9a23350d..d8e10007c 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -39,12 +39,14 @@ if sys.version_info[0] < 3: else: import builtins +# needed in this module for compatibility +from numpy.lib.histograms import histogram, histogramdd __all__ = [ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', - 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', + 'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' @@ -241,806 +243,6 @@ def iterable(y): return True -def _hist_bin_sqrt(x): - """ - Square root histogram bin estimator. - - Bin width is inversely proportional to the data size. Used by many - programs for its simplicity. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - return x.ptp() / np.sqrt(x.size) - - -def _hist_bin_sturges(x): - """ - Sturges histogram bin estimator. - - A very simplistic estimator based on the assumption of normality of - the data. This estimator has poor performance for non-normal data, - which becomes especially obvious for large data sets. The estimate - depends only on size of the data. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - return x.ptp() / (np.log2(x.size) + 1.0) - - -def _hist_bin_rice(x): - """ - Rice histogram bin estimator. - - Another simple estimator with no normality assumption. It has better - performance for large data than Sturges, but tends to overestimate - the number of bins. The number of bins is proportional to the cube - root of data size (asymptotically optimal). The estimate depends - only on size of the data. 
- - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - return x.ptp() / (2.0 * x.size ** (1.0 / 3)) - - -def _hist_bin_scott(x): - """ - Scott histogram bin estimator. - - The binwidth is proportional to the standard deviation of the data - and inversely proportional to the cube root of data size - (asymptotically optimal). - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) - - -def _hist_bin_doane(x): - """ - Doane's histogram bin estimator. - - Improved version of Sturges' formula which works better for - non-normal data. See - stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - if x.size > 2: - sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) - sigma = np.std(x) - if sigma > 0.0: - # These three operations add up to - # g1 = np.mean(((x - np.mean(x)) / sigma)**3) - # but use only one temp array instead of three - temp = x - np.mean(x) - np.true_divide(temp, sigma, temp) - np.power(temp, 3, temp) - g1 = np.mean(temp) - return x.ptp() / (1.0 + np.log2(x.size) + - np.log2(1.0 + np.absolute(g1) / sg1)) - return 0.0 - - -def _hist_bin_fd(x): - """ - The Freedman-Diaconis histogram bin estimator. - - The Freedman-Diaconis rule uses interquartile range (IQR) to - estimate binwidth. It is considered a variation of the Scott rule - with more robustness as the IQR is less affected by outliers than - the standard deviation. However, the IQR depends on fewer points - than the standard deviation, so it is less accurate, especially for - long tailed distributions. - - If the IQR is 0, this function returns 1 for the number of bins. - Binwidth is inversely proportional to the cube root of data size - (asymptotically optimal). - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - """ - iqr = np.subtract(*np.percentile(x, [75, 25])) - return 2.0 * iqr * x.size ** (-1.0 / 3.0) - - -def _hist_bin_auto(x): - """ - Histogram bin estimator that uses the minimum width of the - Freedman-Diaconis and Sturges estimators. - - The FD estimator is usually the most robust method, but its width - estimate tends to be too large for small `x`. The Sturges estimator - is quite good for small (<1000) datasets and is the default in the R - language. This method gives good off the shelf behaviour. - - Parameters - ---------- - x : array_like - Input data that is to be histogrammed, trimmed to range. May not - be empty. - - Returns - ------- - h : An estimate of the optimal bin width for the given data. - - See Also - -------- - _hist_bin_fd, _hist_bin_sturges - """ - # There is no need to check for zero here. If ptp is, so is IQR and - # vice versa. Either both are zero or neither one is. 
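# (Illustrative aside: these estimators return bin *widths*, so taking the
#  minimum below selects the narrower of the FD and Sturges widths, i.e.
#  the larger bin count of the two.)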
- return min(_hist_bin_fd(x), _hist_bin_sturges(x)) - - -# Private dict initialized at module load time -_hist_bin_selectors = {'auto': _hist_bin_auto, - 'doane': _hist_bin_doane, - 'fd': _hist_bin_fd, - 'rice': _hist_bin_rice, - 'scott': _hist_bin_scott, - 'sqrt': _hist_bin_sqrt, - 'sturges': _hist_bin_sturges} - - -def histogram(a, bins=10, range=None, normed=False, weights=None, - density=None): - r""" - Compute the histogram of a set of data. - - Parameters - ---------- - a : array_like - Input data. The histogram is computed over the flattened array. - bins : int or sequence of scalars or str, optional - If `bins` is an int, it defines the number of equal-width - bins in the given range (10, by default). If `bins` is a - sequence, it defines the bin edges, including the rightmost - edge, allowing for non-uniform bin widths. - - .. versionadded:: 1.11.0 - - If `bins` is a string from the list below, `histogram` will use - the method chosen to calculate the optimal bin width and - consequently the number of bins (see `Notes` for more detail on - the estimators) from the data that falls within the requested - range. While the bin width will be optimal for the actual data - in the range, the number of bins will be computed to fill the - entire range, including the empty portions. For visualisation, - using the 'auto' option is suggested. Weighted data is not - supported for automated bin size selection. - - 'auto' - Maximum of the 'sturges' and 'fd' estimators. Provides good - all around performance. - - 'fd' (Freedman Diaconis Estimator) - Robust (resilient to outliers) estimator that takes into - account data variability and data size. - - 'doane' - An improved version of Sturges' estimator that works better - with non-normal datasets. - - 'scott' - Less robust estimator that that takes into account data - variability and data size. - - 'rice' - Estimator does not take variability into account, only data - size. Commonly overestimates number of bins required. - - 'sturges' - R's default method, only accounts for data size. Only - optimal for gaussian data and underestimates number of bins - for large non-gaussian datasets. - - 'sqrt' - Square root (of data size) estimator, used by Excel and - other programs for its speed and simplicity. - - range : (float, float), optional - The lower and upper range of the bins. If not provided, range - is simply ``(a.min(), a.max())``. Values outside the range are - ignored. The first element of the range must be less than or - equal to the second. `range` affects the automatic bin - computation as well. While bin width is computed to be optimal - based on the actual data within `range`, the bin count will fill - the entire range including portions containing no data. - normed : bool, optional - This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy - behavior. It will be removed in NumPy 2.0.0. Use the ``density`` - keyword instead. If ``False``, the result will contain the - number of samples in each bin. If ``True``, the result is the - value of the probability *density* function at the bin, - normalized such that the *integral* over the range is 1. Note - that this latter behavior is known to be buggy with unequal bin - widths; use ``density`` instead. - weights : array_like, optional - An array of weights, of the same shape as `a`. Each value in - `a` only contributes its associated weight towards the bin count - (instead of 1). 
If `density` is True, the weights are
-        normalized, so that the integral of the density over the range
-        remains 1.
-    density : bool, optional
-        If ``False``, the result will contain the number of samples in
-        each bin. If ``True``, the result is the value of the
-        probability *density* function at the bin, normalized such that
-        the *integral* over the range is 1. Note that the sum of the
-        histogram values will not be equal to 1 unless bins of unity
-        width are chosen; it is not a probability *mass* function.
-
-        Overrides the ``normed`` keyword if given.
-
-    Returns
-    -------
-    hist : array
-        The values of the histogram. See `density` and `weights` for a
-        description of the possible semantics.
-    bin_edges : array of dtype float
-        Return the bin edges ``(length(hist)+1)``.
-
-
-    See Also
-    --------
-    histogramdd, bincount, searchsorted, digitize
-
-    Notes
-    -----
-    All but the last (righthand-most) bin is half-open. In other words,
-    if `bins` is::
-
-      [1, 2, 3, 4]
-
-    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
-    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
-    *includes* 4.
-
-    .. versionadded:: 1.11.0
-
-    The methods to estimate the optimal number of bins are well founded
-    in literature, and are inspired by the choices R provides for
-    histogram visualisation. Note that having the number of bins
-    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
-    why it appears in most estimators. These are simply plug-in methods
-    that give good starting points for number of bins. In the equations
-    below, :math:`h` is the binwidth and :math:`n_h` is the number of
-    bins. All estimators that compute bin counts are recast to bin width
-    using the `ptp` of the data. The final bin count is obtained from
-    ``np.round(np.ceil(range / h))``.
-
-    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
-        A compromise to get a good value. For small datasets the Sturges
-        value will usually be chosen, while larger datasets will usually
-        default to FD. Avoids the overly conservative behaviour of FD
-        and Sturges for small and large datasets respectively.
-        Switchover point is usually :math:`a.size \approx 1000`.
-
-    'FD' (Freedman Diaconis Estimator)
-        .. math:: h = 2 \frac{IQR}{n^{1/3}}
-
-        The binwidth is proportional to the interquartile range (IQR)
-        and inversely proportional to cube root of a.size. Can be too
-        conservative for small datasets, but is quite good for large
-        datasets. The IQR is very robust to outliers.
-
-    'Scott'
-        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
-
-        The binwidth is proportional to the standard deviation of the
-        data and inversely proportional to cube root of ``x.size``. Can
-        be too conservative for small datasets, but is quite good for
-        large datasets. The standard deviation is not very robust to
-        outliers. Values are very similar to the Freedman-Diaconis
-        estimator in the absence of outliers.
-
-    'Rice'
-        .. math:: n_h = 2n^{1/3}
-
-        The number of bins is only proportional to cube root of
-        ``a.size``. It tends to overestimate the number of bins and it
-        does not take into account data variability.
-
-    'Sturges'
-        .. math:: n_h = \log _{2}n+1
-
-        The number of bins is the base 2 log of ``a.size``. This
-        estimator assumes normality of data and is too conservative for
-        larger, non-normal datasets. This is the default method in R's
-        ``hist`` method.
-
-    'Doane'
-        .. math:: n_h = 1 + \log_{2}(n) +
-                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
-
-            g_1 = mean[(\frac{x - \mu}{\sigma})^3]
-
-            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
-
-        An improved version of Sturges' formula that produces better
-        estimates for non-normal datasets. This estimator attempts to
-        account for the skew of the data.
-
-    'Sqrt'
-        .. math:: n_h = \sqrt n
-        The simplest and fastest estimator. Only takes into account the
-        data size.
-
-    Examples
-    --------
-    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
-    (array([0, 2, 1]), array([0, 1, 2, 3]))
-    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
-    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
-    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
-    (array([1, 4, 1]), array([0, 1, 2, 3]))
-
-    >>> a = np.arange(5)
-    >>> hist, bin_edges = np.histogram(a, density=True)
-    >>> hist
-    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
-    >>> hist.sum()
-    2.4999999999999996
-    >>> np.sum(hist * np.diff(bin_edges))
-    1.0
-
-    .. versionadded:: 1.11.0
-
-    Automated Bin Selection Methods example, using 2 peak random data
-    with 2000 points:
-
-    >>> import matplotlib.pyplot as plt
-    >>> rng = np.random.RandomState(10)  # deterministic random data
-    >>> a = np.hstack((rng.normal(size=1000),
-    ...                rng.normal(loc=5, scale=2, size=1000)))
-    >>> plt.hist(a, bins='auto')  # arguments are passed to np.histogram
-    >>> plt.title("Histogram with 'auto' bins")
-    >>> plt.show()
-
-    """
-    a = asarray(a)
-    if weights is not None:
-        weights = asarray(weights)
-        if weights.shape != a.shape:
-            raise ValueError(
-                'weights should have the same shape as a.')
-        weights = weights.ravel()
-    a = a.ravel()
-
-    # Do not modify the original value of range so we can check for `None`
-    if range is None:
-        if a.size == 0:
-            # handle empty arrays. Can't determine range, so use 0-1.
-            first_edge, last_edge = 0.0, 1.0
-        else:
-            first_edge, last_edge = a.min() + 0.0, a.max() + 0.0
-    else:
-        first_edge, last_edge = [mi + 0.0 for mi in range]
-        if first_edge > last_edge:
-            raise ValueError(
-                'max must be larger than min in range parameter.')
-        if not np.all(np.isfinite([first_edge, last_edge])):
-            raise ValueError(
-                'range parameter must be finite.')
-    if first_edge == last_edge:
-        first_edge -= 0.5
-        last_edge += 0.5
-
-    # density overrides the normed keyword
-    if density is not None:
-        normed = False
-
-    # parse the overloaded bins argument
-    n_equal_bins = None
-    bin_edges = None
-
-    if isinstance(bins, basestring):
-        bin_name = bins
-        # if `bins` is a string for an automatic method,
-        # this will replace it with the number of bins calculated
-        if bin_name not in _hist_bin_selectors:
-            raise ValueError(
-                "{!r} is not a valid estimator for `bins`".format(bin_name))
-        if weights is not None:
-            raise TypeError("Automated estimation of the number of "
-                            "bins is not supported for weighted data")
-        # Make a reference to `a`
-        b = a
-        # Update the reference if the range needs truncation
-        if range is not None:
-            keep = (a >= first_edge)
-            keep &= (a <= last_edge)
-            if not np.logical_and.reduce(keep):
-                b = a[keep]
-
-        if b.size == 0:
-            n_equal_bins = 1
-        else:
-            # Do not call selectors on empty arrays
-            width = _hist_bin_selectors[bin_name](b)
-            if width:
-                n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
-            else:
-                # Width can be zero for some estimators, e.g. FD when
-                # the IQR of the data is zero.
-                n_equal_bins = 1
-
-    elif np.ndim(bins) == 0:
-        try:
-            n_equal_bins = operator.index(bins)
-        except TypeError:
-            raise TypeError(
-                '`bins` must be an integer, a string, or an array')
-        if n_equal_bins < 1:
-            raise ValueError('`bins` must be positive, when an integer')
-
-    elif np.ndim(bins) == 1:
-        bin_edges = np.asarray(bins)
-        if np.any(bin_edges[:-1] > bin_edges[1:]):
-            raise ValueError(
-                '`bins` must increase monotonically, when an array')
-
-    else:
-        raise ValueError('`bins` must be 1d, when an array')
-
-    del bins
-
-    # compute the bins if only the count was specified
-    if n_equal_bins is not None:
-        bin_edges = linspace(
-            first_edge, last_edge, n_equal_bins + 1, endpoint=True)
-
-    # Histogram is an integer or a float array depending on the weights.
-    if weights is None:
-        ntype = np.dtype(np.intp)
-    else:
-        ntype = weights.dtype
-
-    # We set a block size, as this allows us to iterate over chunks when
-    # computing histograms, to minimize memory usage.
-    BLOCK = 65536
-
-    # The fast path uses bincount, but that only works for certain types
-    # of weight
-    simple_weights = (
-        weights is None or
-        np.can_cast(weights.dtype, np.double) or
-        np.can_cast(weights.dtype, complex)
-    )
-
-    if n_equal_bins is not None and simple_weights:
-        # Fast algorithm for equal bins
-        # We now convert values of a to bin indices, under the assumption of
-        # equal bin widths (which is valid here).
-
-        # Initialize empty histogram
-        n = np.zeros(n_equal_bins, ntype)
-
-        # Pre-compute histogram scaling factor
-        norm = n_equal_bins / (last_edge - first_edge)
-
-        # We iterate over blocks here for two reasons: the first is that for
-        # large arrays, it is actually faster (for example for a 10^8 array it
-        # is 2x as fast) and it results in a memory footprint 3x lower in the
-        # limit of large arrays.
-        for i in arange(0, len(a), BLOCK):
-            tmp_a = a[i:i+BLOCK]
-            if weights is None:
-                tmp_w = None
-            else:
-                tmp_w = weights[i:i + BLOCK]
-
-            # Only include values in the right range
-            keep = (tmp_a >= first_edge)
-            keep &= (tmp_a <= last_edge)
-            if not np.logical_and.reduce(keep):
-                tmp_a = tmp_a[keep]
-                if tmp_w is not None:
-                    tmp_w = tmp_w[keep]
-            tmp_a_data = tmp_a.astype(float)
-            tmp_a = tmp_a_data - first_edge
-            tmp_a *= norm
-
-            # Compute the bin indices, and for values that lie exactly on
-            # last_edge we need to subtract one
-            indices = tmp_a.astype(np.intp)
-            indices[indices == n_equal_bins] -= 1
-
-            # The index computation is not guaranteed to give exactly
-            # consistent results within ~1 ULP of the bin edges.
-            decrement = tmp_a_data < bin_edges[indices]
-            indices[decrement] -= 1
-            # The last bin includes the right edge. The other bins do not.
-            increment = ((tmp_a_data >= bin_edges[indices + 1])
-                         & (indices != n_equal_bins - 1))
-            indices[increment] += 1
-
-            # We now compute the histogram using bincount
-            if ntype.kind == 'c':
-                n.real += np.bincount(indices, weights=tmp_w.real,
-                                      minlength=n_equal_bins)
-                n.imag += np.bincount(indices, weights=tmp_w.imag,
-                                      minlength=n_equal_bins)
-            else:
-                n += np.bincount(indices, weights=tmp_w,
-                                 minlength=n_equal_bins).astype(ntype)
-    else:
-        # Compute via cumulative histogram
-        cum_n = np.zeros(bin_edges.shape, ntype)
-        if weights is None:
-            for i in arange(0, len(a), BLOCK):
-                sa = sort(a[i:i+BLOCK])
-                cum_n += np.r_[sa.searchsorted(bin_edges[:-1], 'left'),
-                               sa.searchsorted(bin_edges[-1], 'right')]
-        else:
-            zero = array(0, dtype=ntype)
-            for i in arange(0, len(a), BLOCK):
-                tmp_a = a[i:i+BLOCK]
-                tmp_w = weights[i:i+BLOCK]
-                sorting_index = np.argsort(tmp_a)
-                sa = tmp_a[sorting_index]
-                sw = tmp_w[sorting_index]
-                cw = np.concatenate(([zero], sw.cumsum()))
-                bin_index = np.r_[sa.searchsorted(bin_edges[:-1], 'left'),
-                                  sa.searchsorted(bin_edges[-1], 'right')]
-                cum_n += cw[bin_index]
-
-        n = np.diff(cum_n)
-
-    if density:
-        db = array(np.diff(bin_edges), float)
-        return n/db/n.sum(), bin_edges
-    elif normed:
-        # deprecated, buggy behavior. Remove for NumPy 2.0.0
-        db = array(np.diff(bin_edges), float)
-        return n/(n*db).sum(), bin_edges
-    else:
-        return n, bin_edges
-
-
-def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
-    """
-    Compute the multidimensional histogram of some data.
-
-    Parameters
-    ----------
-    sample : array_like
-        The data to be histogrammed. It must be an (N,D) array or data
-        that can be converted to such. The rows of the resulting array
-        are the coordinates of points in a D dimensional polytope.
-    bins : sequence or int, optional
-        The bin specification:
-
-        * A sequence of arrays describing the bin edges along each dimension.
-        * The number of bins for each dimension (nx, ny, ... =bins)
-        * The number of bins for all dimensions (nx=ny=...=bins).
-
-    range : sequence, optional
-        A sequence of lower and upper bin edges to be used if the edges are
-        not given explicitly in `bins`. Defaults to the minimum and maximum
-        values along each dimension.
-    normed : bool, optional
-        If False, returns the number of samples in each bin. If True,
-        returns the bin density ``bin_count / sample_count / bin_volume``.
-    weights : (N,) array_like, optional
-        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
-        Weights are normalized to 1 if normed is True. If normed is False,
-        the values of the returned histogram are equal to the sum of the
-        weights belonging to the samples falling into each bin.
-
-    Returns
-    -------
-    H : ndarray
-        The multidimensional histogram of sample x. See normed and weights
-        for the different possible semantics.
-    edges : list
-        A list of D arrays describing the bin edges for each dimension.
-
-    See Also
-    --------
-    histogram: 1-D histogram
-    histogram2d: 2-D histogram
-
-    Examples
-    --------
-    >>> r = np.random.randn(100,3)
-    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
-    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
-    ((5, 8, 4), 6, 9, 5)
-
-    """
-
-    try:
-        # Sample is an ND-array.
-        N, D = sample.shape
-    except (AttributeError, ValueError):
-        # Sample is a sequence of 1D arrays.
-        sample = atleast_2d(sample).T
-        N, D = sample.shape
-
-    nbin = empty(D, int)
-    edges = D*[None]
-    dedges = D*[None]
-    if weights is not None:
-        weights = asarray(weights)
-
-    try:
-        M = len(bins)
-        if M != D:
-            raise ValueError(
-                'The dimension of bins must be equal to the dimension of the '
-                ' sample x.')
-    except TypeError:
-        # bins is an integer
-        bins = D*[bins]
-
-    # Select range for each dimension
-    # Used only if number of bins is given.
-    if range is None:
-        # Handle empty input. Range can't be determined in that case, use 0-1.
-        if N == 0:
-            smin = zeros(D)
-            smax = ones(D)
-        else:
-            smin = atleast_1d(array(sample.min(0), float))
-            smax = atleast_1d(array(sample.max(0), float))
-    else:
-        if not np.all(np.isfinite(range)):
-            raise ValueError(
-                'range parameter must be finite.')
-        smin = zeros(D)
-        smax = zeros(D)
-        for i in arange(D):
-            smin[i], smax[i] = range[i]
-
-    # Make sure the bins have a finite width.
-    for i in arange(len(smin)):
-        if smin[i] == smax[i]:
-            smin[i] = smin[i] - .5
-            smax[i] = smax[i] + .5
-
-    # avoid rounding issues for comparisons when dealing with inexact types
-    if np.issubdtype(sample.dtype, np.inexact):
-        edge_dt = sample.dtype
-    else:
-        edge_dt = float
-    # Create edge arrays
-    for i in arange(D):
-        if isscalar(bins[i]):
-            if bins[i] < 1:
-                raise ValueError(
-                    "Element at index %s in `bins` should be a positive "
-                    "integer." % i)
-            nbin[i] = bins[i] + 2  # +2 for outlier bins
-            edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
-        else:
-            edges[i] = asarray(bins[i], edge_dt)
-            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
-        dedges[i] = diff(edges[i])
-        if np.any(np.asarray(dedges[i]) <= 0):
-            raise ValueError(
-                "Found bin edge of size <= 0. Did you specify `bins` with"
-                "non-monotonic sequence?")
-
-    nbin = asarray(nbin)
-
-    # Handle empty input.
-    if N == 0:
-        return np.zeros(nbin-2), edges
-
-    # Compute the bin number each sample falls into.
-    Ncount = {}
-    for i in arange(D):
-        Ncount[i] = digitize(sample[:, i], edges[i])
-
-    # Using digitize, values that fall on an edge are put in the right bin.
-    # For the rightmost bin, we want values equal to the right edge to be
-    # counted in the last bin, and not as an outlier.
-    for i in arange(D):
-        # Rounding precision
-        mindiff = dedges[i].min()
-        if not np.isinf(mindiff):
-            decimal = int(-log10(mindiff)) + 6
-            # Find which points are on the rightmost edge.
-            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
-            on_edge = (around(sample[:, i], decimal) ==
-                       around(edges[i][-1], decimal))
-            # Shift these points one bin to the left.
-            Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1
-
-    # Flattened histogram matrix (1D)
-    # Reshape is used so that overlarge arrays
-    # will raise an error.
-    hist = zeros(nbin, float).reshape(-1)
-
-    # Compute the sample indices in the flattened histogram matrix.
-    ni = nbin.argsort()
-    xy = zeros(N, int)
-    for i in arange(0, D-1):
-        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
-    xy += Ncount[ni[-1]]
-
-    # Compute the number of repetitions in xy and assign it to the
-    # flattened histmat.
-    if len(xy) == 0:
-        return zeros(nbin-2, int), edges
-
-    flatcount = bincount(xy, weights)
-    a = arange(len(flatcount))
-    hist[a] = flatcount
-
-    # Shape into a proper matrix
-    hist = hist.reshape(sort(nbin))
-    for i in arange(nbin.size):
-        j = ni.argsort()[i]
-        hist = hist.swapaxes(i, j)
-        ni[i], ni[j] = ni[j], ni[i]
-
-    # Remove outliers (indices 0 and -1 for each dimension).
-    core = D*[slice(1, -1)]
-    hist = hist[core]
-
-    # Normalize if normed is True
-    if normed:
-        s = hist.sum()
-        for i in arange(D):
-            shape = ones(D, int)
-            shape[i] = nbin[i] - 2
-            hist = hist / dedges[i].reshape(shape)
-        hist /= s
-
-    if (hist.shape != nbin - 2).any():
-        raise RuntimeError(
-            "Internal Shape Error")
-    return hist, edges
-
-
 def average(a, axis=None, weights=None, returned=False):
     """
     Compute the weighted average along the specified axis.
--
cgit v1.2.1


From b0740b7b5625181dbed25f6b13bc424c1f37614b Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Mon, 18 Dec 2017 23:41:54 -0800
Subject: MAINT: Avoid repeated validation of percentiles in nanpercentile

Previously, this would check the percentiles were 0 <= q <= 100 on every
single slice of non-reduction axes.

This also pulls the division by 100 to the top level, which makes things
clearer, and also reduces the size of the stack trace
---
 numpy/lib/function_base.py | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index fb37801b9..d9a539f18 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -4284,8 +4284,17 @@ def percentile(a, q, axis=None, out=None,
     >>> assert not np.all(a == b)

     """
-    q = array(q, dtype=np.float64, copy=True)
-    r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
+    q = np.true_divide(q, 100.0)  # handles the asarray for us too
+    if not _quantile_is_valid(q):
+        raise ValueError("Percentiles must be in the range [0, 100]")
+    return _quantile_unchecked(
+        a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
+def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
+                        interpolation='linear', keepdims=False):
+    """Assumes that q is in [0, 1], and is an ndarray"""
+    r, k = _ureduce(a, func=_quantile_ureduce_func, q=q, axis=axis, out=out,
                     overwrite_input=overwrite_input,
                     interpolation=interpolation)
     if keepdims:
@@ -4294,8 +4303,21 @@ def percentile(a, q, axis=None, out=None,
     return r


-def _percentile(a, q, axis=None, out=None,
-                overwrite_input=False, interpolation='linear', keepdims=False):
+def _quantile_is_valid(q):
+    # avoid expensive reductions, relevant for arrays with < O(1000) elements
+    if q.ndim == 1 and q.size < 10:
+        for i in range(q.size):
+            if q[i] < 0.0 or q[i] > 1.0:
+                return False
+    else:
+        # faster than any()
+        if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0):
+            return False
+    return True
+
+
+def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
+                           interpolation='linear', keepdims=False):
     a = asarray(a)
     if q.ndim == 0:
         # Do not allow 0-d arrays because following code fails for scalar
@@ -4304,18 +4326,6 @@ def _percentile(a, q, axis=None, out=None,
     else:
         zerod = False

-    # avoid expensive reductions, relevant for arrays with < O(1000) elements
-    if q.size < 10:
-        for i in range(q.size):
-            if q[i] < 0. or q[i] > 100.:
-                raise ValueError("Percentiles must be in the range [0,100]")
-            q[i] /= 100.
-    else:
-        # faster than any()
-        if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
-            raise ValueError("Percentiles must be in the range [0,100]")
-        q /= 100.
-
     # prepare a for partioning
     if overwrite_input:
         if axis is None:
--
cgit v1.2.1


From 3fd9d03ef5e56990150d8974223f201e7ba3cc68 Mon Sep 17 00:00:00 2001
From: Tai-Lin Wu
Date: Wed, 20 Dec 2017 10:44:32 -0500
Subject: ENH: fix typo

---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index fb37801b9..780cf3c5c 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2942,7 +2942,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
         .. versionadded:: 1.5
     fweights : array_like, int, optional
-        1-D array of integer freguency weights; the number of times each
+        1-D array of integer frequency weights; the number of times each
         observation vector should be repeated.

         .. versionadded:: 1.10
--
cgit v1.2.1


From 923e16db075302d17d73bc5426d9cd3af02c8483 Mon Sep 17 00:00:00 2001
From: luzpaz
Date: Sat, 30 Dec 2017 10:45:49 -0500
Subject: Documentation and misc. typos

Found via `codespell`
---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index e01333b8b..b4ac7274a 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3195,7 +3195,7 @@ def _ureduce(a, func, **kwargs):
         Input array or object that can be converted to an array.
     func : callable
         Reduction function capable of receiving a single axis argument.
-        It is is called with `a` as first argument followed by `kwargs`.
+        It is called with `a` as first argument followed by `kwargs`.
     kwargs : keyword arguments
         additional keyword arguments to pass to `func`.
--
cgit v1.2.1


From 7cb22f954ed50217f7eb483f4fbdb67953f6482d Mon Sep 17 00:00:00 2001
From: "luz.paz"
Date: Thu, 4 Jan 2018 20:48:10 -0500
Subject: More misc. typos

Found via `codespell`
---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 979bc13d0..7571c4f3b 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3528,7 +3528,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
     else:
         zerod = False

-    # prepare a for partioning
+    # prepare a for partitioning
     if overwrite_input:
         if axis is None:
             ap = a.ravel()
--
cgit v1.2.1


From eb17e184122cfd8cc624b64b3e55c240bdff9ebb Mon Sep 17 00:00:00 2001
From: Zane Bradley
Date: Thu, 25 Jan 2018 15:15:28 -0800
Subject: DOC: fix formatting in interp example

---
 numpy/lib/function_base.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 7571c4f3b..391c47a06 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1236,7 +1236,8 @@ def interp(x, xp, fp, left=None, right=None, period=None):
     >>> np.interp(x, xp, fp, period=360)
     array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])

-    Complex interpolation
+    Complex interpolation:
+
     >>> x = [1.5, 4.0]
     >>> xp = [2,3,5]
     >>> fp = [1.0j, 0, 2+3j]
--
cgit v1.2.1


From 7e309829bf1a0a14221b2d3623d27895a130fa9d Mon Sep 17 00:00:00 2001
From: hobler
Date: Thu, 8 Feb 2018 21:54:48 +0100
Subject: Update function_base.py

---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 391c47a06..cc5f7453d 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -839,7 +839,7 @@ def gradient(f, *varargs, **kwargs):
         \\left\\{
             \\begin{array}{r}
                 \\alpha+\\beta+\\gamma=0 \\\\
-                -\\beta h_{d}+\\gamma h_{s}=1 \\\\
+                \\beta h_{d}-\\gamma h_{s}=1 \\\\
                 \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
             \\end{array}
         \\right.
--
cgit v1.2.1


From e441c291b2e10c8de85a9d950d0add552d0ebd83 Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Fri, 16 Feb 2018 21:36:38 -0800
Subject: MAINT: Stop using non-tuple indices internally

By not using this type of indexing, it becomes easier for subclasses to
override indexing in a way that works correctly with numpy functions.

These locations were found by deprecating the behavior in question,
which is deliberately not part of this commit
---
 numpy/lib/function_base.py | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 391c47a06..0434eb472 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -970,7 +970,7 @@ def gradient(f, *varargs, **kwargs):
         slice4[axis] = slice(2, None)

         if uniform_spacing:
-            out[slice1] = (f[slice4] - f[slice2]) / (2. * ax_dx)
+            out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)
         else:
             dx1 = ax_dx[0:-1]
             dx2 = ax_dx[1:]
@@ -982,7 +982,7 @@ def gradient(f, *varargs, **kwargs):
             shape[axis] = -1
             a.shape = b.shape = c.shape = shape
             # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
-            out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
+            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

         # Numerical differentiation: 1st order edges
         if edge_order == 1:
@@ -991,14 +991,14 @@ def gradient(f, *varargs, **kwargs):
             slice3[axis] = 0
             dx_0 = ax_dx if uniform_spacing else ax_dx[0]
             # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
-            out[slice1] = (f[slice2] - f[slice3]) / dx_0
+            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0

             slice1[axis] = -1
             slice2[axis] = -1
             slice3[axis] = -2
             dx_n = ax_dx if uniform_spacing else ax_dx[-1]
             # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
-            out[slice1] = (f[slice2] - f[slice3]) / dx_n
+            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n

         # Numerical differentiation: 2nd order edges
         else:
@@ -1017,7 +1017,7 @@ def gradient(f, *varargs, **kwargs):
                 b = (dx1 + dx2) / (dx1 * dx2)
                 c = - dx1 / (dx2 * (dx1 + dx2))
             # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
-            out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
+            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

             slice1[axis] = -1
             slice2[axis] = -3
@@ -1034,7 +1034,7 @@ def gradient(f, *varargs, **kwargs):
                 b = - (dx2 + dx1) / (dx1 * dx2)
                 c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
             # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
-            out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
+            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

         outvals.append(out)

@@ -1385,6 +1385,7 @@ def unwrap(p, discont=pi, axis=-1):
     dd = diff(p, axis=axis)
     slice1 = [slice(None, None)]*nd     # full slices
     slice1[axis] = slice(1, None)
+    slice1 = tuple(slice1)
     ddmod = mod(dd + pi, 2*pi) - pi
     _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
     ph_correct = ddmod - dd
@@ -3367,6 +3368,7 @@ def _median(a, axis=None, out=None, overwrite_input=False):
         indexer[axis] = slice(index, index+1)
     else:
         indexer[axis] = slice(index-1, index+1)
+    indexer = tuple(indexer)

     # Check if the array contains any nan's
     if np.issubdtype(a.dtype, np.inexact) and sz > 0:
@@ -3732,12 +3734,12 @@ def trapz(y, x=None, dx=1.0, axis=-1):
     slice1[axis] = slice(1, None)
     slice2[axis] = slice(None, -1)
     try:
-        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
+        ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
     except ValueError:
         # Operations didn't work, cast to ndarray
         d = np.asarray(d)
         y = np.asarray(y)
-        ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
+        ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
     return ret

@@ -4026,7 +4028,7 @@ def delete(arr, obj, axis=None):
                 pass
             else:
                 slobj[axis] = slice(None, start)
-                new[slobj] = arr[slobj]
+                new[tuple(slobj)] = arr[tuple(slobj)]
             # copy end chunck
             if stop == N:
                 pass
@@ -4034,7 +4036,7 @@ def delete(arr, obj, axis=None):
                 slobj[axis] = slice(stop-numtodel, None)
                 slobj2 = [slice(None)]*ndim
                 slobj2[axis] = slice(stop, None)
-                new[slobj] = arr[slobj2]
+                new[tuple(slobj)] = arr[tuple(slobj2)]
             # copy middle pieces
             if step == 1:
                 pass
@@ -4044,9 +4046,9 @@ def delete(arr, obj, axis=None):
                 slobj[axis] = slice(start, stop-numtodel)
                 slobj2 = [slice(None)]*ndim
                 slobj2[axis] = slice(start, stop)
-                arr = arr[slobj2]
+                arr = arr[tuple(slobj2)]
                 slobj2[axis] = keep
-                new[slobj] = arr[slobj2]
+                new[tuple(slobj)] = arr[tuple(slobj2)]
             if wrap:
                 return wrap(new)
             else:
@@ -4073,11 +4075,11 @@ def delete(arr, obj, axis=None):
         newshape[axis] -= 1
         new = empty(newshape, arr.dtype, arrorder)
         slobj[axis] = slice(None, obj)
-        new[slobj] = arr[slobj]
+        new[tuple(slobj)] = arr[tuple(slobj)]
         slobj[axis] = slice(obj, None)
         slobj2 = [slice(None)]*ndim
         slobj2[axis] = slice(obj+1, None)
-        new[slobj] = arr[slobj2]
+        new[tuple(slobj)] = arr[tuple(slobj2)]
     else:
         if obj.size == 0 and not isinstance(_obj, np.ndarray):
             obj = obj.astype(intp)
@@ -4109,7 +4111,7 @@ def delete(arr, obj, axis=None):
             keep[obj, ] = False
         slobj[axis] = keep
-        new = arr[slobj]
+        new = arr[tuple(slobj)]

     if wrap:
         return wrap(new)
@@ -4280,13 +4282,13 @@ def insert(arr, obj, values, axis=None):
         newshape[axis] += numnew
         new = empty(newshape, arr.dtype, arrorder)
         slobj[axis] = slice(None, index)
-        new[slobj] = arr[slobj]
+        new[tuple(slobj)] = arr[tuple(slobj)]
         slobj[axis] = slice(index, index+numnew)
-        new[slobj] = values
+        new[tuple(slobj)] = values
         slobj[axis] = slice(index+numnew, None)
         slobj2 = [slice(None)] * ndim
         slobj2[axis] = slice(index, None)
-        new[slobj] = arr[slobj2]
+        new[tuple(slobj)] = arr[tuple(slobj2)]
         if wrap:
             return wrap(new)
         return new
@@ -4315,8 +4317,8 @@ def insert(arr, obj, values, axis=None):
     slobj2 = [slice(None)]*ndim
     slobj[axis] = indices
     slobj2[axis] = old_mask
-    new[slobj] = values
-    new[slobj2] = arr
+    new[tuple(slobj)] = values
+    new[tuple(slobj2)] = arr

     if wrap:
         return wrap(new)
--
cgit v1.2.1


From 8eccbf7df2f564e23c1bf7c9f5ee08e4b0dc6a36 Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Sun, 25 Feb 2018 11:28:31 -0800
Subject: BUG/MAINT: Remove special handling of 0d arrays and scalars in interp

These are now handled generically by the underlying C function.

This fixes the period argument for 0d arrays. Now never returns a
pure-python scalar, which matches the behaviour of most of numpy.
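A quick hedged illustration of the behaviour described above (not part of
the original patch; assumes a NumPy build that includes this change, and
the exact scalar repr may vary between versions):

>>> import numpy as np
>>> # scalar input now takes the same C path as array input,
>>> # so the result is a numpy scalar rather than a Python float
>>> type(np.interp(2.5, [1, 2, 3], [3, 2, 0]))
<class 'numpy.float64'>
>>> # ... and period= now also works for 0-d array input
>>> np.interp(np.array(270.0), [0, 180], [0.0, 1.0], period=360)
0.5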
Rework of b66a200a4a1e98f1955c8a774e4ebfb4588dab5b
---
 numpy/lib/function_base.py | 19 +++----------------
 1 file changed, 3 insertions(+), 16 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 391c47a06..504280cef 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1255,23 +1255,13 @@ def interp(x, xp, fp, left=None, right=None, period=None):
         interp_func = compiled_interp
         input_dtype = np.float64

-    if period is None:
-        if isinstance(x, (float, int, number)):
-            return interp_func([x], xp, fp, left, right).item()
-        elif isinstance(x, np.ndarray) and x.ndim == 0:
-            return interp_func([x], xp, fp, left, right).item()
-        else:
-            return interp_func(x, xp, fp, left, right)
-    else:
+    if period is not None:
         if period == 0:
             raise ValueError("period must be a non-zero value")
         period = abs(period)
         left = None
         right = None
-        return_array = True
-        if isinstance(x, (float, int, number)):
-            return_array = False
-            x = [x]
+
        x = np.asarray(x, dtype=np.float64)
         xp = np.asarray(xp, dtype=np.float64)
         fp = np.asarray(fp, dtype=input_dtype)
@@ -1289,10 +1279,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
             xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
             fp = np.concatenate((fp[-1:], fp, fp[0:1]))

-    if return_array:
-        return interp_func(x, xp, fp, left, right)
-    else:
-        return interp_func(x, xp, fp, left, right).item()
+    return interp_func(x, xp, fp, left, right)


 def angle(z, deg=0):
--
cgit v1.2.1


From 018dfbcaa69d8bcda4d909be11a65a188cf5d1dd Mon Sep 17 00:00:00 2001
From: David Freese
Date: Sun, 25 Feb 2018 20:09:15 -0800
Subject: BUG: fix complex casting error in cov with aweights

When using cov with a complex input and with aweights specified, cov
will error as a result of trying to cast a complex value into a float64.
This comes about since average is used to calculate the sum of the
weights from aweights. average returns the sum of weights as the same
type as its result, not the weights type. For a complex input m, and
any type for aweights, this would result in a complex value for fact.

It appears the primary purpose of np.float64(fact) is to provide a NaN
value from the divide when fact is an integer zero. This has been
replaced by using numpy.divide to replicate the same behavior, but to
also handle complex types.
---
 numpy/lib/function_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 391c47a06..e1e6242ae 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2309,7 +2309,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
     else:
         X_T = (X*w).T
     c = dot(X, X_T.conj())
-    c *= 1. / np.float64(fact)
+    c *= np.true_divide(1, fact)
     return c.squeeze()
--
cgit v1.2.1


From 53b358ce7eddf78ac2bc22045fbe25e91e663b9a Mon Sep 17 00:00:00 2001
From: Frederick Lefebvre
Date: Wed, 14 Mar 2018 03:33:05 +0000
Subject: TST: Import abstract classes from collections.abc

Abstract collection classes accessed from the collections module have
been deprecated since Python 3.3. They should be accessed through
collections.abc. When run with Python 3.7, the deprecation warning
causes multiple tests to fail.
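For context, the compatibility idiom this patch applies can be exercised
standalone. A minimal sketch (not from the patch itself; the
`apply_or_fill` helper is hypothetical, standing in for the `piecewise`
logic touched in the diff below):

try:
    # Python 3.3+ moved the abstract base classes to collections.abc
    import collections.abc as collections_abc
except ImportError:
    # Python 2 still exposes them directly from collections
    import collections as collections_abc

def apply_or_fill(item, value):
    # mirror the check used in piecewise(): call item if it is
    # callable, otherwise treat it as a constant fill value
    if isinstance(item, collections_abc.Callable):
        return item(value)
    return item

assert apply_or_fill(abs, -3) == 3   # callable branch
assert apply_or_fill(7, -3) == 7     # constant branch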
---
 numpy/lib/function_base.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 422a87322..07b845e2b 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,6 +1,11 @@
 from __future__ import division, absolute_import, print_function

-import collections
+try:
+    # Accessing collections abstact classes from collections
+    # has been deprecated since Python 3.3
+    import collections.abc as collections_abc
+except ImportError:
+    import collections as collections_abc
 import re
 import sys
 import warnings
@@ -547,7 +552,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
     y = zeros(x.shape, x.dtype)
     for k in range(n):
         item = funclist[k]
-        if not isinstance(item, collections.Callable):
+        if not isinstance(item, collections_abc.Callable):
             y[condlist[k]] = item
         else:
             vals = x[condlist[k]]
--
cgit v1.2.1


From e3ff50194c0633113ae543700d83ebb165c8d06b Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Thu, 15 Mar 2018 08:40:26 -0700
Subject: DOC: Add graph showing different behaviors of np.percentile

With thanks to @ricardoV94 for inspiring this
---
 numpy/lib/function_base.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 422a87322..66e1edc2e 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3473,6 +3473,33 @@ def percentile(a, q, axis=None, out=None,
     array([ 7.,  2.])
     >>> assert not np.all(a == b)

+    The different types of interpolation can be visualized graphically:
+
+    ..plot::
+        import matplotlib.pyplot as plt
+
+        a = np.arange(4)
+        p = np.linspace(0, 100, 6001)
+        ax = plt.gca()
+        lines = [
+            ('linear', None)
+            ('higher', '--')
+            ('lower', '--')
+            ('nearest', '-.')
+            ('midpoint', '-.')
+        ]
+        for interpolation, style in lines:
+            ax.plot(
+                p, np.percentile(a, p, interpolation=interpolation),
+                label=interpolation, linestyle=style)
+        ax.set(
+            title='Interpolation methods for list: ' + str(a),
+            xlabel='Percentile',
+            ylabel='List item returned',
+            yticks=a)
+        ax.legend()
+        plt.show()
+
     """
     q = np.true_divide(q, 100.0)  # handles the asarray for us too
     if not _quantile_is_valid(q):
--
cgit v1.2.1


From 3d5caa2b8cb51851f218283cd2435b00ecce5d16 Mon Sep 17 00:00:00 2001
From: Berend Kapelle
Date: Tue, 3 Apr 2018 21:37:09 +0200
Subject: doc: fix examples in docstring for np.flip

---
 numpy/lib/function_base.py | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 9ccbfafa2..8440be52e 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -184,21 +184,18 @@ def flip(m, axis):
     >>> A
     array([[[0, 1],
             [2, 3]],
-
            [[4, 5],
            [6, 7]]])
     >>> flip(A, 0)
     array([[[4, 5],
             [6, 7]],
-
            [[0, 1],
            [2, 3]]])
     >>> flip(A, 1)
     array([[[2, 3],
             [0, 1]],
-
            [[6, 7],
            [4, 5]]])
--
cgit v1.2.1


From 5d5c379ed5af0df19eec68fdde1f87ad994ce6b1 Mon Sep 17 00:00:00 2001
From: Chun-Wei Yuan
Date: Tue, 16 Jan 2018 15:11:42 -0800
Subject: ENH: Adding np.quantile() and np.nanquantile(). #10199

---
 numpy/lib/function_base.py | 110 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 108 insertions(+), 2 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 8440be52e..6a9c40c75 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -54,7 +54,8 @@ __all__ = [
     'bincount', 'digitize', 'cov', 'corrcoef', 'msort', 'median', 'sinc',
     'hamming', 'hanning', 'bartlett', 'blackman', 'kaiser', 'trapz', 'i0',
     'add_newdoc', 'add_docstring',
-    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
+    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
+    'quantile'
     ]

@@ -3402,7 +3403,7 @@ def percentile(a, q, axis=None, out=None,
         `a` after this function completes is undefined.
     interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
         This optional parameter specifies the interpolation method to
-        use when the desired quantile lies between two data points
+        use when the desired percentile lies between two data points
         ``i < j``:
             * linear: ``i + (j - i) * fraction``, where ``fraction``
               is the fractional part of the index surrounded by ``i``
@@ -3437,6 +3438,7 @@ def percentile(a, q, axis=None, out=None,
     mean
     median : equivalent to ``percentile(..., 50)``
     nanpercentile
+    quantile : equivalent to percentile, except with q in the range [0, 1].

     Notes
     -----
@@ -3512,6 +3514,110 @@ def percentile(a, q, axis=None, out=None,
         a, q, axis, out, overwrite_input, interpolation, keepdims)


+def quantile(a, q, axis=None, out=None,
+             overwrite_input=False, interpolation='linear', keepdims=False):
+    """
+    Compute the `q`th quantile of the data along the specified axis.
+    ..versionadded:: 1.15.0
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    q : array_like of float
+        Quantile or sequence of quantiles to compute, which must be between
+        0 and 1 inclusive.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the quantiles are computed. The
+        default is to compute the quantile(s) along a flattened
+        version of the array.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow the input array `a` to be modified by intermediate
+        calculations, to save memory. In this case, the contents of the input
+        `a` after this function completes is undefined.
+    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+        This optional parameter specifies the interpolation method to
+        use when the desired quantile lies between two data points
+        ``i < j``:
+            * linear: ``i + (j - i) * fraction``, where ``fraction``
+              is the fractional part of the index surrounded by ``i``
+              and ``j``.
+            * lower: ``i``.
+            * higher: ``j``.
+            * nearest: ``i`` or ``j``, whichever is nearest.
+            * midpoint: ``(i + j) / 2``.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in
+        the result as dimensions with size one. With this option, the
+        result will broadcast correctly against the original array `a`.
+
+    Returns
+    -------
+    quantile : scalar or ndarray
+        If `q` is a single quantile and `axis=None`, then the result
+        is a scalar. If multiple quantiles are given, first axis of
+        the result corresponds to the quantiles. The other axes are
+        the axes that remain after the reduction of `a`. If the input
+        contains integers or floats smaller than ``float64``, the output
+        data-type is ``float64``. Otherwise, the output data-type is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean
+    percentile : equivalent to quantile, but with q in the range [0, 100].
+    median : equivalent to ``quantile(..., 0.5)``
+    nanquantile
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+    ``V`` is the value ``q`` of the way from the minimum to the
+    maximum in a sorted copy of ``V``. The values and distances of
+    the two nearest neighbors as well as the `interpolation` parameter
+    will determine the quantile if the normalized ranking does not
+    match the location of ``q`` exactly. This function is the same as
+    the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
+    same as the maximum if ``q=1.0``.
+
+    Examples
+    --------
+    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> a
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> np.quantile(a, 0.5)
+    3.5
+    >>> np.quantile(a, 0.5, axis=0)
+    array([[ 6.5,  4.5,  2.5]])
+    >>> np.quantile(a, 0.5, axis=1)
+    array([ 7.,  2.])
+    >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+    array([[ 7.],
+           [ 2.]])
+    >>> m = np.quantile(a, 0.5, axis=0)
+    >>> out = np.zeros_like(m)
+    >>> np.quantile(a, 0.5, axis=0, out=out)
+    array([[ 6.5,  4.5,  2.5]])
+    >>> m
+    array([[ 6.5,  4.5,  2.5]])
+    >>> b = a.copy()
+    >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+    array([ 7.,  2.])
+    >>> assert not np.all(a == b)
+    """
+    q = np.asanyarray(q)
+    if not _quantile_is_valid(q):
+        raise ValueError("Quantiles must be in the range [0, 1]")
+    return _quantile_unchecked(
+        a, q, axis, out, overwrite_input, interpolation, keepdims)
+
+
 def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                         interpolation='linear', keepdims=False):
     """Assumes that q is in [0, 1], and is an ndarray"""
--
cgit v1.2.1


From df8e83538461c29bc12c44198574bde8ffefcad7 Mon Sep 17 00:00:00 2001
From: mattip
Date: Tue, 17 Apr 2018 13:46:36 +0300
Subject: DOC: clear up warnings, fix matplotlib plot

---
 numpy/lib/function_base.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 8440be52e..0e517ee8f 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3400,17 +3400,19 @@ def percentile(a, q, axis=None, out=None,
         If True, then allow the input array `a` to be modified by intermediate
         calculations, to save memory. In this case, the contents of the input
         `a` after this function completes is undefined.
+
     interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
         This optional parameter specifies the interpolation method to
         use when the desired quantile lies between two data points
         ``i < j``:
-            * linear: ``i + (j - i) * fraction``, where ``fraction``
-              is the fractional part of the index surrounded by ``i``
-              and ``j``.
-            * lower: ``i``.
-            * higher: ``j``.
-            * nearest: ``i`` or ``j``, whichever is nearest.
-            * midpoint: ``(i + j) / 2``.
+
+        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
+          is the fractional part of the index surrounded by ``i``
+          and ``j``.
+        * 'lower': ``i``.
+        * 'higher': ``j``.
+        * 'nearest': ``i`` or ``j``, whichever is nearest.
+        * 'midpoint': ``(i + j) / 2``.

         .. versionadded:: 1.9.0

     keepdims : bool, optional
@@ -3479,18 +3481,19 @@ def percentile(a, q, axis=None, out=None,

     The different types of interpolation can be visualized graphically:

-    ..plot::
+    .. plot::
+
         import matplotlib.pyplot as plt

         a = np.arange(4)
         p = np.linspace(0, 100, 6001)
         ax = plt.gca()
         lines = [
-            ('linear', None)
-            ('higher', '--')
-            ('lower', '--')
-            ('nearest', '-.')
-            ('midpoint', '-.')
+            ('linear', None),
+            ('higher', '--'),
+            ('lower', '--'),
+            ('nearest', '-.'),
+            ('midpoint', '-.'),
         ]
         for interpolation, style in lines:
             ax.plot(
--
cgit v1.2.1


From 9d1d44fc120c451a4925720027e85c70e13c26f2 Mon Sep 17 00:00:00 2001
From: Junjie Bai
Date: Tue, 10 Apr 2018 01:24:35 -0700
Subject: ENH: Extend np.flip to work over multiple axes

Closes #10847
---
 numpy/lib/function_base.py | 50 ++++++++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 13 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 8440be52e..a4516c276 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -145,7 +145,7 @@ def rot90(m, k=1, axes=(0,1)):
     return flip(transpose(m, axes_list), axes[1])


-def flip(m, axis):
+def flip(m, axis=None):
     """
     Reverse the order of elements in an array along the given axis.

@@ -157,9 +157,16 @@ def flip(m, axis):
     ----------
     m : array_like
         Input array.
-    axis : integer
-        Axis in array, which entries are reversed.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which to flip over. The default,
+        axis=None, will flip over all of the axes of the input array.
+        If axis is negative it counts from the last to the first axis.
+
+        If axis is a tuple of ints, flipping is performed on all of the axes
+        specified in the tuple.
+
+        .. versionchanged:: 1.15.0
+           None and tuples of axes are supported

     Returns
     -------
@@ -175,9 +182,17 @@ def flip(m, axis):
     Notes
     -----
     flip(m, 0) is equivalent to flipud(m).
+
     flip(m, 1) is equivalent to fliplr(m).
+
     flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+
+    flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
+    positions.
+
+    flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
+    position 0 and position 1.
+
     Examples
     --------
     >>> A = np.arange(8).reshape((2,2,2))
@@ -186,32 +201,41 @@ def flip(m, axis):
             [2, 3]],
            [[4, 5],
            [6, 7]]])
-
     >>> flip(A, 0)
     array([[[4, 5],
             [6, 7]],
            [[0, 1],
            [2, 3]]])
-
     >>> flip(A, 1)
     array([[[2, 3],
             [0, 1]],
            [[6, 7],
            [4, 5]]])
-
+    >>> np.flip(A)
+    array([[[7, 6],
+            [5, 4]],
+           [[3, 2],
+            [1, 0]]])
+    >>> np.flip(A, (0, 2))
+    array([[[5, 4],
+            [7, 6]],
+           [[1, 0],
+            [3, 2]]])
     >>> A = np.random.randn(3,4,5)
     >>> np.all(flip(A,2) == A[:,:,::-1,...])
     True
     """
     if not hasattr(m, 'ndim'):
         m = asarray(m)
-    indexer = [slice(None)] * m.ndim
-    try:
-        indexer[axis] = slice(None, None, -1)
-    except IndexError:
-        raise ValueError("axis=%i is invalid for the %i-dimensional input array"
-                         % (axis, m.ndim))
-    return m[tuple(indexer)]
+    if axis is None:
+        indexer = (np.s_[::-1],) * m.ndim
+    else:
+        axis = _nx.normalize_axis_tuple(axis, m.ndim)
+        indexer = [np.s_[:]] * m.ndim
+        for ax in axis:
+            indexer[ax] = np.s_[::-1]
+        indexer = tuple(indexer)
+    return m[indexer]


 def iterable(y):
--
cgit v1.2.1


From 92f85239dad607540a1fa3124e41c7b357caf7fe Mon Sep 17 00:00:00 2001
From: Andras Deak
Date: Fri, 27 Apr 2018 14:05:07 +0200
Subject: DOC: Make doc examples using StringIO python2-3 compatible

---
 numpy/lib/function_base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 72beef471..a6e3e07d3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1633,9 +1633,9 @@ def disp(mesg, device=None, linefeed=True):
     Besides ``sys.stdout``, a file-like object can also be used as it has
     both required methods:

-    >>> from StringIO import StringIO
+    >>> from io import StringIO
     >>> buf = StringIO()
-    >>> np.disp('"Display" in a file', device=buf)
+    >>> np.disp(u'"Display" in a file', device=buf)
     >>> buf.getvalue()
     '"Display" in a file\\n'
--
cgit v1.2.1


From 6148f529bc75085f96f5592b6e1d9c6445540492 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miguel=20S=C3=A1nchez=20de=20Le=C3=B3n=20Peque?=
Date: Thu, 31 May 2018 13:08:16 +0200
Subject: DOC: Fix doctest formatting in `rot90()` examples

---
 numpy/lib/function_base.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index a6e3e07d3..70aa654dc 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -109,9 +109,8 @@ def rot90(m, k=1, axes=(0,1)):
     >>> np.rot90(m, 1, (1,2))
     array([[[1, 3],
             [0, 2]],
-
-           [[5, 7],
-            [4, 6]]])
+           [[5, 7],
+            [4, 6]]])

     """
     axes = tuple(axes)
--
cgit v1.2.1


From af66e487a57bfd4850f4306e3b85d1dac3c70412 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20D=C3=B6pfert?=
Date: Fri, 8 Jun 2018 18:22:01 +0200
Subject: DOC: make docstring of np.interp clearer (#11280)

* update docstring of interp()
* change "calculate" to "evaluate" to be consistent
* remove "values"
* make "data points" consistent
---
 numpy/lib/function_base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 70aa654dc..95edb95fa 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1181,12 +1181,12 @@ def interp(x, xp, fp, left=None, right=None, period=None):
     One-dimensional linear interpolation.

     Returns the one-dimensional piecewise linear interpolant to a function
-    with given values at discrete data-points.
+    with given discrete data points (`xp`, `fp`), evaluated at `x`.

     Parameters
     ----------
     x : array_like
-        The x-coordinates of the interpolated values.
+        The x-coordinates at which to evaluate the interpolated values.

     xp : 1-D sequence of floats
         The x-coordinates of the data points, must be increasing if argument
--
cgit v1.2.1


From d4b43bb148678516c11d8a0b0cae9b53635f9ee0 Mon Sep 17 00:00:00 2001
From: mattip
Date: Fri, 15 Jun 2018 10:51:39 -0700
Subject: DOC: typos, small fixes

---
 numpy/lib/function_base.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 95edb95fa..c06065ba6 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3543,7 +3543,7 @@ def percentile(a, q, axis=None, out=None,
 def quantile(a, q, axis=None, out=None,
              overwrite_input=False, interpolation='linear', keepdims=False):
     """
-    Compute the `q`th quantile of the data along the specified axis.
+    Compute the qth quantile of the data along the specified axis.
     ..versionadded:: 1.15.0

     Parameters
@@ -3569,6 +3569,7 @@ def quantile(a, q, axis=None, out=None,
         This optional parameter specifies the interpolation method to
         use when the desired quantile lies between two data points
         ``i < j``:
+
             * linear: ``i + (j - i) * fraction``, where ``fraction``
               is the fractional part of the index surrounded by ``i``
               and ``j``.
--
cgit v1.2.1


From 83828f52b287fefb3d8753a21bd3441997a4d687 Mon Sep 17 00:00:00 2001
From: Mike Toews
Date: Sat, 16 Jun 2018 18:18:19 +1200
Subject: HTTP -> HTTPS, and other linkrot fixes

---
 numpy/lib/function_base.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 95edb95fa..128da22c6 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1649,7 +1649,7 @@ def disp(mesg, device=None, linefeed=True):
     return


-# See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
+# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
 _DIMENSION_NAME = r'\w+'
 _CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
 _ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
@@ -1906,7 +1906,7 @@ class vectorize(object):
     References
     ----------
     .. [1] NumPy Reference, section `Generalized Universal Function API
-           <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
+           <https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.

     """
     def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
@@ -2561,7 +2561,7 @@ def bartlett(M):
     .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
            Processing", Prentice-Hall, 1999, pp. 468-471.
     .. [4] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
+           https://en.wikipedia.org/wiki/Window_function
     .. [5] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 429.

@@ -2661,7 +2661,7 @@ def hanning(M):
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
+           https://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.

@@ -2759,7 +2759,7 @@ def hamming(M):
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
+           https://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.

@@ -3036,7 +3036,7 @@ def kaiser(M, beta):
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
-           http://en.wikipedia.org/wiki/Window_function
+           https://en.wikipedia.org/wiki/Window_function

    Examples
    --------
@@ -3124,7 +3124,7 @@ def sinc(x):
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
           Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
-           http://en.wikipedia.org/wiki/Sinc_function
+           https://en.wikipedia.org/wiki/Sinc_function

    Examples
    --------
@@ -3840,10 +3840,10 @@ def trapz(y, x=None, dx=1.0, axis=-1):

    References
    ----------
-    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
+    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule

    .. [2] Illustration image:
-           http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+           https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png

    Examples
    --------
--
cgit v1.2.1


From 6d601e51cdce8420b4bea383ee4a17ae8ff2969c Mon Sep 17 00:00:00 2001
From: mattip
Date: Sun, 24 Jun 2018 16:27:37 -0700
Subject: fixes from review

---
 numpy/lib/function_base.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index c06065ba6..07c3f1478 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3398,9 +3398,9 @@ def _median(a, axis=None, out=None, overwrite_input=False):
 def percentile(a, q, axis=None, out=None,
                overwrite_input=False, interpolation='linear', keepdims=False):
     """
-    Compute the qth percentile of the data along the specified axis.
+    Compute the q-th percentile of the data along the specified axis.

-    Returns the qth percentile(s) of the array elements.
+    Returns the q-th percentile(s) of the array elements.

     Parameters
     ----------
@@ -3467,7 +3467,7 @@ def percentile(a, q, axis=None, out=None,

     Notes
     -----
-    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
+    Given a vector ``V`` of length ``N``, the q-th percentile of
     ``V`` is the value ``q/100`` of the way from the minimum to the
     maximum in a sorted copy of ``V``. The values and distances of
     the two nearest neighbors as well as the `interpolation` parameter
@@ -3543,7 +3543,7 @@ def percentile(a, q, axis=None, out=None,
 def quantile(a, q, axis=None, out=None,
              overwrite_input=False, interpolation='linear', keepdims=False):
     """
-    Compute the qth quantile of the data along the specified axis.
+    Compute the q-th quantile of the data along the specified axis.
     ..versionadded:: 1.15.0

     Parameters
@@ -3603,7 +3603,7 @@ def quantile(a, q, axis=None, out=None,

     Notes
     -----
-    Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+    Given a vector ``V`` of length ``N``, the q-th quantile of
     ``V`` is the value ``q`` of the way from the minimum to the
     maximum in a sorted copy of ``V``. The values and distances of
     the two nearest neighbors as well as the `interpolation` parameter
@@ -3721,7 +3721,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
         indices = concatenate((indices, [-1]))

     ap.partition(indices, axis=axis)
-    # ensure axis with qth is first
+    # ensure axis with q-th is first
     ap = np.moveaxis(ap, axis, 0)
     axis = 0

@@ -3754,7 +3754,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,

         ap.partition(concatenate((indices_below, indices_above)), axis=axis)

-        # ensure axis with qth is first
+        # ensure axis with q-th is first
         ap = np.moveaxis(ap, axis, 0)
         weights_below = np.moveaxis(weights_below, axis, 0)
         weights_above = np.moveaxis(weights_above, axis, 0)
@@ -3768,7 +3768,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
     x1 = take(ap, indices_below, axis=axis) * weights_below
     x2 = take(ap, indices_above, axis=axis) * weights_above

-    # ensure axis with qth is first
+    # ensure axis with q-th is first
     x1 = np.moveaxis(x1, axis, 0)
     x2 = np.moveaxis(x2, axis, 0)
--
cgit v1.2.1


From 2244cd929354fb4157eaa78204ad6bb3bebea9bf Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Sun, 1 Jul 2018 15:13:21 -0700
Subject: MAINT: Move add_newdocs into core, since it only adds docs to those
 pieces

---
 numpy/lib/function_base.py | 36 +-----------------------------------
 1 file changed, 1 insertion(+), 35 deletions(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 26ef3e235..9a680dd55 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -27,6 +27,7 @@ from numpy.core.fromnumeric import (
     ravel, nonzero, sort, partition, mean, any, sum
     )
 from numpy.core.numerictypes import typecodes, number
+from numpy.core.function_base import add_newdoc
 from numpy.lib.twodim_base import diag
 from .utils import deprecate
 from numpy.core.multiarray import (
@@ -3892,41 +3893,6 @@ def trapz(y, x=None, dx=1.0, axis=-1):
     return ret


-#always succeed
-def add_newdoc(place, obj, doc):
-    """
-    Adds documentation to obj which is in module place.
-
-    If doc is a string add it to obj as a docstring
-
-    If doc is a tuple, then the first element is interpreted as
-       an attribute of obj and the second as the docstring
-          (method, docstring)
-
-    If doc is a list, then each element of the list should be a
-       sequence of length two --> [(method1, docstring1),
-       (method2, docstring2), ...]
-
-    This routine never raises an error.
-
-    This routine cannot modify read-only docstrings, as appear
-    in new-style classes or built-in functions. Because this
-    routine never raises an error the caller must check manually
-    that the docstrings were changed.
-    """
-    try:
-        new = getattr(__import__(place, globals(), {}, [obj]), obj)
-        if isinstance(doc, str):
-            add_docstring(new, doc.strip())
-        elif isinstance(doc, tuple):
-            add_docstring(getattr(new, doc[0]), doc[1].strip())
-        elif isinstance(doc, list):
-            for val in doc:
-                add_docstring(getattr(new, val[0]), val[1].strip())
-    except Exception:
-        pass
-
-
 # Based on scitools meshgrid
 def meshgrid(*xi, **kwargs):
     """
--
cgit v1.2.1


From 307dd76cb0fb770e07524e4cd29cd08e17edc2c3 Mon Sep 17 00:00:00 2001
From: Eric Wieser
Date: Fri, 29 Jun 2018 23:56:38 -0700
Subject: BUG: Don't convert inputs to `np.float64` in digitize

This converts digitize to a pure-python function that falls back on
searchsorted.

Performance doesn't really matter here anyway - if you care about
performance, then you should just call searchsorted directly, rather
than checking the order of the bins.

Partially fixes gh-11022
---
 numpy/lib/function_base.py | 112 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 111 insertions(+), 1 deletion(-)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 9a680dd55..1a43da8b0 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -31,7 +31,7 @@ from numpy.core.function_base import add_newdoc
 from numpy.lib.twodim_base import diag
 from .utils import deprecate
 from numpy.core.multiarray import (
-    _insert, add_docstring, digitize, bincount, normalize_axis_index,
+    _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
     interp as compiled_interp, interp_complex as compiled_interp_complex
     )
 from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
@@ -4493,3 +4493,113 @@ def append(arr, values, axis=None):
         values = ravel(values)
         axis = arr.ndim-1
     return concatenate((arr, values), axis=axis)
+
+
+def digitize(x, bins, right=False):
+    """
+    Return the indices of the bins to which each value in input array belongs.
+
+    =========  =============  ============================
+    `right`    order of bins  returned index `i` satisfies
+    =========  =============  ============================
+    ``False``  increasing     ``bins[i-1] <= x < bins[i]``
+    ``True``   increasing     ``bins[i-1] < x <= bins[i]``
+    ``False``  decreasing     ``bins[i-1] > x >= bins[i]``
+    ``True``   decreasing     ``bins[i-1] >= x > bins[i]``
+    =========  =============  ============================
+
+    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+    returned as appropriate.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array to be binned. Prior to NumPy 1.10.0, this array had to
+        be 1-dimensional, but can now have any shape.
+    bins : array_like
+        Array of bins. It has to be 1-dimensional and monotonic.
+    right : bool, optional
+        Indicating whether the intervals include the right or the left bin
+        edge. Default behavior is (right==False) indicating that the interval
+        does not include the right edge. The left bin end is open in this
+        case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
+        monotonically increasing bins.
+
+    Returns
+    -------
+    indices : ndarray of ints
+        Output array of indices, of same shape as `x`.
+
+    Raises
+    ------
+    ValueError
+        If `bins` is not monotonic.
+    TypeError
+        If the type of the input is complex.
+
+    See Also
+    --------
+    bincount, histogram, unique, searchsorted
+
+    Notes
+    -----
+    If values in `x` are such that they fall outside the bin range,
+    attempting to index `bins` with the indices that `digitize` returns
+    will result in an IndexError.
+
+    .. versionadded:: 1.10.0
+
+    `np.digitize` is implemented in terms of `np.searchsorted`. This means
+    that a binary search is used to bin the values, which scales much better
+    for larger number of bins than the previous linear search. It also removes
+    the requirement for the input array to be 1-dimensional.
+
+    For monotonically _increasing_ `bins`, the following are equivalent::
+
+        np.digitize(x, bins, right=True)
+        np.searchsorted(bins, x, side='left')
+
+    Note that as the order of the arguments are reversed, the side must be too.
+    The `searchsorted` call is marginally faster, as it does not do any
+    monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+    Examples
+    --------
+    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+    >>> inds = np.digitize(x, bins)
+    >>> inds
+    array([1, 4, 3, 2])
+    >>> for n in range(x.size):
+    ...     print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+    ...
+    0.0 <= 0.2 < 1.0
+    4.0 <= 6.4 < 10.0
+    2.5 <= 3.0 < 4.0
+    1.0 <= 1.6 < 2.5
+
+    >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+    >>> bins = np.array([0, 5, 10, 15, 20])
+    >>> np.digitize(x,bins,right=True)
+    array([1, 2, 3, 4, 4])
+    >>> np.digitize(x,bins,right=False)
+    array([1, 3, 3, 4, 5])
+    """
+    x = _nx.asarray(x)
+    bins = _nx.asarray(bins)
+
+    # here for compatibility, searchsorted below is happy to take this
+    if np.issubdtype(x.dtype, _nx.complexfloating):
+        raise TypeError("x may not be complex")
+
+    mono = _monotonicity(bins)
+    if mono == 0:
+        raise ValueError("bins must be monotonically increasing or decreasing")
+
+    # this is backwards because the arguments below are swapped
+    side = 'left' if right else 'right'
+    if mono == -1:
+        # reverse the bins, and invert the results
+        return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+    else:
+        return _nx.searchsorted(bins, x, side=side)
--
cgit v1.2.1


From d5566273395436e03242273b0f81300fd4534070 Mon Sep 17 00:00:00 2001
From: Heath Henley
Date: Fri, 27 Jul 2018 00:55:48 -0400
Subject: DOC: Show plot in meshgrid example.

---
 numpy/lib/function_base.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 1a43da8b0..742b46795 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3994,6 +3994,7 @@ def meshgrid(*xi, **kwargs):
     >>> xx, yy = np.meshgrid(x, y, sparse=True)
     >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
     >>> h = plt.contourf(x,y,z)
+    >>> plt.show()

     """
     ndim = len(xi)
--
cgit v1.2.1


From 62adb3f3bd3e6be1bce9debbcb7ebffdd493ed54 Mon Sep 17 00:00:00 2001
From: Heath Henley
Date: Fri, 27 Jul 2018 14:35:35 -0400
Subject: DOC: Import matplotlib to show contourf in docs.

---
 numpy/lib/function_base.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'numpy/lib/function_base.py')

diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 742b46795..9f8b43c4b 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3989,6 +3989,7 @@ def meshgrid(*xi, **kwargs):

     `meshgrid` is very useful to evaluate functions on a grid.

+    >>> import matplotlib.pyplot as plt
     >>> x = np.arange(-5, 5, 0.1)
     >>> y = np.arange(-5, 5, 0.1)
     >>> xx, yy = np.meshgrid(x, y, sparse=True)
     >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
     >>> h = plt.contourf(x,y,z)
--
cgit v1.2.1
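As a closing illustration of the `digitize` change above, a hedged sketch
of the documented `searchsorted` equivalence (not from the patches
themselves; assumes monotonically increasing bins):

>>> import numpy as np
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> np.digitize(x, bins, right=True)
array([1, 4, 3, 2])
>>> np.searchsorted(bins, x, side='left')  # same result, arguments swapped
array([1, 4, 3, 2])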