Diffstat (limited to 'numpy/lib')
-rw-r--r-- | numpy/lib/_datasource.py              |  24
-rw-r--r-- | numpy/lib/_iotools.py                 |  24
-rw-r--r-- | numpy/lib/_version.py                 |   5
-rw-r--r-- | numpy/lib/arraypad.py                 |   4
-rw-r--r-- | numpy/lib/arraysetops.py              |  52
-rw-r--r-- | numpy/lib/arrayterator.py             |   7
-rw-r--r-- | numpy/lib/financial.py                |  26
-rw-r--r-- | numpy/lib/function_base.py            | 317
-rw-r--r-- | numpy/lib/histograms.py               |   9
-rw-r--r-- | numpy/lib/index_tricks.py             |  10
-rw-r--r-- | numpy/lib/nanfunctions.py             | 101
-rw-r--r-- | numpy/lib/npyio.py                    |  60
-rw-r--r-- | numpy/lib/polynomial.py               |  47
-rw-r--r-- | numpy/lib/recfunctions.py             | 102
-rw-r--r-- | numpy/lib/scimath.py                  |  45
-rw-r--r-- | numpy/lib/shape_base.py               | 170
-rw-r--r-- | numpy/lib/tests/test_arraysetops.py   |   8
-rw-r--r-- | numpy/lib/tests/test_function_base.py | 144
-rw-r--r-- | numpy/lib/tests/test_recfunctions.py  |   6
-rw-r--r-- | numpy/lib/twodim_base.py              |  48
-rw-r--r-- | numpy/lib/type_check.py               |  37
-rw-r--r-- | numpy/lib/ufunclike.py                |  12
-rw-r--r-- | numpy/lib/utils.py                    |  23
23 files changed, 651 insertions, 630 deletions
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 30237b76f..3a0e67f60 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -20,17 +20,18 @@ gzip, bz2 and xz are supported. Example:: >>> # Create a DataSource, use os.curdir (default) for local storage. - >>> ds = datasource.DataSource() + >>> from numpy import DataSource + >>> ds = DataSource() >>> >>> # Open a remote file. >>> # DataSource downloads the file, stores it locally in: >>> # './www.google.com/index.html' >>> # opens the file and returns a file object. - >>> fp = ds.open('http://www.google.com/index.html') + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP >>> >>> # Use the file as you normally would - >>> fp.read() - >>> fp.close() + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP """ from __future__ import division, absolute_import, print_function @@ -156,6 +157,7 @@ class _FileOpeners(object): Examples -------- + >>> import gzip >>> np.lib._datasource._file_openers.keys() [None, '.bz2', '.gz', '.xz', '.lzma'] >>> np.lib._datasource._file_openers['.gz'] is gzip.open @@ -290,7 +292,7 @@ class DataSource(object): URLs require a scheme string (``http://``) to be used, without it they will fail:: - >>> repos = DataSource() + >>> repos = np.DataSource() >>> repos.exists('www.google.com/index.html') False >>> repos.exists('http://www.google.com/index.html') @@ -302,17 +304,17 @@ class DataSource(object): -------- :: - >>> ds = DataSource('/home/guido') - >>> urlname = 'http://www.google.com/index.html' - >>> gfile = ds.open('http://www.google.com/index.html') # remote file + >>> ds = np.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') >>> ds.abspath(urlname) - '/home/guido/www.google.com/site/index.html' + '/home/guido/www.google.com/index.html' - >>> ds = DataSource(None) # use with temporary file + >>> ds = np.DataSource(None) # use with temporary file >>> ds.open('/home/guido/foobar.txt') <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430> >>> ds.abspath('/home/guido/foobar.txt') - '/tmp/tmpy4pgsP/home/guido/foobar.txt' + '/tmp/.../home/guido/foobar.txt' """ diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 8a042f190..0ebd39b8c 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -146,11 +146,17 @@ def flatten_dtype(ndtype, flatten_base=False): >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... ('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), - dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), - dtype('int32')] + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] """ names = ndtype.names @@ -309,13 +315,13 @@ class NameValidator(object): -------- >>> validator = np.lib._iotools.NameValidator() >>> validator(['file', 'field2', 'with space', 'CaSe']) - ['file_', 'field2', 'with_space', 'CaSe'] + ('file_', 'field2', 'with_space', 'CaSe') >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], - deletechars='q', - case_sensitive='False') + ... deletechars='q', + ... 
case_sensitive=False) >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) - ['excl_', 'field2', 'no_', 'with_space', 'case'] + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') """ # @@ -599,7 +605,7 @@ class StringConverter(object): -------- >>> import dateutil.parser >>> import datetime - >>> dateparser = datetustil.parser.parse + >>> dateparser = dateutil.parser.parse >>> defaultdate = datetime.date(2000, 1, 1) >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) """ diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index c3563a7fa..8aa999fc9 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -47,9 +47,12 @@ class NumpyVersion(): >>> from numpy.lib import NumpyVersion >>> if NumpyVersion(np.__version__) < '1.7.0': ... print('skip') - skip + >>> # skip >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ... + ValueError: Not a valid numpy version string """ diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index 4f6371058..b236cc449 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -1100,10 +1100,10 @@ def pad(array, pad_width, mode, **kwargs): -------- >>> a = [1, 2, 3, 4, 5] >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6)) - array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) + array([4, 4, 1, ..., 6, 6, 6]) >>> np.pad(a, (2, 3), 'edge') - array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) + array([1, 1, 1, ..., 5, 5, 5]) >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index fd64ecbd6..b53d8c03f 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -82,7 +82,7 @@ def ediff1d(ary, to_end=None, to_begin=None): array([ 1, 2, 3, -7]) >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - array([-99, 1, 2, 3, -7, 88, 99]) + array([-99, 1, 2, ..., -7, 88, 99]) The returned array is always 1D. 
@@ -94,8 +94,7 @@ def ediff1d(ary, to_end=None, to_begin=None): # force a 1d array ary = np.asanyarray(ary).ravel() - # we have unit tests enforcing - # propagation of the dtype of input + # enforce propagation of the dtype of input # ary to returned result dtype_req = ary.dtype @@ -106,23 +105,22 @@ def ediff1d(ary, to_end=None, to_begin=None): if to_begin is None: l_begin = 0 else: - to_begin = np.asanyarray(to_begin) - if not np.can_cast(to_begin, dtype_req): - raise TypeError("dtype of to_begin must be compatible " - "with input ary") - - to_begin = to_begin.ravel() + _to_begin = np.asanyarray(to_begin, dtype=dtype_req) + if not np.all(_to_begin == to_begin): + raise ValueError("cannot convert 'to_begin' to array with dtype " + "'%r' as required for input ary" % dtype_req) + to_begin = _to_begin.ravel() l_begin = len(to_begin) if to_end is None: l_end = 0 else: - to_end = np.asanyarray(to_end) - if not np.can_cast(to_end, dtype_req): - raise TypeError("dtype of to_end must be compatible " - "with input ary") - - to_end = to_end.ravel() + _to_end = np.asanyarray(to_end, dtype=dtype_req) + # check that casting has not overflowed + if not np.all(_to_end == to_end): + raise ValueError("cannot convert 'to_end' to array with dtype " + "'%r' as required for input ary" % dtype_req) + to_end = _to_end.ravel() l_end = len(to_end) # do the calculation in place and copy to_begin and to_end @@ -241,13 +239,11 @@ def unique(ar, return_index=False, return_inverse=False, >>> a = np.array(['a', 'b', 'b', 'c', 'a']) >>> u, indices = np.unique(a, return_index=True) >>> u - array(['a', 'b', 'c'], - dtype='|S1') + array(['a', 'b', 'c'], dtype='<U1') >>> indices array([0, 1, 3]) >>> a[indices] - array(['a', 'b', 'c'], - dtype='|S1') + array(['a', 'b', 'c'], dtype='<U1') Reconstruct the input array from the unique values: @@ -256,9 +252,9 @@ def unique(ar, return_index=False, return_inverse=False, >>> u array([1, 2, 3, 4, 6]) >>> indices - array([0, 1, 4, 3, 1, 2, 1]) + array([0, 1, 4, ..., 1, 2, 1]) >>> u[indices] - array([1, 2, 6, 4, 2, 3, 2]) + array([1, 2, 6, ..., 2, 3, 2]) """ ar = np.asanyarray(ar) @@ -661,8 +657,8 @@ def isin(element, test_elements, assume_unique=False, invert=False): >>> test_elements = [1, 2, 4, 8] >>> mask = np.isin(element, test_elements) >>> mask - array([[ False, True], - [ True, False]]) + array([[False, True], + [ True, False]]) >>> element[mask] array([2, 4]) @@ -676,7 +672,7 @@ def isin(element, test_elements, assume_unique=False, invert=False): >>> mask = np.isin(element, test_elements, invert=True) >>> mask array([[ True, False], - [ False, True]]) + [False, True]]) >>> element[mask] array([0, 6]) @@ -685,14 +681,14 @@ def isin(element, test_elements, assume_unique=False, invert=False): >>> test_set = {1, 2, 4, 8} >>> np.isin(element, test_set) - array([[ False, False], - [ False, False]]) + array([[False, False], + [False, False]]) Casting the set to a list gives the expected result: >>> np.isin(element, list(test_set)) - array([[ False, True], - [ True, False]]) + array([[False, True], + [ True, False]]) """ element = np.asarray(element) return in1d(element, test_elements, assume_unique=assume_unique, diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py index f2d4fe9fd..c16668582 100644 --- a/numpy/lib/arrayterator.py +++ b/numpy/lib/arrayterator.py @@ -80,9 +80,8 @@ class Arrayterator(object): >>> for subarr in a_itor: ... if not subarr.all(): - ... print(subarr, subarr.shape) - ... - [[[[0 1]]]] (1, 1, 1, 2) + ... 
print(subarr, subarr.shape) # doctest: +SKIP + >>> # [[[[0 1]]]] (1, 1, 1, 2) """ @@ -160,7 +159,7 @@ class Arrayterator(object): ... if not subarr: ... print(subarr, type(subarr)) ... - 0 <type 'numpy.int32'> + 0 <class 'numpy.int64'> """ for block in self: diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index e1e297492..216687475 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -127,7 +127,7 @@ def fv(rate, nper, pmt, pv, when='end'): >>> a = np.array((0.05, 0.06, 0.07))/12 >>> np.fv(a, 10*12, -100, -100) - array([ 15692.92889434, 16569.87435405, 17509.44688102]) + array([ 15692.92889434, 16569.87435405, 17509.44688102]) # may vary """ when = _convert_when(when) @@ -275,7 +275,7 @@ def nper(rate, pmt, pv, fv=0, when='end'): If you only had $150/month to pay towards the loan, how long would it take to pay-off a loan of $8,000 at 7% annual interest? - >>> print(round(np.nper(0.07/12, -150, 8000), 5)) + >>> print(np.round(np.nper(0.07/12, -150, 8000), 5)) 64.07335 So, over 64 months would be required to pay off the loan. @@ -286,10 +286,10 @@ def nper(rate, pmt, pv, fv=0, when='end'): >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, ... -150 : -99 : 50 , ... 8000 : 9001 : 1000])) - array([[[ 64.07334877, 74.06368256], - [ 108.07548412, 127.99022654]], - [[ 66.12443902, 76.87897353], - [ 114.70165583, 137.90124779]]]) + array([[[ 64.07334877, 74.06368256], + [108.07548412, 127.99022654]], + [[ 66.12443902, 76.87897353], + [114.70165583, 137.90124779]]]) """ when = _convert_when(when) @@ -539,7 +539,7 @@ def pv(rate, nper, pmt, fv=0, when='end'): >>> a = np.array((0.05, 0.04, 0.03))/12 >>> np.pv(a, 10*12, -100, 15692.93) - array([ -100.00067132, -649.26771385, -1273.78633713]) + array([ -100.00067132, -649.26771385, -1273.78633713]) # may vary So, to end up with the same $15692.93 under the same $100 per month "savings plan," for annual interest rates of 4% and 3%, one would @@ -704,15 +704,15 @@ def irr(values): Examples -------- - >>> round(irr([-100, 39, 59, 55, 20]), 5) + >>> round(np.irr([-100, 39, 59, 55, 20]), 5) 0.28095 - >>> round(irr([-100, 0, 0, 74]), 5) + >>> round(np.irr([-100, 0, 0, 74]), 5) -0.0955 - >>> round(irr([-100, 100, 0, -7]), 5) + >>> round(np.irr([-100, 100, 0, -7]), 5) -0.0833 - >>> round(irr([-100, 100, 0, 7]), 5) + >>> round(np.irr([-100, 100, 0, 7]), 5) 0.06206 - >>> round(irr([-5, 10.5, 1, -8, 1]), 5) + >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5) 0.0886 (Compare with the Example given for numpy.lib.financial.npv) @@ -777,7 +777,7 @@ def npv(rate, values): Examples -------- >>> np.npv(0.281,[-100, 39, 59, 55, 20]) - -0.0084785916384548798 + -0.0084785916384548798 # may vary (Compare with the Example given for numpy.lib.financial.irr) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 5f87c8b2c..b61a64b8e 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -218,12 +218,12 @@ def flip(m, axis=None): [2, 3]], [[4, 5], [6, 7]]]) - >>> flip(A, 0) + >>> np.flip(A, 0) array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) - >>> flip(A, 1) + >>> np.flip(A, 1) array([[[2, 3], [0, 1]], [[6, 7], @@ -239,7 +239,7 @@ def flip(m, axis=None): [[1, 0], [3, 2]]]) >>> A = np.random.randn(3,4,5) - >>> np.all(flip(A,2) == A[:,:,::-1,...]) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) True """ if not hasattr(m, 'ndim'): @@ -359,7 +359,7 @@ def average(a, axis=None, weights=None, returned=False): Examples -------- - >>> data = range(1,5) + >>> data = list(range(1,5)) >>> data [1, 2, 3, 4] >>> np.average(data) @@ 
-373,11 +373,10 @@ def average(a, axis=None, weights=None, returned=False): [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) - array([ 0.75, 2.75, 4.75]) + array([0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) - Traceback (most recent call last): - ... + ... TypeError: Axis must be specified when shapes of a and weights differ. >>> a = np.ones(5, dtype=np.float128) @@ -586,7 +585,7 @@ def piecewise(x, condlist, funclist, *args, **kw): ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) - array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. @@ -671,7 +670,7 @@ def select(condlist, choicelist, default=0): >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) - array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) + array([ 0, 1, 2, ..., 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. @@ -854,9 +853,9 @@ def gradient(f, *varargs, **kwargs): -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) >>> np.gradient(f) - array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) - array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) Spacing can be also specified with an array that represents the coordinates of the values F along the dimensions. @@ -864,13 +863,13 @@ def gradient(f, *varargs, **kwargs): >>> x = np.arange(f.size) >>> np.gradient(f, x) - array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) Or a non uniform one: >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) >>> np.gradient(f, x) - array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in @@ -878,8 +877,8 @@ def gradient(f, *varargs, **kwargs): >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], - [ 1. , 1. , 1. ]])] + [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])] In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 @@ -888,17 +887,17 @@ def gradient(f, *varargs, **kwargs): >>> y = [1., 1.5, 3.5] >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) [array([[ 1. , 1. , -0.5], - [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], - [ 2. , 1.7, 0.5]])] + [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])] It is possible to specify how boundaries are treated using `edge_order` >>> x = np.array([0, 1, 2, 3, 4]) >>> f = x**2 >>> np.gradient(f, edge_order=1) - array([ 1., 2., 4., 6., 7.]) + array([1., 2., 4., 6., 7.]) >>> np.gradient(f, edge_order=2) - array([-0., 2., 4., 6., 8.]) + array([0., 2., 4., 6., 8.]) The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated @@ -1151,7 +1150,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): """ Calculate the n-th discrete difference along the given axis. - The first difference is given by ``out[n] = a[n+1] - a[n]`` along + The first difference is given by ``out[i] = a[i+1] - a[i]`` along the given axis, higher differences are calculated by using `diff` recursively. @@ -1200,7 +1199,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): >>> np.diff(u8_arr) array([255], dtype=uint8) >>> u8_arr[1,...] 
- u8_arr[0,...] - array(255, np.uint8) + 255 If this is not desirable, then the array should be cast to a larger integer type first: @@ -1340,7 +1339,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) - array([ 3. , 3. , 2.5 , 0.56, 0. ]) + array([3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 @@ -1364,7 +1363,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) - array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) Complex interpolation: @@ -1372,7 +1371,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): >>> xp = [2,3,5] >>> fp = [1.0j, 0, 2+3j] >>> np.interp(x, xp, fp) - array([ 0.+1.j , 1.+1.5j]) + array([0.+1.j , 1.+1.5j]) """ @@ -1445,7 +1444,7 @@ def angle(z, deg=False): Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians - array([ 0. , 1.57079633, 0.78539816]) + array([ 0. , 1.57079633, 0.78539816]) # may vary >>> np.angle(1+1j, deg=True) # in degrees 45.0 @@ -1505,9 +1504,9 @@ def unwrap(p, discont=pi, axis=-1): >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase - array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary >>> np.unwrap(phase) - array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary """ p = asarray(p) @@ -1547,10 +1546,10 @@ def sort_complex(a): Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) - array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) - array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) @@ -1596,7 +1595,7 @@ def trim_zeros(filt, trim='fb'): array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') - array([0, 0, 0, 1, 2, 3, 0, 2, 1]) + array([0, 0, 0, ..., 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. @@ -1958,11 +1957,11 @@ class vectorize(object): >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) - <type 'numpy.int32'> + <class 'numpy.int64'> >>> vfunc = np.vectorize(myfunc, otypes=[float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) - <type 'numpy.float64'> + <class 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length @@ -1990,18 +1989,18 @@ class vectorize(object): >>> import scipy.stats >>> pearsonr = np.vectorize(scipy.stats.pearsonr, - ... signature='(n),(n)->(),()') - >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + ... 
signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) (array([ 1., -1.]), array([ 0., 0.])) Or for a vectorized convolution: >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') >>> convolve(np.eye(4), [1, 2, 1]) - array([[ 1., 2., 1., 0., 0., 0.], - [ 0., 1., 2., 1., 0., 0.], - [ 0., 0., 1., 2., 1., 0.], - [ 0., 0., 0., 1., 2., 1.]]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) See Also -------- @@ -2311,10 +2310,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. + >>> ddof = 9 # N - 1 >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) - >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor @@ -2346,14 +2349,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.stack((x, y), axis=0) - >>> print(np.cov(X)) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print(np.cov(x, y)) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print(np.cov(x)) - 11.71 + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) """ # Check inputs @@ -2590,12 +2593,12 @@ def blackman(M): Examples -------- + >>> import matplotlib.pyplot as plt >>> np.blackman(12) - array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, - 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, - 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, - 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) - + array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: @@ -2604,15 +2607,15 @@ def blackman(M): >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Blackman window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2621,13 +2624,12 @@ def blackman(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Blackman window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) 
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') >>> plt.show() """ @@ -2699,8 +2701,9 @@ def bartlett(M): Examples -------- + >>> import matplotlib.pyplot as plt >>> np.bartlett(12) - array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. ]) @@ -2711,15 +2714,15 @@ def bartlett(M): >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Bartlett window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2728,13 +2731,12 @@ def bartlett(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Bartlett window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') >>> plt.show() """ @@ -2801,26 +2803,27 @@ def hanning(M): Examples -------- >>> np.hanning(12) - array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, - 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, - 0.07937323, 0. ]) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) Plot the window and its frequency response: + >>> import matplotlib.pyplot as plt >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Hann window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2829,13 +2832,13 @@ def hanning(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of the Hann window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Normalized frequency [cycles per sample]') >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + ... 
>>> plt.show() """ @@ -2900,26 +2903,27 @@ def hamming(M): Examples -------- >>> np.hamming(12) - array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: + >>> import matplotlib.pyplot as plt >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Hamming window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2928,13 +2932,13 @@ def hamming(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Hamming window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Normalized frequency [cycles per sample]') >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + ... >>> plt.show() """ @@ -3083,9 +3087,9 @@ def i0(x): Examples -------- >>> np.i0([0.]) - array(1.0) + array(1.0) # may vary >>> np.i0([0., 1. + 2j]) - array([ 1.00000000+0.j , 0.18785373+0.64616944j]) + array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary """ x = atleast_1d(x).copy() @@ -3180,11 +3184,12 @@ def kaiser(M, beta): Examples -------- + >>> import matplotlib.pyplot as plt >>> np.kaiser(12, 14) - array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, - 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, - 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, - 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: @@ -3194,15 +3199,15 @@ def kaiser(M, beta): >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Kaiser window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -3211,13 +3216,13 @@ def kaiser(M, beta): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Kaiser window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency 
[cycles per sample]") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Normalized frequency [cycles per sample]') >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + (-0.5, 0.5, -100.0, ...) # may vary >>> plt.show() """ @@ -3273,31 +3278,32 @@ def sinc(x): Examples -------- + >>> import matplotlib.pyplot as plt >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) - array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, + array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, - 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, - 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, - 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, - 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, - 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, - 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, - -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, - -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, - 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Sinc Function') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("X") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'X') >>> plt.show() It works in 2-D as well: @@ -3469,18 +3475,18 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): >>> np.median(a) 3.5 >>> np.median(a, axis=0) - array([ 6.5, 4.5, 2.5]) + array([6.5, 4.5, 2.5]) >>> np.median(a, axis=1) - array([ 7., 2.]) + array([7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) - array([ 6.5, 4.5, 2.5]) + array([6.5, 4.5, 2.5]) >>> m - array([ 6.5, 4.5, 2.5]) + array([6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) @@ -3647,23 +3653,23 @@ def percentile(a, q, axis=None, out=None, >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> np.percentile(a, 50, axis=1) - array([ 7., 2.]) + array([7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> m - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a == b) The different types of interpolation can be visualized graphically: @@ -3789,21 +3795,21 @@ def quantile(a, q, axis=None, out=None, >>> np.quantile(a, 0.5) 3.5 >>> np.quantile(a, 0.5, axis=0) - array([[ 
6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> np.quantile(a, 0.5, axis=1) - array([ 7., 2.]) + array([7., 2.]) >>> np.quantile(a, 0.5, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.quantile(a, 0.5, axis=0) >>> out = np.zeros_like(m) >>> np.quantile(a, 0.5, axis=0, out=out) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> m - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a == b) """ q = np.asanyarray(q) @@ -3950,8 +3956,6 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, r = add(x1, x2) if np.any(n): - warnings.warn("Invalid value encountered in percentile", - RuntimeWarning, stacklevel=3) if zerod: if ap.ndim == 1: if out is not None: @@ -4032,9 +4036,9 @@ def trapz(y, x=None, dx=1.0, axis=-1): array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) - array([ 1.5, 2.5, 3.5]) + array([1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) - array([ 2., 8.]) + array([2., 8.]) """ y = asanyarray(y) @@ -4152,17 +4156,17 @@ def meshgrid(*xi, **kwargs): >>> y = np.linspace(0, 1, ny) >>> xv, yv = np.meshgrid(x, y) >>> xv - array([[ 0. , 0.5, 1. ], - [ 0. , 0.5, 1. ]]) + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) >>> yv - array([[ 0., 0., 0.], - [ 1., 1., 1.]]) + array([[0., 0., 0.], + [1., 1., 1.]]) >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv - array([[ 0. , 0.5, 1. ]]) + array([[0. , 0.5, 1. ]]) >>> yv - array([[ 0.], - [ 1.]]) + array([[0.], + [1.]]) `meshgrid` is very useful to evaluate functions on a grid. @@ -4224,7 +4228,7 @@ def delete(arr, obj, axis=None): arr : array_like Input array. obj : slice, int or array of ints - Indicate which sub-arrays to remove. + Indicate indices of sub-arrays to remove along the specified axis. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. @@ -4245,6 +4249,7 @@ def delete(arr, obj, axis=None): ----- Often it is preferable to use a boolean mask. For example: + >>> arr = np.arange(12) + 1 >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] @@ -4476,7 +4481,7 @@ def insert(arr, obj, values, axis=None): [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) - array([1, 5, 1, 2, 2, 3, 3]) + array([1, 5, 1, ..., 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], @@ -4496,13 +4501,13 @@ def insert(arr, obj, values, axis=None): >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, 6, 2, 2, 3, 3]) + array([1, 1, 5, ..., 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, 2, 6, 2, 3, 3]) + array([1, 1, 5, ..., 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, 0, 2, 2, 3, 3]) + array([1, 1, 7, ..., 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) @@ -4666,7 +4671,7 @@ def append(arr, values, axis=None): Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) - array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + array([1, 2, 3, ..., 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. @@ -4676,8 +4681,8 @@ def append(arr, values, axis=None): [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): - ... - ValueError: arrays must have same number of dimensions + ... 
+ ValueError: all the input arrays must have same number of dimensions """ arr = asanyarray(arr) diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 482eabe14..7b229cc89 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -645,7 +645,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') - >>> hist_0; hist1 + >>> hist_0; hist_1 array([1, 1, 1]) array([2, 1, 1, 2]) >>> bins_0; bins_1 @@ -748,14 +748,14 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) - (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist - array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist * np.diff(bin_edges)) @@ -770,8 +770,9 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) - >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram + >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") + Text(0.5, 1.0, "Histogram with 'auto' bins") >>> plt.show() """ diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 56abe293a..64c491cfa 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -478,7 +478,7 @@ class RClass(AxisConcatenator): Examples -------- >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) + array([1, 2, 3, ..., 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. 
]) @@ -538,7 +538,7 @@ class CClass(AxisConcatenator): [2, 5], [3, 6]]) >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([[1, 2, 3, 0, 0, 4, 5, 6]]) + array([[1, 2, 3, ..., 4, 5, 6]]) """ @@ -813,7 +813,7 @@ def fill_diagonal(a, val, wrap=False): >>> # tall matrices no wrap >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4) + >>> np.fill_diagonal(a, 4) >>> a array([[4, 0, 0], [0, 4, 0], @@ -823,7 +823,7 @@ def fill_diagonal(a, val, wrap=False): >>> # tall matrices wrap >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4, wrap=True) + >>> np.fill_diagonal(a, 4, wrap=True) >>> a array([[4, 0, 0], [0, 4, 0], @@ -833,7 +833,7 @@ def fill_diagonal(a, val, wrap=False): >>> # wide matrices >>> a = np.zeros((3, 5),int) - >>> fill_diagonal(a, 4, wrap=True) + >>> np.fill_diagonal(a, 4, wrap=True) >>> a array([[4, 0, 0, 0, 0], [0, 4, 0, 0, 0], diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index d73d84467..b3bf1880b 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -271,9 +271,9 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue): >>> np.nanmin(a) 1.0 >>> np.nanmin(a, axis=0) - array([ 1., 2.]) + array([1., 2.]) >>> np.nanmin(a, axis=1) - array([ 1., 3.]) + array([1., 3.]) When positive infinity and negative infinity are present: @@ -384,9 +384,9 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue): >>> np.nanmax(a) 3.0 >>> np.nanmax(a, axis=0) - array([ 3., 2.]) + array([3., 2.]) >>> np.nanmax(a, axis=1) - array([ 2., 3.]) + array([2., 3.]) When positive infinity and negative infinity are present: @@ -601,12 +601,15 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.nansum(a) 3.0 >>> np.nansum(a, axis=0) - array([ 2., 1.]) + array([2., 1.]) >>> np.nansum([1, np.nan, np.inf]) inf >>> np.nansum([1, np.nan, np.NINF]) -inf - >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... 
np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present nan """ @@ -677,7 +680,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.nanprod(a) 6.0 >>> np.nanprod(a, axis=0) - array([ 3., 2.]) + array([3., 2.]) """ a, mask = _replace_nan(a, 1) @@ -738,16 +741,16 @@ def nancumsum(a, axis=None, dtype=None, out=None): >>> np.nancumsum([1]) array([1]) >>> np.nancumsum([1, np.nan]) - array([ 1., 1.]) + array([1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumsum(a) - array([ 1., 3., 6., 6.]) + array([1., 3., 6., 6.]) >>> np.nancumsum(a, axis=0) - array([[ 1., 2.], - [ 4., 2.]]) + array([[1., 2.], + [4., 2.]]) >>> np.nancumsum(a, axis=1) - array([[ 1., 3.], - [ 3., 3.]]) + array([[1., 3.], + [3., 3.]]) """ a, mask = _replace_nan(a, 0) @@ -805,16 +808,16 @@ def nancumprod(a, axis=None, dtype=None, out=None): >>> np.nancumprod([1]) array([1]) >>> np.nancumprod([1, np.nan]) - array([ 1., 1.]) + array([1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumprod(a) - array([ 1., 2., 6., 6.]) + array([1., 2., 6., 6.]) >>> np.nancumprod(a, axis=0) - array([[ 1., 2.], - [ 3., 2.]]) + array([[1., 2.], + [3., 2.]]) >>> np.nancumprod(a, axis=1) - array([[ 1., 2.], - [ 3., 3.]]) + array([[1., 2.], + [3., 3.]]) """ a, mask = _replace_nan(a, 1) @@ -895,9 +898,9 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.nanmean(a) 2.6666666666666665 >>> np.nanmean(a, axis=0) - array([ 2., 4.]) + array([2., 4.]) >>> np.nanmean(a, axis=1) - array([ 1., 3.5]) + array([1., 3.5]) # may vary """ arr, mask = _replace_nan(a, 0) @@ -1049,19 +1052,19 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) >>> a[0, 1] = np.nan >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) + array([[10., nan, 4.], + [ 3., 2., 1.]]) >>> np.median(a) nan >>> np.nanmedian(a) 3.0 >>> np.nanmedian(a, axis=0) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> np.median(a, axis=1) - array([ 7., 2.]) + array([nan, 2.]) >>> b = a.copy() >>> np.nanmedian(b, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.nanmedian(b, axis=None, overwrite_input=True) @@ -1177,27 +1180,27 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) + array([[10., nan, 4.], + [ 3., 2., 1.]]) >>> np.percentile(a, 50) nan >>> np.nanpercentile(a, 50) - 3.5 + 3.0 >>> np.nanpercentile(a, 50, axis=0) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> np.nanpercentile(a, 50, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.nanpercentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.nanpercentile(a, 50, axis=0, out=out) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> m - array([ 6.5, 2. , 2.5]) + array([6.5, 2. , 2.5]) >>> b = a.copy() >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) """ @@ -1291,26 +1294,26 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False, >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) + array([[10., nan, 4.], + [ 3., 2., 1.]]) >>> np.quantile(a, 0.5) nan >>> np.nanquantile(a, 0.5) - 3.5 + 3.0 >>> np.nanquantile(a, 0.5, axis=0) - array([ 6.5, 2., 2.5]) + array([6.5, 2. 
, 2.5]) >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.nanquantile(a, 0.5, axis=0) >>> out = np.zeros_like(m) >>> np.nanquantile(a, 0.5, axis=0, out=out) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> m - array([ 6.5, 2. , 2.5]) + array([6.5, 2. , 2.5]) >>> b = a.copy() >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) """ a = np.asanyarray(a) @@ -1465,12 +1468,12 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Examples -------- >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.var(a) + >>> np.nanvar(a) 1.5555555555555554 >>> np.nanvar(a, axis=0) - array([ 1., 0.]) + array([1., 0.]) >>> np.nanvar(a, axis=1) - array([ 0., 0.25]) + array([0., 0.25]) # may vary """ arr, mask = _replace_nan(a, 0) @@ -1619,9 +1622,9 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): >>> np.nanstd(a) 1.247219128924647 >>> np.nanstd(a, axis=0) - array([ 1., 0.]) + array([1., 0.]) >>> np.nanstd(a, axis=1) - array([ 0., 0.5]) + array([0., 0.5]) # may vary """ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index db6a8e5eb..704fea108 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -168,13 +168,13 @@ class NpzFile(Mapping): >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) + >>> _ = outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True - >>> npz.files - ['y', 'x'] + >>> sorted(npz.files) + ['x', 'y'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup @@ -502,7 +502,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): >>> x = np.arange(10) >>> np.save(outfile, x) - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -597,10 +597,10 @@ def savez(file, *args, **kwds): Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files - ['arr_1', 'arr_0'] + ['arr_0', 'arr_1'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -608,10 +608,10 @@ def savez(file, *args, **kwds): >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) + >>> _ = outfile.seek(0) >>> npzfile = np.load(outfile) - >>> npzfile.files - ['y', 'x'] + >>> sorted(npzfile.files) + ['x', 'y'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -829,7 +829,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional - Skip the first `skiprows` lines; default: 0. + Skip the first `skiprows` lines, including comments; default: 0. usecols : int or sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. 
@@ -891,21 +891,21 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO(u"0 1\\n2 3") >>> np.loadtxt(c) - array([[ 0., 1.], - [ 2., 3.]]) + array([[0., 1.], + [2., 3.]]) >>> d = StringIO(u"M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 'formats': ('S1', 'i4', 'f4')}) - array([('M', 21, 72.0), ('F', 35, 58.0)], - dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) + array([(b'M', 21, 72.), (b'F', 35, 58.)], + dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')]) >>> c = StringIO(u"1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x - array([ 1., 3.]) + array([1., 3.]) >>> y - array([ 2., 4.]) + array([2., 4.]) """ # Type conversions for Py3 convenience @@ -1481,17 +1481,17 @@ def fromregex(file, regexp, dtype, encoding=None): Examples -------- >>> f = open('test.dat', 'w') - >>> f.write("1312 foo\\n1534 bar\\n444 qux") + >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output - array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], - dtype=[('num', '<i8'), ('key', '|S3')]) + array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], + dtype=[('num', '<i8'), ('key', 'S3')]) >>> output['num'] - array([1312, 1534, 444], dtype=int64) + array([1312, 1534, 444]) """ own_fh = False @@ -1674,26 +1674,26 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) Using dtype = None - >>> s.seek(0) # needed for StringIO example only + >>> _ = s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) Specifying dtype and names - >>> s.seek(0) + >>> _ = s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) An example with fixed-width columns @@ -1701,8 +1701,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... 
delimiter=[1,3,5]) >>> data - array((1, 1.3, 'abcde'), - dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')]) """ if max_rows is not None: diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index e3defdca2..b55764b5d 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -110,7 +110,7 @@ def poly(seq_of_zeros): Given a sequence of a polynomial's zeros: >>> np.poly((0, 0, 0)) # Multiple root example - array([1, 0, 0, 0]) + array([1., 0., 0., 0.]) The line above represents z**3 + 0*z**2 + 0*z + 0. @@ -119,14 +119,14 @@ def poly(seq_of_zeros): The line above represents z**3 - z/4 - >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) - array([ 1. , -0.77086955, 0.08618131, 0. ]) #random + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random Given a square array object: >>> P = np.array([[0, 1./3], [-1./2, 0]]) >>> np.poly(P) - array([ 1. , 0. , 0.16666667]) + array([1. , 0. , 0.16666667]) Note how in all cases the leading coefficient is always 1. @@ -295,7 +295,7 @@ def polyint(p, m=1, k=None): >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P - poly1d([ 0.33333333, 0.5 , 1. , 0. ]) + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary >>> np.polyder(P) == p True @@ -310,7 +310,7 @@ def polyint(p, m=1, k=None): 0.0 >>> P = np.polyint(p, 3, k=[6,5,3]) >>> P - poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary Note that 3 = 6 / 2!, and that the constants are given in the order of integrations. Constant of the highest-order polynomial term comes first: @@ -404,7 +404,7 @@ def polyder(p, m=1): >>> np.polyder(p, 3) poly1d([6]) >>> np.polyder(p, 4) - poly1d([ 0.]) + poly1d([0.]) """ m = int(m) @@ -552,28 +552,29 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) >>> z = np.polyfit(x, y, 3) >>> z - array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary It is convenient to use `poly1d` objects for dealing with polynomials: >>> p = np.poly1d(z) >>> p(0.5) - 0.6143849206349179 + 0.6143849206349179 # may vary >>> p(3.5) - -0.34732142857143039 + -0.34732142857143039 # may vary >>> p(10) - 22.579365079365115 + 22.579365079365115 # may vary High-order polynomials may oscillate wildly: >>> p30 = np.poly1d(np.polyfit(x, y, 30)) - /... RankWarning: Polyfit may be poorly conditioned... + ... + >>> # RankWarning: Polyfit may be poorly conditioned... >>> p30(4) - -0.80000000000000204 + -0.80000000000000204 # may vary >>> p30(5) - -0.99999999999999445 + -0.99999999999999445 # may vary >>> p30(4.5) - -0.10547061179440398 + -0.10547061179440398 # may vary Illustration: @@ -703,6 +704,8 @@ def polyval(p, x): for polynomials of high degree the values may be inaccurate due to rounding errors. Use carefully. + If `x` is a subtype of `ndarray` the return value will be of the same type. + References ---------- .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. 
@@ -714,18 +717,18 @@ def polyval(p, x): >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) - poly1d([ 76.]) + poly1d([76.]) >>> np.polyval(np.poly1d([3,0,1]), 5) 76 >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) - poly1d([ 76.]) + poly1d([76.]) """ p = NX.asarray(p) if isinstance(x, poly1d): y = 0 else: - x = NX.asarray(x) + x = NX.asanyarray(x) y = NX.zeros_like(x) for i in range(len(p)): y = y * x + p[i] @@ -951,7 +954,7 @@ def polydiv(u, v): >>> x = np.array([3.0, 5.0, 2.0]) >>> y = np.array([2.0, 1.0]) >>> np.polydiv(x, y) - (array([ 1.5 , 1.75]), array([ 0.25])) + (array([1.5 , 1.75]), array([0.25])) """ truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d)) @@ -1046,7 +1049,7 @@ class poly1d(object): >>> p.r array([-1.+1.41421356j, -1.-1.41421356j]) >>> p(p.r) - array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary These numbers in the previous line represent (0, 0) to machine precision @@ -1073,7 +1076,7 @@ class poly1d(object): poly1d([ 1, 4, 10, 12, 9]) >>> (p**3 + 4) / p - (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.])) ``asarray(p)`` gives the coefficient array, so polynomials can be used in all functions that accept arrays: @@ -1095,7 +1098,7 @@ class poly1d(object): Construct a polynomial from its roots: >>> np.poly1d([1, 2], True) - poly1d([ 1, -3, 2]) + poly1d([ 1., -3., 2.]) This is the same polynomial as obtained by: diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index fcc0d9a7a..5ff35f0bb 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -57,11 +57,10 @@ def recursive_fill_fields(input, output): Examples -------- >>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) >>> b = np.zeros((3,), dtype=a.dtype) >>> rfn.recursive_fill_fields(a, b) - array([(1, 10.0), (2, 20.0), (0, 0.0)], - dtype=[('A', '<i4'), ('B', '<f8')]) + array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')]) """ newdtype = output.dtype @@ -89,11 +88,11 @@ def get_fieldspec(dtype): Examples -------- - >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)]) + >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr - [(('a', 'A'), '<i4'), ('b', '<f8', (3,))] + [(('a', 'A'), '<i8'), ('b', '<f8', (3,))] >>> get_fieldspec(dt) - [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))] + [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))] """ if dtype.names is None: @@ -120,10 +119,15 @@ def get_names(adtype): Examples -------- >>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names(np.empty((1,), dtype=int)) is None - True + >>> rfn.get_names(np.empty((1,), dtype=int)) + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' + >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb'))) @@ -153,9 +157,13 @@ def get_names_flat(adtype): -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None - True + Traceback (most recent call last): + ... 
+ AttributeError: 'numpy.ndarray' object has no attribute 'names' >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb') @@ -403,20 +411,18 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) - masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], - mask = [(False, False) (False, False) (True, False)], - fill_value = (999999, 1e+20), - dtype = [('f0', '<i4'), ('f1', '<f8')]) - - >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), - ... usemask=False) - array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('f0', '<i4'), ('f1', '<f8')]) - >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '<i8'), ('f1', '<f8')]) + + >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '<i8'), ('f1', '<f8')]) + >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), ... np.array([10., 20., 30.])), ... usemask=False, asrecarray=True) - rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('a', '<i4'), ('f1', '<f8')]) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '<i8'), ('f1', '<f8')]) Notes ----- @@ -547,16 +553,14 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) >>> rfn.drop_fields(a, 'a') - array([((2.0, 3),), ((5.0, 6),)], - dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])]) + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])]) >>> rfn.drop_fields(a, 'ba') - array([(1, (3,)), (4, (6,))], - dtype=[('a', '<i4'), ('b', [('bb', '<i4')])]) + array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])]) >>> rfn.drop_fields(a, ['ba', 'bb']) - array([(1,), (4,)], - dtype=[('a', '<i4')]) + array([(1,), (4,)], dtype=[('a', '<i8')]) """ if _is_string_like(drop_names): drop_names = [drop_names] @@ -648,8 +652,8 @@ def rename_fields(base, namemapper): >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) - array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], - dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])]) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])]) """ def _recursive_rename_fields(ndtype, namemapper): @@ -834,18 +838,18 @@ def repack_fields(a, align=False, recurse=False): ... print("offsets:", [d.fields[name][1] for name in d.names]) ... print("itemsize:", d.itemsize) ... 
- >>> dt = np.dtype('u1,i4,f4', align=True) + >>> dt = np.dtype('u1,<i4,<f4', align=True) >>> dt - dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True) + dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True) >>> print_offsets(dt) - offsets: [0, 4, 8] - itemsize: 16 + offsets: [0, 8, 16] + itemsize: 24 >>> packed_dt = repack_fields(dt) >>> packed_dt - dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')]) + dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')]) >>> print_offsets(packed_dt) - offsets: [0, 1, 5] - itemsize: 13 + offsets: [0, 1, 9] + itemsize: 17 """ if not isinstance(a, np.dtype): @@ -1244,15 +1248,16 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, True >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - ... dtype=[('A', '|S3'), ('B', float), ('C', float)]) + ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) >>> test = rfn.stack_arrays((z,zz)) >>> test - masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) - ('c', 30.0, 300.0)], - mask = [(False, False, True) (False, False, True) (False, False, False) - (False, False, False) (False, False, False)], - fill_value = ('N/A', 1e+20, 1e+20), - dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')]) + masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), + (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], + mask=[(False, False, True), (False, False, True), + (False, False, False), (False, False, False), + (False, False, False)], + fill_value=(b'N/A', 1.e+20, 1.e+20), + dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')]) """ if isinstance(arrays, ndarray): @@ -1331,7 +1336,10 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False): >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) - ... 
# XXX: judging by the output, the ignoremask flag has no effect + (masked_array(data=[(1,), (1,), (2,), (2,)], + mask=[(False,), (False,), (False,), (False,)], + fill_value=(999999,), + dtype=[('a', '<i8')]), array([0, 1, 3, 4])) """ a = np.asanyarray(a).ravel() # Get a dictionary of fields diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index 9ca006841..5ac790ce9 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -59,7 +59,7 @@ def _tocomplex(arr): >>> a = np.array([1,2,3],np.short) >>> ac = np.lib.scimath._tocomplex(a); ac - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> ac.dtype dtype('complex64') @@ -70,7 +70,7 @@ def _tocomplex(arr): >>> b = np.array([1,2,3],np.double) >>> bc = np.lib.scimath._tocomplex(b); bc - array([ 1.+0.j, 2.+0.j, 3.+0.j]) + array([1.+0.j, 2.+0.j, 3.+0.j]) >>> bc.dtype dtype('complex128') @@ -81,13 +81,13 @@ def _tocomplex(arr): >>> c = np.array([1,2,3],np.csingle) >>> cc = np.lib.scimath._tocomplex(c); cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> c *= 2; c - array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) >>> cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) """ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, nt.ushort, nt.csingle)): @@ -170,7 +170,7 @@ def _fix_real_abs_gt_1(x): array([0, 1]) >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) - array([ 0.+0.j, 2.+0.j]) + array([0.+0.j, 2.+0.j]) """ x = asarray(x) if any(isreal(x) & (abs(x) > 1)): @@ -212,14 +212,14 @@ def sqrt(x): >>> np.lib.scimath.sqrt(1) 1.0 >>> np.lib.scimath.sqrt([1, 4]) - array([ 1., 2.]) + array([1., 2.]) But it automatically handles negative inputs: >>> np.lib.scimath.sqrt(-1) - (0.0+1.0j) + 1j >>> np.lib.scimath.sqrt([-1,4]) - array([ 0.+1.j, 2.+0.j]) + array([0.+1.j, 2.+0.j]) """ x = _fix_real_lt_zero(x) @@ -317,7 +317,7 @@ def log10(x): 1.0 >>> np.emath.log10([-10**1, -10**2, 10**2]) - array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) """ x = _fix_real_lt_zero(x) @@ -354,9 +354,9 @@ def logn(n, x): >>> np.set_printoptions(precision=4) >>> np.lib.scimath.logn(2, [4, 8]) - array([ 2., 3.]) + array([2., 3.]) >>> np.lib.scimath.logn(2, [-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) @@ -405,7 +405,7 @@ def log2(x): >>> np.emath.log2(8) 3.0 >>> np.emath.log2([-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) @@ -451,9 +451,9 @@ def power(x, p): >>> np.lib.scimath.power([2, 4], 2) array([ 4, 16]) >>> np.lib.scimath.power([2, 4], -2) - array([ 0.25 , 0.0625]) + array([0.25 , 0.0625]) >>> np.lib.scimath.power([-2, 4], 2) - array([ 4.+0.j, 16.+0.j]) + array([ 4.-0.j, 16.+0.j]) """ x = _fix_real_lt_zero(x) @@ -499,7 +499,7 @@ def arccos(x): 0.0 >>> np.emath.arccos([1,2]) - array([ 0.-0.j , 0.+1.317j]) + array([0.-0.j , 0.-1.317j]) """ x = _fix_real_abs_gt_1(x) @@ -545,7 +545,7 @@ def arcsin(x): 0.0 >>> np.emath.arcsin([0,1]) - array([ 0. , 1.5708]) + array([0. 
, 1.5708]) """ x = _fix_real_abs_gt_1(x) @@ -589,11 +589,14 @@ def arctanh(x): -------- >>> np.set_printoptions(precision=4) - >>> np.emath.arctanh(np.eye(2)) - array([[ Inf, 0.], - [ 0., Inf]]) + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) >>> np.emath.arctanh([1j]) - array([ 0.+0.7854j]) + array([0.+0.7854j]) """ x = _fix_real_abs_gt_1(x) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index f56c4f4db..e088a6c4a 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -129,7 +129,7 @@ def take_along_axis(arr, indices, axis): [40, 50, 60]]) >>> ai = np.argsort(a, axis=1); ai array([[0, 2, 1], - [1, 2, 0]], dtype=int64) + [1, 2, 0]]) >>> np.take_along_axis(a, ai, axis=1) array([[10, 20, 30], [40, 50, 60]]) @@ -142,7 +142,7 @@ def take_along_axis(arr, indices, axis): >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) >>> ai array([[1], - [0], dtype=int64) + [0]]) >>> np.take_along_axis(a, ai, axis=1) array([[30], [60]]) @@ -152,10 +152,10 @@ def take_along_axis(arr, indices, axis): >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1) >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1) - >>> ai = np.concatenate([ai_min, ai_max], axis=axis) - >> ai + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai array([[0, 1], - [1, 0]], dtype=int64) + [1, 0]]) >>> np.take_along_axis(a, ai, axis=1) array([[10, 30], [40, 60]]) @@ -243,7 +243,7 @@ def put_along_axis(arr, indices, values, axis): >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) >>> ai array([[1], - [0]], dtype=int64) + [0]]) >>> np.put_along_axis(a, ai, 99, axis=1) >>> a array([[10, 99, 20], @@ -330,9 +330,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... return (a[0] + a[-1]) * 0.5 >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) >>> np.apply_along_axis(my_func, 0, b) - array([ 4., 5., 6.]) + array([4., 5., 6.]) >>> np.apply_along_axis(my_func, 1, b) - array([ 2., 5., 8.]) + array([2., 5., 8.]) For a function that returns a 1D array, the number of dimensions in `outarr` is the same as `arr`. 
@@ -732,11 +732,11 @@ def array_split(ary, indices_or_sections, axis=0): -------- >>> x = np.arange(8.0) >>> np.array_split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] >>> x = np.arange(7.0) >>> np.array_split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])] + [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])] """ try: @@ -828,14 +828,14 @@ def split(ary, indices_or_sections, axis=0): -------- >>> x = np.arange(9.0) >>> np.split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] >>> x = np.arange(8.0) >>> np.split(x, [3, 5, 6, 10]) - [array([ 0., 1., 2.]), - array([ 3., 4.]), - array([ 5.]), - array([ 6., 7.]), + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), array([], dtype=float64)] """ @@ -872,43 +872,43 @@ def hsplit(ary, indices_or_sections): -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], - [ 12., 13.]]), + [12., 13.]]), array([[ 2., 3.], [ 6., 7.], - [ 10., 11.], - [ 14., 15.]])] + [10., 11.], + [14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2.], - [ 4., 5., 6.], - [ 8., 9., 10.], - [ 12., 13., 14.]]), - array([[ 3.], - [ 7.], - [ 11.], - [ 15.]]), - array([], dtype=float64)] + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) >>> np.hsplit(x, 2) - [array([[[ 0., 1.]], - [[ 4., 5.]]]), - array([[[ 2., 3.]], - [[ 6., 7.]]])] + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] """ if _nx.ndim(ary) == 0: @@ -936,35 +936,31 @@ def vsplit(ary, indices_or_sections): -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) >>> np.vsplit(x, 2) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]]), - array([[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]])] + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] >>> np.vsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]]), - array([[ 12., 13., 14., 15.]]), - array([], dtype=float64)] + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] With a higher dimensional array the split is still along the first axis. 
>>> x = np.arange(8.0).reshape(2, 2, 2) >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) >>> np.vsplit(x, 2) - [array([[[ 0., 1.], - [ 2., 3.]]]), - array([[[ 4., 5.], - [ 6., 7.]]])] + [array([[[0., 1.], + [2., 3.]]]), array([[[4., 5.], + [6., 7.]]])] """ if _nx.ndim(ary) < 2: @@ -989,30 +985,28 @@ def dsplit(ary, indices_or_sections): -------- >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x - array([[[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]], - [[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]]) + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) >>> np.dsplit(x, 2) - [array([[[ 0., 1.], - [ 4., 5.]], - [[ 8., 9.], - [ 12., 13.]]]), - array([[[ 2., 3.], - [ 6., 7.]], - [[ 10., 11.], - [ 14., 15.]]])] + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] >>> np.dsplit(x, np.array([3, 6])) - [array([[[ 0., 1., 2.], - [ 4., 5., 6.]], - [[ 8., 9., 10.], - [ 12., 13., 14.]]]), - array([[[ 3.], - [ 7.]], - [[ 11.], - [ 15.]]]), - array([], dtype=float64)] - + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] """ if _nx.ndim(ary) < 3: raise ValueError('dsplit only works on arrays of 3 or more dimensions') @@ -1092,15 +1086,15 @@ def kron(a, b): Examples -------- >>> np.kron([1,10,100], [5,6,7]) - array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) + array([ 5, 6, 7, ..., 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) - array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) + array([ 5, 50, 500, ..., 7, 70, 700]) >>> np.kron(np.eye(2), np.ones((2,2))) - array([[ 1., 1., 0., 0.], - [ 1., 1., 0., 0.], - [ 0., 0., 1., 1.], - [ 0., 0., 1., 1.]]) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) >>> a = np.arange(100).reshape((2,5,2,5)) >>> b = np.arange(24).reshape((2,3,4)) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index a17fc66e5..93d4b279f 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -136,8 +136,8 @@ class TestSetOps(object): np.nan), # should fail because attempting # to downcast to smaller int type: - (np.array([1, 2, 3], dtype=np.int32), - np.array([5, 7, 2], dtype=np.int64), + (np.array([1, 2, 3], dtype=np.int16), + np.array([5, 1<<20, 2], dtype=np.int32), None), # should fail because attempting to cast # two special floating point values @@ -152,8 +152,8 @@ class TestSetOps(object): # specifically, raise an appropriate # Exception when attempting to append or # prepend with an incompatible type - msg = 'must be compatible' - with assert_raises_regex(TypeError, msg): + msg = 'cannot convert' + with assert_raises_regex(ValueError, msg): ediff1d(ary=ary, to_end=append, to_begin=prepend) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 3d4b0e3b2..d9a97db1b 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -4,6 +4,7 @@ import operator import warnings import sys import decimal +import types import pytest import numpy as np @@ -24,6 +25,7 @@ from numpy.lib import ( from numpy.compat import long +PY2 = sys.version_info[0] == 2 def get_mat(n): data = np.arange(n) @@ -353,9 +355,9 @@ class TestAverage(object): assert_equal(type(np.average(a, weights=w)), subclass) def 
test_upcasting(self): - types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), + typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'), ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')] - for at, wt, rt in types: + for at, wt, rt in typs: a = np.array([[1,2],[3,4]], dtype=at) w = np.array([[1,2],[3,4]], dtype=wt) assert_equal(np.average(a, weights=w).dtype, np.dtype(rt)) @@ -1498,6 +1500,49 @@ class TestVectorize(object): f(x) +class TestLeaks(object): + class A(object): + iters = 20 + + def bound(self, *args): + return 0 + + @staticmethod + def unbound(*args): + return 0 + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize('name, incr', [ + ('bound', A.iters), + ('unbound', 0), + ]) + def test_frompyfunc_leaks(self, name, incr): + # exposed in gh-11867 as np.vectorized, but the problem stems from + # frompyfunc. + # class.attribute = np.frompyfunc(<method>) creates a + # reference cycle if <method> is a bound class method. It requires a + # gc collection cycle to break the cycle (on CPython 3) + import gc + A_func = getattr(self.A, name) + gc.disable() + try: + refcount = sys.getrefcount(A_func) + for i in range(self.A.iters): + a = self.A() + a.f = np.frompyfunc(getattr(a, name), 1, 1) + out = a.f(np.arange(10)) + a = None + if PY2: + assert_equal(sys.getrefcount(A_func), refcount) + else: + # A.func is part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) + for i in range(5): + gc.collect() + assert_equal(sys.getrefcount(A_func), refcount) + finally: + gc.enable() + class TestDigitize(object): def test_forward(self): @@ -2391,11 +2436,8 @@ class TestPercentile(object): assert_equal(np.percentile(x, 100), 3.5) assert_equal(np.percentile(x, 50), 1.75) x[1] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(x, 0), np.nan) - assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) - assert_(w[0].category is RuntimeWarning) + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan) def test_api(self): d = np.ones(5) @@ -2733,85 +2775,63 @@ class TestPercentile(object): def test_nan_behavior(self): a = np.arange(24, dtype=float) a[2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, 0.3), np.nan) - assert_equal(np.percentile(a, 0.3, axis=0), np.nan) - assert_equal(np.percentile(a, [0.3, 0.6], axis=0), - np.array([np.nan] * 2)) - assert_(w[0].category is RuntimeWarning) - assert_(w[1].category is RuntimeWarning) - assert_(w[2].category is RuntimeWarning) + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) a = np.arange(24, dtype=float).reshape(2, 3, 4) a[1, 2, 3] = np.nan a[1, 1, 2] = np.nan # no axis - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, 0.3), np.nan) - assert_equal(np.percentile(a, 0.3).ndim, 0) - assert_(w[0].category is RuntimeWarning) + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) # axis0 zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) b[2, 3] = np.nan b[1, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - 
warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, 0.3, 0), b) + assert_equal(np.percentile(a, 0.3, 0), b) # axis0 not zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 0) b[:, 2, 3] = np.nan b[:, 1, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) # axis1 zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) b[1, 3] = np.nan b[1, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, 0.3, 1), b) + assert_equal(np.percentile(a, 0.3, 1), b) # axis1 not zerod b = np.percentile( np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) b[:, 1, 3] = np.nan b[:, 1, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) # axis02 zerod b = np.percentile( np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) b[1] = np.nan b[2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, 0.3, (0, 2)), b) + assert_equal(np.percentile(a, 0.3, (0, 2)), b) # axis02 not zerod b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], (0, 2)) b[:, 1] = np.nan b[:, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) # axis02 not zerod with nearest interpolation b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], (0, 2), interpolation='nearest') b[:, 1] = np.nan b[:, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.percentile( - a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) class TestQuantile(object): @@ -2858,10 +2878,7 @@ class TestMedian(object): # check array scalar result assert_equal(np.median(a).ndim, 0) a[1] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a).ndim, 0) - assert_(w[0].category is RuntimeWarning) + assert_equal(np.median(a).ndim, 0) def test_axis_keyword(self): a3 = np.array([[2, 3], @@ -2960,58 +2977,43 @@ class TestMedian(object): def test_nan_behavior(self): a = np.arange(24, dtype=float) a[2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a), np.nan) - assert_equal(np.median(a, axis=0), np.nan) - assert_(w[0].category is RuntimeWarning) - assert_(w[1].category is RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) a = np.arange(24, dtype=float).reshape(2, 3, 4) a[1, 2, 3] = np.nan a[1, 1, 2] = np.nan # no axis - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a), np.nan) - assert_equal(np.median(a).ndim, 0) - assert_(w[0].category is RuntimeWarning) + assert_equal(np.median(a), np.nan) + 
assert_equal(np.median(a).ndim, 0) # axis0 b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) b[2, 3] = np.nan b[1, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a, 0), b) - assert_equal(len(w), 1) + assert_equal(np.median(a, 0), b) # axis1 b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) b[1, 3] = np.nan b[1, 2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a, 1), b) - assert_equal(len(w), 1) + assert_equal(np.median(a, 1), b) # axis02 b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) b[1] = np.nan b[2] = np.nan - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', RuntimeWarning) - assert_equal(np.median(a, (0, 2)), b) - assert_equal(len(w), 1) + assert_equal(np.median(a, (0, 2)), b) def test_empty(self): - # empty arrays + # mean(empty array) emits two warnings: empty slice and divide by 0 a = np.array([], dtype=float) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) assert_equal(np.median(a), np.nan) assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) # multiple dimensions a = np.array([], dtype=float, ndmin=3) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 11f8a5afa..069693613 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -221,9 +221,9 @@ class TestRecFunctions(object): ( 5, ( 6., 7), [ 8., 9.]), (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], - dtype=[('a', '<i4'), - ('b', [('f0', '<f4'), ('f1', '<u2')]), - ('c', '<f4', (2,))]) + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) assert_equal(out, want) d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 27d848608..e165c9b02 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -77,13 +77,13 @@ def fliplr(m): -------- >>> A = np.diag([1.,2.,3.]) >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) >>> np.fliplr(A) - array([[ 0., 0., 1.], - [ 0., 2., 0.], - [ 3., 0., 0.]]) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.fliplr(A) == A[:,::-1,...]) @@ -129,13 +129,13 @@ def flipud(m): -------- >>> A = np.diag([1.0, 2, 3]) >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) >>> np.flipud(A) - array([[ 0., 0., 3.], - [ 0., 2., 0.], - [ 1., 0., 0.]]) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.flipud(A) == A[::-1,...]) @@ -191,9 +191,9 @@ def eye(N, M=None, k=0, dtype=float, order='C'): array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) - array([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) """ if M is None: @@ -378,9 +378,9 @@ def tri(N, M=None, k=0, dtype=float): [1, 1, 1, 1, 1]]) >>> np.tri(3, 5, -1) - array([[ 0., 0., 0., 0., 0.], - [ 1., 0., 0., 0., 0.], - [ 1., 1., 0., 0., 0.]]) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) """ if M is None: @@ -540,7 +540,7 @@ def vander(x, N=None, increasing=False): of the differences between the values of 
the input vector: >>> np.linalg.det(np.vander(x)) - 48.000000000000043 + 48.000000000000043 # may vary >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) 48 @@ -644,7 +644,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, Examples -------- - >>> import matplotlib as mpl + >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt Construct a 2-D histogram with variable bin width. First define the bin @@ -666,6 +666,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, >>> ax = fig.add_subplot(131, title='imshow: square bins') >>> plt.imshow(H, interpolation='nearest', origin='low', ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + <matplotlib.image.AxesImage object at 0x...> :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges: @@ -673,13 +674,14 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, ... aspect='equal') >>> X, Y = np.meshgrid(xedges, yedges) >>> ax.pcolormesh(X, Y, H) + <matplotlib.collections.QuadMesh object at 0x...> :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to display actual bin edges with interpolation: >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) - >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear') + >>> im = NonUniformImage(ax, interpolation='bilinear') >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 >>> im.set_data(xcenters, ycenters, H) @@ -829,7 +831,7 @@ def tril_indices(n, k=0, m=None): Both for indexing: >>> a[il1] - array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + array([ 0, 4, 5, ..., 13, 14, 15]) And for assigning values: @@ -944,7 +946,7 @@ def triu_indices(n, k=0, m=None): Both for indexing: >>> a[iu1] - array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + array([ 0, 1, 2, ..., 10, 11, 15]) And for assigning values: diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 90b1e9a6e..f55517732 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -105,11 +105,11 @@ def asfarray(a, dtype=_nx.float_): Examples -------- >>> np.asfarray([2, 3]) - array([ 2., 3.]) + array([2., 3.]) >>> np.asfarray([2, 3], dtype='float') - array([ 2., 3.]) + array([2., 3.]) >>> np.asfarray([2, 3], dtype='int8') - array([ 2., 3.]) + array([2., 3.]) """ if not _nx.issubdtype(dtype, _nx.inexact): @@ -146,13 +146,13 @@ def real(val): -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real - array([ 1., 3., 5.]) + array([1., 3., 5.]) >>> a.real = 9 >>> a - array([ 9.+2.j, 9.+4.j, 9.+6.j]) + array([9.+2.j, 9.+4.j, 9.+6.j]) >>> a.real = np.array([9, 8, 7]) >>> a - array([ 9.+2.j, 8.+4.j, 7.+6.j]) + array([9.+2.j, 8.+4.j, 7.+6.j]) >>> np.real(1 + 1j) 1.0 @@ -192,10 +192,10 @@ def imag(val): -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag - array([ 2., 4., 6.]) + array([2., 4., 6.]) >>> a.imag = np.array([8, 10, 12]) >>> a - array([ 1. +8.j, 3.+10.j, 5.+12.j]) + array([1. 
+8.j, 3.+10.j, 5.+12.j]) >>> np.imag(1 + 1j) 1.0 @@ -422,11 +422,13 @@ def nan_to_num(x, copy=True): 0.0 >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) >>> np.nan_to_num(x) - array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, - -1.28000000e+002, 1.28000000e+002]) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary + -1.28000000e+002, 1.28000000e+002]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary + -1.28000000e+002, 1.28000000e+002]) >>> np.nan_to_num(y) - array([ 1.79769313e+308 +0.00000000e+000j, + array([ 1.79769313e+308 +0.00000000e+000j, # may vary 0.00000000e+000 +0.00000000e+000j, 0.00000000e+000 +1.79769313e+308j]) """ @@ -490,12 +492,12 @@ def real_if_close(a, tol=100): Examples -------- >>> np.finfo(float).eps - 2.2204460492503131e-16 + 2.2204460492503131e-16 # may vary >>> np.real_if_close([2.1 + 4e-14j], tol=1000) - array([ 2.1]) + array([2.1]) >>> np.real_if_close([2.1 + 4e-13j], tol=1000) - array([ 2.1 +4.00000000e-13j]) + array([2.1+4.e-13j]) """ a = asanyarray(a) @@ -538,7 +540,6 @@ def asscalar(a): -------- >>> np.asscalar(np.array([24])) 24 - """ # 2018-10-10, 1.16 @@ -672,11 +673,11 @@ def common_type(*arrays): Examples -------- >>> np.common_type(np.arange(2, dtype=np.float32)) - <type 'numpy.float32'> + <class 'numpy.float32'> >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) - <type 'numpy.float64'> + <class 'numpy.float64'> >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) - <type 'numpy.complex128'> + <class 'numpy.complex128'> """ is_complex = False diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py index 9a9e6f9dd..5c411e8c8 100644 --- a/numpy/lib/ufunclike.py +++ b/numpy/lib/ufunclike.py @@ -154,11 +154,11 @@ def isposinf(x, out=None): Examples -------- >>> np.isposinf(np.PINF) - array(True, dtype=bool) + True >>> np.isposinf(np.inf) - array(True, dtype=bool) + True >>> np.isposinf(np.NINF) - array(False, dtype=bool) + False >>> np.isposinf([-np.inf, 0., np.inf]) array([False, False, True]) @@ -224,11 +224,11 @@ def isneginf(x, out=None): Examples -------- >>> np.isneginf(np.NINF) - array(True, dtype=bool) + True >>> np.isneginf(np.inf) - array(False, dtype=bool) + False >>> np.isneginf(np.PINF) - array(False, dtype=bool) + False >>> np.isneginf([-np.inf, 0., np.inf]) array([ True, False, False]) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 84edf4021..6b112f37a 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -150,10 +150,8 @@ def deprecate(*args, **kwargs): Warning: >>> olduint = np.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! 
# may vary >>> olduint(6) - /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: - DeprecationWarning: uint32 is deprecated - warnings.warn(str1, DeprecationWarning, stacklevel=2) 6 """ @@ -201,8 +199,8 @@ def byte_bounds(a): >>> low, high = np.byte_bounds(I) >>> high - low == I.size*I.itemsize True - >>> I = np.eye(2, dtype='G'); I.dtype - dtype('complex192') + >>> I = np.eye(2); I.dtype + dtype('float64') >>> low, high = np.byte_bounds(I) >>> high - low == I.size*I.itemsize True @@ -263,17 +261,17 @@ def who(vardict=None): >>> np.who() Name Shape Bytes Type =========================================================== - a 10 40 int32 + a 10 80 int64 b 20 160 float64 - Upper bound on total bytes = 200 + Upper bound on total bytes = 240 >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', ... 'idx':5} >>> np.who(d) Name Shape Bytes Type =========================================================== - y 3 24 float64 x 2 16 float64 + y 3 24 float64 Upper bound on total bytes = 40 """ @@ -733,7 +731,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False, Examples -------- - >>> np.lookfor('binary representation') + >>> np.lookfor('binary representation') # doctest: +SKIP Search results for 'binary representation' ------------------------------------------ numpy.binary_repr @@ -1104,7 +1102,7 @@ def safe_eval(source): >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') Traceback (most recent call last): ... - SyntaxError: Unsupported source construct: compiler.ast.CallFunc + ValueError: malformed node or string: <_ast.Call object at 0x...> """ # Local import to speed up numpy's import time. @@ -1142,17 +1140,12 @@ def _median_nancheck(data, result, axis, out): n = n.filled(False) if result.ndim == 0: if n == True: - warnings.warn("Invalid value encountered in median", - RuntimeWarning, stacklevel=3) if out is not None: out[...] = data.dtype.type(np.nan) result = out else: result = data.dtype.type(np.nan) elif np.count_nonzero(n.ravel()) > 0: - warnings.warn("Invalid value encountered in median for" + - " %d results" % np.count_nonzero(n.ravel()), - RuntimeWarning, stacklevel=3) result[n] = np.nan return result |
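
Note on the doctest churn above: the whitespace-only output changes (for example `array([ 1.,  2.])` becoming `array([1., 2.])`) appear to track the array repr introduced in NumPy 1.14, which dropped the extra padding reserved for a sign position in all-positive float arrays. The short snippet below is only an illustration of that repr change, not part of the patch; it assumes a NumPy recent enough to accept the `legacy` keyword of `np.set_printoptions`.

    >>> import numpy as np
    >>> np.array([1.0, 2.0, 3.0])            # repr on NumPy >= 1.14
    array([1., 2., 3.])
    >>> np.set_printoptions(legacy='1.13')   # opt back into the pre-1.14 layout
    >>> np.array([1.0, 2.0, 3.0])
    array([ 1.,  2.,  3.])
    >>> np.set_printoptions(legacy=False)    # restore the current behaviour

Outputs that legitimately differ between runs or platforms (random polynomial fits, default integer widths, temporary paths, object addresses) are instead tagged `# may vary` or `# doctest: +SKIP` in the hunks above, which presumably signals the documentation test runner to skip a verbatim comparison of those lines.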