Diffstat (limited to 'numpy')
-rw-r--r--   numpy/core/arrayprint.py            |  46
-rw-r--r--   numpy/core/shape_base.py            | 236
-rw-r--r--   numpy/core/tests/test_arrayprint.py |   6
-rw-r--r--   numpy/core/tests/test_nditer.py     | 335
-rw-r--r--   numpy/core/tests/test_shape_base.py |  22
5 files changed, 328 insertions, 317 deletions
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 21872461e..da173625e 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -305,8 +305,12 @@ def _get_formatdict(data, **opt): 'int': lambda: IntegerFormat(data), 'float': lambda: FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longfloat': lambda: + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), 'complexfloat': lambda: ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longcomplexfloat': lambda: + ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), 'datetime': lambda: DatetimeFormat(data), 'timedelta': lambda: TimedeltaFormat(data), 'object': lambda: _object_format, @@ -329,7 +333,7 @@ def _get_formatdict(data, **opt): for key in ['int']: formatdict[key] = indirect(formatter['int_kind']) if 'float_kind' in fkeys: - for key in ['half', 'float', 'longfloat']: + for key in ['float', 'longfloat']: formatdict[key] = indirect(formatter['float_kind']) if 'complex_kind' in fkeys: for key in ['complexfloat', 'longcomplexfloat']: @@ -361,9 +365,15 @@ def _get_format_function(data, **options): else: return formatdict['int']() elif issubclass(dtypeobj, _nt.floating): - return formatdict['float']() + if issubclass(dtypeobj, _nt.longfloat): + return formatdict['longfloat']() + else: + return formatdict['float']() elif issubclass(dtypeobj, _nt.complexfloating): - return formatdict['complexfloat']() + if issubclass(dtypeobj, _nt.clongfloat): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)): return formatdict['numpystr']() elif issubclass(dtypeobj, _nt.datetime64): @@ -805,6 +815,20 @@ class FloatingFormat(object): pad_left=self.pad_left, pad_right=self.pad_right) +# for back-compatibility, we keep the classes for each float type too +class FloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("FloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(FloatFormat, self).__init__(*args, **kwargs) + + +class LongFloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("LongFloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongFloatFormat, self).__init__(*args, **kwargs) + def format_float_scientific(x, precision=None, unique=True, trim='k', sign=False, pad_left=None, exp_digits=None): @@ -996,6 +1020,22 @@ class ComplexFloatingFormat(object): i = self.imag_format(x.imag) return r + i + 'j' +# for back-compatibility, we keep the classes for each complex type too +class ComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "ComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(ComplexFormat, self).__init__(*args, **kwargs) + +class LongComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "LongComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongComplexFormat, self).__init__(*args, **kwargs) + + class DatetimeFormat(object): def __init__(self, x, unit=None, timezone=None, casting='same_kind'): # Get the unit from the dtype diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 026ad603a..8a047fdda 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -365,78 +365,93 @@ def stack(arrays, axis=0, out=None): 
return _nx.concatenate(expanded_arrays, axis=axis, out=out) -class _Recurser(object): +def _block_check_depths_match(arrays, parent_index=[]): """ - Utility class for recursing over nested iterables + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. + + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + The parameter `parent_index` is the full index of `arrays` within the + nested lists passed to _block_check_depths_match at the top of the + recursion. + The return value is a pair. The first item returned is the full index + of an element (specifically the first element) from the bottom of the + nesting in `arrays`. An empty list at the bottom of the nesting is + represented by a `None` index. + The second item is the maximum of the ndims of the arrays nested in + `arrays`. """ - def __init__(self, recurse_if): - self.recurse_if = recurse_if - - def map_reduce(self, x, f_map=lambda x, **kwargs: x, - f_reduce=lambda x, **kwargs: x, - f_kwargs=lambda **kwargs: kwargs, - **kwargs): - """ - Iterate over the nested list, applying: - * ``f_map`` (T -> U) to items - * ``f_reduce`` (Iterable[U] -> U) to mapped items - - For instance, ``map_reduce([[1, 2], 3, 4])`` is:: - - f_reduce([ - f_reduce([ - f_map(1), - f_map(2) - ]), - f_map(3), - f_map(4) - ]]) - - - State can be passed down through the calls with `f_kwargs`, - to iterables of mapped items. When kwargs are passed, as in - ``map_reduce([[1, 2], 3, 4], **kw)``, this becomes:: - - kw1 = f_kwargs(**kw) - kw2 = f_kwargs(**kw1) - f_reduce([ - f_reduce([ - f_map(1), **kw2) - f_map(2, **kw2) - ], **kw1), - f_map(3, **kw1), - f_map(4, **kw1) - ]], **kw) - """ - def f(x, **kwargs): - if not self.recurse_if(x): - return f_map(x, **kwargs) - else: - next_kwargs = f_kwargs(**kwargs) - return f_reduce(( - f(xi, **next_kwargs) - for xi in x - ), **kwargs) - return f(x, **kwargs) - - def walk(self, x, index=()): - """ - Iterate over x, yielding (index, value, entering), where - - * ``index``: a tuple of indices up to this point - * ``value``: equal to ``x[index[0]][...][index[-1]]``. On the first iteration, is - ``x`` itself - * ``entering``: bool. The result of ``recurse_if(value)`` - """ - do_recurse = self.recurse_if(x) - yield index, x, do_recurse - - if not do_recurse: - return - for i, xi in enumerate(x): - # yield from ... - for v in self.walk(xi, index + (i,)): - yield v + def format_index(index): + idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + return 'arrays' + idx_str + if type(arrays) is tuple: + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + '{} is a tuple. 
' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.'.format( + format_index(parent_index) + ) + ) + elif type(arrays) is list and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim = next(idxs_ndims) + for index, ndim in idxs_ndims: + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at depth " + "{}, but there is an element at depth {} ({})".format( + len(first_index), + len(index), + format_index(index) + ) + ) + return first_index, max_arr_ndim + elif type(arrays) is list and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + return parent_index, _nx.ndim(arrays) + + +def _block(arrays, max_depth, result_ndim): + """ + Internal implementation of block. `arrays` is the argument passed to + block. `max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). + """ + def atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=False, subok=True) + + def block_recursion(arrays, depth=0): + if depth < max_depth: + if len(arrays) == 0: + raise ValueError('Lists cannot be empty') + arrs = [block_recursion(arr, depth+1) for arr in arrays] + return _nx.concatenate(arrs, axis=-(max_depth-depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return atleast_nd(arrays, result_ndim) + + return block_recursion(arrays) def block(arrays): @@ -587,81 +602,6 @@ def block(arrays): """ - def atleast_nd(x, ndim): - x = asanyarray(x) - diff = max(ndim - x.ndim, 0) - return x[(None,)*diff + (Ellipsis,)] - - def format_index(index): - return 'arrays' + ''.join('[{}]'.format(i) for i in index) - - rec = _Recurser(recurse_if=lambda x: type(x) is list) - - # ensure that the lists are all matched in depth - list_ndim = None - any_empty = False - for index, value, entering in rec.walk(arrays): - if type(value) is tuple: - # not strictly necessary, but saves us from: - # - more than one way to do things - no point treating tuples like - # lists - # - horribly confusing behaviour that results when tuples are - # treated like ndarray - raise TypeError( - '{} is a tuple. ' - 'Only lists can be used to arrange blocks, and np.block does ' - 'not allow implicit conversion from tuple to ndarray.'.format( - format_index(index) - ) - ) - if not entering: - curr_depth = len(index) - elif len(value) == 0: - curr_depth = len(index) + 1 - any_empty = True - else: - continue - - if list_ndim is not None and list_ndim != curr_depth: - raise ValueError( - "List depths are mismatched. 
First element was at depth {}, " - "but there is an element at depth {} ({})".format( - list_ndim, - curr_depth, - format_index(index) - ) - ) - list_ndim = curr_depth - - # do this here so we catch depth mismatches first - if any_empty: - raise ValueError('Lists cannot be empty') - - # convert all the arrays to ndarrays - arrays = rec.map_reduce(arrays, - f_map=asanyarray, - f_reduce=list - ) - - # determine the maximum dimension of the elements - elem_ndim = rec.map_reduce(arrays, - f_map=lambda xi: xi.ndim, - f_reduce=max - ) - ndim = max(list_ndim, elem_ndim) - - # first axis to concatenate along - first_axis = ndim - list_ndim - - # Make all the elements the same dimension - arrays = rec.map_reduce(arrays, - f_map=lambda xi: atleast_nd(xi, ndim), - f_reduce=list - ) - - # concatenate innermost lists on the right, outermost on the left - return rec.map_reduce(arrays, - f_reduce=lambda xs, axis: _nx.concatenate(list(xs), axis=axis), - f_kwargs=lambda axis: dict(axis=axis+1), - axis=first_axis - ) + bottom_index, arr_ndim = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + return _block(arrays, list_ndim, max(arr_ndim, list_ndim)) diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py index 02dda5151..62b5cf580 100644 --- a/numpy/core/tests/test_arrayprint.py +++ b/numpy/core/tests/test_arrayprint.py @@ -157,6 +157,12 @@ class TestArray2String(object): assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == '[abcabc defdef]') + # check for backcompat that using FloatFormat works and emits warning + with assert_warns(DeprecationWarning): + fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False) + assert_equal(np.array2string(x, formatter={'float_kind': fmt}), + '[0. 1. 2.]') + def test_structure_format(self): dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 59e11f22e..f3f8706b5 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -2146,172 +2146,197 @@ def test_iter_no_broadcast(): assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) -def test_iter_nested_iters_basic(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) +class TestIterNested(object): - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) -def test_iter_nested_iters_reorder(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - # In 'K' order (default), it gets reordered - i, j = 
np.nested_iters(a, [[0], [2, 1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - i, j = np.nested_iters(a, [[1, 0], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[2, 0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0], [2, 1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[1, 0], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - i, j = np.nested_iters(a, [[2, 0], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) -def test_iter_nested_iters_flip_axes(): - # Test nested iteration with negative axes - a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, 
[[0], [1, 2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[0, 1], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - i, j = np.nested_iters(a, [[0, 2], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) -def test_iter_nested_iters_broadcast(): - # Test nested iteration with broadcasting - a = arange(2).reshape(2, 1) - b = arange(3).reshape(1, 3) + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) - i, j = np.nested_iters([a, b], [[0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) - i, j = np.nested_iters([a, b], [[1], [0]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) -def test_iter_nested_iters_dtype_copy(): - # Test nested iteration with a copy to change dtype + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # updateifcopy + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i, j, x, y = (None,)*4 # force the updateifcopy + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append([z for z in k]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - # copy - a = arange(6, dtype='i4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readonly', 'copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) - vals = None - - # updateifcopy - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - i, j, x, y = (None,)*4 # force the updateifcopy - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_nested_iters_dtype_buffered(): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): @@ -2639,28 +2664,6 @@ def test_0d_iter(): assert_equal(vals['d'], 0.5) -def test_0d_nested_iter(): - a = np.arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[], [1, 0, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0, 2], []]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - - i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) - vals = [] - for x in i: - for y in j: - vals.append([z for z in k]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. Dividing by 1024 will keep it small enough to diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index 5c1e569b7..deb2a407d 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -560,6 +560,28 @@ class TestBlock(object): assert_raises_regex(TypeError, 'tuple', np.block, ([1, 2], [3, 4])) assert_raises_regex(TypeError, 'tuple', np.block, [(1, 2), (3, 4)]) + def test_different_ndims(self): + a = 1. 
+        b = 2 * np.ones((1, 2))
+        c = 3 * np.ones((1, 1, 3))
+
+        result = np.block([a, b, c])
+        expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
+
+        assert_equal(result, expected)
+
+    def test_different_ndims_depths(self):
+        a = 1.
+        b = 2 * np.ones((1, 2))
+        c = 3 * np.ones((1, 2, 3))
+
+        result = np.block([[a, b], [c]])
+        expected = np.array([[[1., 2., 2.],
+                              [3., 3., 3.],
+                              [3., 3., 3.]]])
+
+        assert_equal(result, expected)
+
 
 if __name__ == "__main__":
     run_module_suite()
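
For context on the arrayprint.py hunks above: longdouble and clongdouble arrays now select the new 'longfloat' and 'longcomplexfloat' formatter entries, and the old per-type classes are kept only as deprecated subclasses of FloatingFormat and ComplexFloatingFormat. A minimal usage sketch mirroring the new check in test_arrayprint.py (the constructor arguments are taken from that test; the longdouble output depends on the platform's long double):

    import warnings
    import numpy as np

    x = np.arange(3.)

    # The old per-type class still constructs, but now warns and is simply
    # a thin wrapper around FloatingFormat.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # The instance is callable and can be passed as a custom formatter.
    print(np.array2string(x, formatter={'float_kind': fmt}))   # [0. 1. 2.]

    # longdouble input now goes through the new 'longfloat' entry rather
    # than the generic float formatter.
    print(np.array2string(np.array([0.25, 1.5], dtype=np.longdouble)))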
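For context on the shape_base.py rewrite: _block promotes each leaf to the result ndim with np.array(..., ndmin=...) and then concatenates innermost lists along the last axis, working outwards with axis=-(max_depth - depth). A hand-rolled sketch of that axis arithmetic for a depth-2 list of 2-D blocks, using only public calls (the A/B/C/D arrays are made up for illustration):

    import numpy as np

    A = np.ones((2, 2))
    B = 2 * np.ones((2, 1))
    C = 3 * np.ones((1, 2))
    D = 4 * np.ones((1, 1))

    # Depth-1 (innermost) lists: axis = -(max_depth - depth) = -(2 - 1) = -1
    top = np.concatenate([A, B], axis=-1)       # shape (2, 3)
    bottom = np.concatenate([C, D], axis=-1)    # shape (1, 3)

    # Depth-0 (outer) list: axis = -(2 - 0) = -2
    manual = np.concatenate([top, bottom], axis=-2)   # shape (3, 3)

    assert np.array_equal(manual, np.block([[A, B], [C, D]]))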
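The test_nditer.py changes only regroup the existing nested-iteration tests into a TestIterNested class; the behaviour they exercise is unchanged. For reference, the core pattern those tests share, shown standalone:

    import numpy as np

    a = np.arange(12).reshape(2, 3, 2)

    # Outer iterator over axis 0, inner iterator over axes 1 and 2; the
    # inner iterator is re-initialised at each step of the outer one.
    i, j = np.nested_iters(a, [[0], [1, 2]])
    vals = [[int(y) for y in j] for x in i]
    print(vals)   # [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]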
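The new TestBlock cases above check the user-visible effect of the rewrite: blocks of different ndim are accepted, and structural problems are reported by the depth check before any concatenation happens. A short sketch of both behaviours, with the error wording following the strings in _block_check_depths_match:

    import numpy as np

    # Blocks of different ndim are broadcast up to the result ndim first.
    a = 1.
    b = 2 * np.ones((1, 2))
    c = 3 * np.ones((1, 1, 3))
    print(np.block([a, b, c]))
    # [[[1. 2. 2. 3. 3. 3.]]]

    # Mismatched nesting depths raise, naming the offending sub-list.
    try:
        np.block([1, [2]])
    except ValueError as e:
        print(e)   # "List depths are mismatched. ..." plus the index

    # Tuples are rejected explicitly rather than being treated as lists.
    try:
        np.block(([1, 2], [3, 4]))
    except TypeError as e:
        print(e)   # "arrays is a tuple. Only lists can be used ..."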