summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md4
-rw-r--r--.gitignore1
-rw-r--r--README.md8
-rw-r--r--azure-pipelines.yml34
-rw-r--r--benchmarks/benchmarks/bench_core.py6
-rw-r--r--benchmarks/benchmarks/bench_function_base.py45
-rw-r--r--doc/C_STYLE_GUIDE.rst.txt2
-rw-r--r--doc/HOWTO_RELEASE.rst.txt38
-rw-r--r--doc/neps/nep-0018-array-function-protocol.rst357
-rw-r--r--doc/neps/nep-0019-rng-policy.rst70
-rw-r--r--doc/release/1.17.0-notes.rst119
-rw-r--r--doc/source/reference/c-api.array.rst19
-rw-r--r--doc/source/user/building.rst7
-rw-r--r--doc/source/user/c-info.how-to-extend.rst41
-rw-r--r--numpy/conftest.py7
-rw-r--r--numpy/core/_add_newdocs.py16
-rw-r--r--numpy/core/_methods.py75
-rw-r--r--numpy/core/code_generators/generate_umath.py19
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py58
-rw-r--r--numpy/core/defchararray.py2
-rw-r--r--numpy/core/fromnumeric.py36
-rw-r--r--numpy/core/multiarray.py24
-rw-r--r--numpy/core/numeric.py62
-rw-r--r--numpy/core/overrides.py26
-rw-r--r--numpy/core/setup.py3
-rw-r--r--numpy/core/src/common/lowlevel_strided_loops.h3
-rw-r--r--numpy/core/src/common/npy_longdouble.c82
-rw-r--r--numpy/core/src/common/npy_longdouble.h10
-rw-r--r--numpy/core/src/common/npy_sort.h.src11
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src26
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.c2
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src197
-rw-r--r--numpy/core/src/multiarray/calculation.c21
-rw-r--r--numpy/core/src/multiarray/common.c37
-rw-r--r--numpy/core/src/multiarray/compiled_base.c193
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c9
-rw-r--r--numpy/core/src/multiarray/item_selection.c12
-rw-r--r--numpy/core/src/multiarray/iterators.c33
-rw-r--r--numpy/core/src/multiarray/methods.c32
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c7
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.h2
-rw-r--r--numpy/core/src/multiarray/number.c2
-rw-r--r--numpy/core/src/multiarray/number.h1
-rw-r--r--numpy/core/src/npysort/radixsort.c.src229
-rw-r--r--numpy/core/src/umath/clip.c.src119
-rw-r--r--numpy/core/src/umath/clip.h.src18
-rw-r--r--numpy/core/src/umath/fast_loop_macros.h8
-rw-r--r--numpy/core/src/umath/funcs.inc.src11
-rw-r--r--numpy/core/src/umath/loops.c.src47
-rw-r--r--numpy/core/src/umath/loops.h.src3
-rw-r--r--numpy/core/src/umath/matmul.c.src79
-rw-r--r--numpy/core/src/umath/matmul.h.src2
-rw-r--r--numpy/core/src/umath/simd.inc.src118
-rw-r--r--numpy/core/tests/test_longdouble.py26
-rw-r--r--numpy/core/tests/test_multiarray.py170
-rw-r--r--numpy/core/tests/test_numeric.py211
-rw-r--r--numpy/core/tests/test_overrides.py62
-rw-r--r--numpy/core/tests/test_ufunc.py53
-rw-r--r--numpy/core/tests/test_umath.py27
-rw-r--r--numpy/distutils/conv_template.py28
-rw-r--r--numpy/distutils/from_template.py28
-rw-r--r--numpy/distutils/line_endings.py6
-rw-r--r--numpy/distutils/system_info.py99
-rwxr-xr-xnumpy/f2py/f2py2e.py8
-rw-r--r--numpy/f2py/rules.py103
-rw-r--r--numpy/lib/format.py144
-rw-r--r--numpy/lib/function_base.py24
-rw-r--r--numpy/lib/histograms.py11
-rw-r--r--numpy/lib/nanfunctions.py25
-rw-r--r--numpy/lib/npyio.py13
-rw-r--r--numpy/lib/polynomial.py2
-rw-r--r--numpy/lib/recfunctions.py4
-rw-r--r--numpy/lib/shape_base.py7
-rw-r--r--numpy/lib/stride_tricks.py2
-rw-r--r--numpy/lib/tests/test_format.py81
-rw-r--r--numpy/lib/tests/test_function_base.py2
-rw-r--r--numpy/lib/tests/test_histograms.py10
-rw-r--r--numpy/lib/tests/test_packbits.py139
-rw-r--r--numpy/lib/tests/test_recfunctions.py9
-rw-r--r--numpy/lib/type_check.py7
-rw-r--r--numpy/linalg/lapack_lite/clapack_scrub.py5
-rw-r--r--numpy/linalg/lapack_lite/fortran.py17
-rw-r--r--numpy/linalg/linalg.py12
-rw-r--r--numpy/ma/core.py20
-rw-r--r--numpy/ma/tests/test_core.py11
-rw-r--r--numpy/ma/tests/test_old_ma.py14
-rw-r--r--numpy/ma/tests/test_regression.py4
-rw-r--r--numpy/random/mtrand/distributions.c31
-rw-r--r--numpy/random/mtrand/mtrand.pyx11
-rw-r--r--numpy/random/tests/test_random.py6
-rw-r--r--numpy/testing/_private/utils.py17
-rw-r--r--numpy/testing/tests/test_utils.py1
-rw-r--r--numpy/tests/test_warnings.py2
-rw-r--r--site.cfg.example16
-rwxr-xr-xtools/pypy-test.sh10
-rwxr-xr-x[-rw-r--r--]tools/test-installed-numpy.py4
-rwxr-xr-xtools/travis-test.sh4
97 files changed, 2901 insertions, 978 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index b6da4b772..e12eea7bd 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,7 @@
<!-- Please be sure you are following the instructions in the dev guidelines
-http://www.numpy.org/devdocs/dev/gitwash/development_workflow.html
+http://www.numpy.org/devdocs/dev/development_workflow.html
-->
<!-- We'd appreciate it if your commit message is properly formatted
-http://www.numpy.org/devdocs/dev/gitwash/development_workflow.html#writing-the-commit-message
+http://www.numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message
-->
diff --git a/.gitignore b/.gitignore
index a31d6ea44..8e96d4154 100644
--- a/.gitignore
+++ b/.gitignore
@@ -142,6 +142,7 @@ numpy/core/src/npysort/binsearch.c
numpy/core/src/npysort/heapsort.c
numpy/core/src/npysort/mergesort.c
numpy/core/src/npysort/quicksort.c
+numpy/core/src/npysort/radixsort.c
numpy/core/src/npysort/selection.c
numpy/core/src/npysort/timsort.c
numpy/core/src/npysort/sort.c
diff --git a/README.md b/README.md
index 972c018af..f1d024565 100644
--- a/README.md
+++ b/README.md
@@ -11,11 +11,13 @@
NumPy is the fundamental package needed for scientific computing with Python.
-- **Website (including documentation):** https://www.numpy.org
+- **Website:** https://www.numpy.org
+- **Documentation:** http://docs.scipy.org/
- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion
-- **Source:** https://github.com/numpy/numpy
-- **Bug reports:** https://github.com/numpy/numpy/issues
+- **Source code:** https://github.com/numpy/numpy
- **Contributing:** https://www.numpy.org/devdocs/dev/index.html
+- **Bug reports:** https://github.com/numpy/numpy/issues
+- **Report a security vulnerability:** https://tidelift.com/docs/security
It provides:
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 6162e80ee..7454cb63e 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -5,6 +5,12 @@ trigger:
include:
- master
- maintenance/*
+variables:
+ # OpenBLAS_version should be updated
+ # to match numpy-wheels repo
+ OpenBLAS_version: 0.3.7.dev
+ TEST_GET_CONFIG: import numpy, ctypes; dll = ctypes.CDLL(numpy.core._multiarray_umath.__file__); get_config = dll.openblas_get_config; get_config.restype=ctypes.c_char_p; res = get_config(); print('OpenBLAS get_config returned', str(res)); assert b'OpenBLAS $(OpenBLAS_version)' in res
+
jobs:
- job: Linux_Python_36_32bit_full_with_asserts
pool:
@@ -20,13 +26,15 @@ jobs:
apt-get -y install gfortran-5 wget && \
cd .. && \
mkdir openblas && cd openblas && \
- wget https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-manylinux1_i686.tar.gz && \
- tar zxvf openblas-v0.3.3-186-g701ea883-manylinux1_i686.tar.gz && \
+ wget https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-manylinux1_i686.tar.gz && \
+ tar zxvf openblas-v0.3.5-274-g6a8b4269-manylinux1_i686.tar.gz && \
cp -r ./usr/local/lib/* /usr/lib && \
cp ./usr/local/include/* /usr/include && \
cd ../numpy && \
+ python3 -m pip install . && \
F77=gfortran-5 F90=gfortran-5 \
- CFLAGS='-UNDEBUG -std=c99' python3 runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml"
+ CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml && \
+ cd ../openblas && python3 -c \"$(TEST_GET_CONFIG)\""
displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
- task: PublishTestResults@2
condition: succeededOrFailed()
@@ -75,15 +83,15 @@ jobs:
# matches our MacOS wheel builds -- currently based
# primarily on file size / name details
- script: |
- wget "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-macosx_10_9_intel-gf_1becaaa.tar.gz"
- tar -zxvf openblas-v0.3.3-186-g701ea883-macosx_10_9_intel-gf_1becaaa.tar.gz
+ wget "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-macosx_10_9_x86_64-gf_1becaaa.tar.gz"
+ tar -zxvf openblas-v0.3.5-274-g6a8b4269-macosx_10_9_x86_64-gf_1becaaa.tar.gz
# manually link to appropriate system paths
cp ./usr/local/lib/* /usr/local/lib/
cp ./usr/local/include/* /usr/local/include/
displayName: 'install pre-built openblas'
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx==1.8.5 numpydoc matplotlib
+ - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx==1.8.5 numpydoc
displayName: 'Install dependencies; some are optional to avoid test skips'
- script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'"
displayName: 'Check for unreachable code paths in Python modules'
@@ -97,10 +105,16 @@ jobs:
ATLAS: None
ACCELERATE: None
CC: /usr/bin/clang
+ # wait until after dev build of NumPy to pip
+ # install matplotlib to avoid pip install of older numpy
+ - script: python -m pip install matplotlib
+ displayName: 'Install matplotlib before refguide run'
- script: python runtests.py -g --refguide-check
displayName: 'Run Refuide Check'
- - script: python runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml
+ - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml
displayName: 'Run Full NumPy Test Suite'
+ - bash: pushd . && cd .. && python -c "$(TEST_GET_CONFIG)" && popd
+ displayName: 'Verify OpenBLAS version'
- task: PublishTestResults@2
condition: succeededOrFailed()
inputs:
@@ -113,8 +127,8 @@ jobs:
variables:
# openblas URLs from numpy-wheels
# appveyor / Windows config
- OPENBLAS_32: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-win32-gcc_7_1_0.zip"
- OPENBLAS_64: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-win_amd64-gcc_7_1_0.zip"
+ OPENBLAS_32: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-win32-gcc_7_1_0.zip"
+ OPENBLAS_64: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-win_amd64-gcc_7_1_0.zip"
strategy:
maxParallel: 6
matrix:
@@ -167,7 +181,7 @@ jobs:
Write-Host "Python Version: $pyversion"
$target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a"
Write-Host "target path: $target"
- cp $tmpdir\$(BITS)\lib\libopenblas_v0.3.3-186-g701ea883-gcc_7_1_0.a $target
+ cp $tmpdir\$(BITS)\lib\libopenblas_v0.3.5-274-g6a8b4269-gcc_7_1_0.a $target
displayName: 'Download / Install OpenBLAS'
- powershell: |
choco install -y mingw --forcex86 --force --version=5.3.0
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py
index 194ce3218..9e409dd91 100644
--- a/benchmarks/benchmarks/bench_core.py
+++ b/benchmarks/benchmarks/bench_core.py
@@ -162,12 +162,18 @@ class UnpackBits(Benchmark):
def time_unpackbits(self):
np.unpackbits(self.d)
+ def time_unpackbits_little(self):
+ np.unpackbits(self.d, bitorder="little")
+
def time_unpackbits_axis0(self):
np.unpackbits(self.d2, axis=0)
def time_unpackbits_axis1(self):
np.unpackbits(self.d2, axis=1)
+ def time_unpackbits_axis1_little(self):
+ np.unpackbits(self.d2, bitorder="little", axis=1)
+
class Indices(Benchmark):
def time_indices(self):
diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py
index 7ea8c39b0..f730bf6ba 100644
--- a/benchmarks/benchmarks/bench_function_base.py
+++ b/benchmarks/benchmarks/bench_function_base.py
@@ -95,6 +95,17 @@ class Select(Benchmark):
np.select(self.cond_large, ([self.d, self.e] * 10))
+def memoize(f):
+ _memoized = {}
+ def wrapped(*args):
+ if args not in _memoized:
+ _memoized[args] = f(*args)
+
+ return _memoized[args].copy()
+
+ return f
+
+
class SortGenerator(object):
# The size of the unsorted area in the "random unsorted area"
# benchmarks
@@ -103,6 +114,7 @@ class SortGenerator(object):
BUBBLE_SIZE = 100
@staticmethod
+ @memoize
def random(size, dtype):
"""
Returns a randomly-shuffled array.
@@ -112,6 +124,7 @@ class SortGenerator(object):
return arr
@staticmethod
+ @memoize
def ordered(size, dtype):
"""
Returns an ordered array.
@@ -119,6 +132,7 @@ class SortGenerator(object):
return np.arange(size, dtype=dtype)
@staticmethod
+ @memoize
def reversed(size, dtype):
"""
Returns an array that's in descending order.
@@ -126,6 +140,7 @@ class SortGenerator(object):
return np.arange(size-1, -1, -1, dtype=dtype)
@staticmethod
+ @memoize
def uniform(size, dtype):
"""
Returns an array that has the same value everywhere.
@@ -133,6 +148,7 @@ class SortGenerator(object):
return np.ones(size, dtype=dtype)
@staticmethod
+ @memoize
def swapped_pair(size, dtype, swap_frac):
"""
Returns an ordered array, but one that has ``swap_frac * size``
@@ -145,6 +161,7 @@ class SortGenerator(object):
return a
@staticmethod
+ @memoize
def sorted_block(size, dtype, block_size):
"""
Returns an array with blocks that are all sorted.
@@ -159,6 +176,7 @@ class SortGenerator(object):
return np.array(b)
@classmethod
+ @memoize
def random_unsorted_area(cls, size, dtype, frac, area_size=None):
"""
This type of array has random unsorted areas such that they
@@ -176,6 +194,7 @@ class SortGenerator(object):
return a
@classmethod
+ @memoize
def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
"""
This type of array has ``bubble_num`` random unsorted areas.
@@ -197,7 +216,7 @@ class Sort(Benchmark):
# In NumPy 1.17 and newer, 'merge' can be one of several
# stable sorts, it isn't necessarily merge sort.
['quick', 'merge', 'heap'],
- ['float64', 'int64', 'uint64'],
+ ['float64', 'int64', 'int16'],
[
('random',),
('ordered',),
@@ -206,15 +225,15 @@ class Sort(Benchmark):
('sorted_block', 10),
('sorted_block', 100),
('sorted_block', 1000),
- ('swapped_pair', 0.01),
- ('swapped_pair', 0.1),
- ('swapped_pair', 0.5),
- ('random_unsorted_area', 0.5),
- ('random_unsorted_area', 0.1),
- ('random_unsorted_area', 0.01),
- ('random_bubble', 1),
- ('random_bubble', 5),
- ('random_bubble', 10),
+ # ('swapped_pair', 0.01),
+ # ('swapped_pair', 0.1),
+ # ('swapped_pair', 0.5),
+ # ('random_unsorted_area', 0.5),
+ # ('random_unsorted_area', 0.1),
+ # ('random_unsorted_area', 0.01),
+ # ('random_bubble', 1),
+ # ('random_bubble', 5),
+ # ('random_bubble', 10),
],
]
param_names = ['kind', 'dtype', 'array_type']
@@ -227,10 +246,10 @@ class Sort(Benchmark):
array_class = array_type[0]
self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:])
- def time_sort_inplace(self, kind, dtype, array_type):
- self.arr.sort(kind=kind)
-
def time_sort(self, kind, dtype, array_type):
+ # Using np.sort(...) instead of arr.sort(...) because it makes a copy.
+ # This is important because the data is prepared once per benchmark, but
+ # used across multiple runs.
np.sort(self.arr, kind=kind)
def time_argsort(self, kind, dtype, array_type):
diff --git a/doc/C_STYLE_GUIDE.rst.txt b/doc/C_STYLE_GUIDE.rst.txt
index f68abaf7c..fa1d3a77d 100644
--- a/doc/C_STYLE_GUIDE.rst.txt
+++ b/doc/C_STYLE_GUIDE.rst.txt
@@ -138,7 +138,7 @@ Code lay-out
the open paren, no spaces inside the parens, no spaces before
commas, one space after each comma.
-* Always put spaces around assignment, Boolean and comparison
+* Always put spaces around the assignment, Boolean and comparison
operators. In expressions using a lot of operators, add spaces
around the outermost (lowest priority) operators.
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index a6a8fe8ab..e2aea12b7 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -5,7 +5,7 @@ Current build and release info
==============================
The current info on building and releasing NumPy and SciPy is scattered in
-several places. It should be summarized in one place, updated and where
+several places. It should be summarized in one place, updated, and where
necessary described in more detail. The sections below list all places where
useful info can be found.
@@ -37,8 +37,8 @@ Supported platforms and versions
================================
Python 2.7 and >=3.4 are the currently supported versions when building from
-source. We test numpy against all these versions every time we merge code to
-trunk. Binary installers may be available for a subset of these versions (see
+source. We test NumPy against all these versions every time we merge code to
+master. Binary installers may be available for a subset of these versions (see
below).
OS X
@@ -54,7 +54,7 @@ Windows
-------
We build 32- and 64-bit wheels for Python 2.7, 3.4, 3.5 on Windows. Windows
-XP, Vista, 7, 8 and 10 are supported. We build numpy using the MSVC compilers
+XP, Vista, 7, 8 and 10 are supported. We build NumPy using the MSVC compilers
on Appveyor, but we are hoping to update to a `mingw-w64 toolchain
<https://mingwpy.github.io>`_. The Windows wheels use ATLAS for BLAS / LAPACK.
@@ -62,7 +62,7 @@ Linux
-----
We build and ship `manylinux1 <https://www.python.org/dev/peps/pep-0513>`_
-wheels for numpy. Many Linux distributions include their own binary builds
+wheels for NumPy. Many Linux distributions include their own binary builds
of NumPy.
BSD / Solaris
@@ -93,7 +93,7 @@ each platform. At the moment this means:
* Manylinux1 wheels use the gcc provided on the Manylinux docker images.
You will need Cython for building the binaries. Cython compiles the ``.pyx``
-files in the numpy distribution to ``.c`` files.
+files in the NumPy distribution to ``.c`` files.
Building source archives and wheels
-----------------------------------
@@ -130,9 +130,9 @@ Uploading to PyPI
Generating author/pr lists
--------------------------
-You will need an personal access token
+You will need a personal access token
`<https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/>`_
-so that scripts can access the github numpy repository
+so that scripts can access the github NumPy repository.
* gitpython (pip)
* pygithub (pip)
@@ -182,8 +182,8 @@ After a date is set, create a new maintenance/x.y.z branch, add new empty
release notes for the next version in the master branch and update the Trac
Milestones.
-Make sure current trunk builds a package correctly
---------------------------------------------------
+Make sure current branch builds a package correctly
+---------------------------------------------------
::
git clean -fxd
@@ -191,7 +191,7 @@ Make sure current trunk builds a package correctly
python setup.py sdist
To actually build the binaries after everything is set up correctly, the
-release.sh script can be used. For details of the build process itself it is
+release.sh script can be used. For details of the build process itself, it is
best to read the pavement.py script.
.. note:: The following steps are repeated for the beta(s), release
@@ -233,7 +233,7 @@ There are three steps to the process.
2. If the C_API_VERSION in the first step has changed, or if the hash of
the API has changed, the cversions.txt file needs to be updated. To check
- the hash, run the script numpy/core/cversions.py and note the api hash that
+ the hash, run the script numpy/core/cversions.py and note the API hash that
is printed. If that hash does not match the last hash in
numpy/core/code_generators/cversions.txt the hash has changed. Using both
the appropriate C_API_VERSION and hash, add a new entry to cversions.txt.
@@ -244,7 +244,7 @@ There are three steps to the process.
definitive.
If steps 1 and 2 are done correctly, compiling the release should not give
- a warning "API mismatch detect at the beginning of the build.
+ a warning "API mismatch detect at the beginning of the build".
3. The numpy/core/include/numpy/numpyconfig.h will need a new
NPY_X_Y_API_VERSION macro, where X and Y are the major and minor version
@@ -271,7 +271,7 @@ Mention at least the following:
- outlook for the near future
Also make sure that as soon as the branch is made, there is a new release
-notes file in trunk for the next release.
+notes file in the master branch for the next release.
Update the release status and create a release "tag"
----------------------------------------------------
@@ -318,10 +318,10 @@ The ``-s`` flag makes a PGP (usually GPG) signed tag. Please do sign the
release tags.
The release tag should have the release number in the annotation (tag
-message). Unfortunately the name of a tag can be changed without breaking the
+message). Unfortunately, the name of a tag can be changed without breaking the
signature, the contents of the message cannot.
-See : https://github.com/scipy/scipy/issues/4919 for a discussion of signing
+See: https://github.com/scipy/scipy/issues/4919 for a discussion of signing
release tags, and https://keyring.debian.org/creating-key.html for instructions
on creating a GPG key if you do not have one.
@@ -479,9 +479,9 @@ Announce to the lists
The release should be announced on the mailing lists of
NumPy and SciPy, to python-announce, and possibly also those of
-Matplotlib,IPython and/or Pygame.
+Matplotlib, IPython and/or Pygame.
-During the beta/RC phase an explicit request for testing the binaries with
+During the beta/RC phase, an explicit request for testing the binaries with
several other libraries (SciPy/Matplotlib/Pygame) should be posted on the
mailing list.
@@ -497,5 +497,5 @@ After the final release is announced, a few administrative tasks are left to be
done:
- Forward port changes in the release branch to release notes and release
- scripts, if any, to trunk.
+ scripts, if any, to master branch.
- Update the Milestones in Trac.
diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst
index ffe780c79..de5adeacd 100644
--- a/doc/neps/nep-0018-array-function-protocol.rst
+++ b/doc/neps/nep-0018-array-function-protocol.rst
@@ -10,6 +10,7 @@ NEP 18 — A dispatch mechanism for NumPy's high level array functions
:Status: Provisional
:Type: Standards Track
:Created: 2018-05-29
+:Updated: 2019-04-11
:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-August/078493.html
Abstact
@@ -199,6 +200,83 @@ include *all* of the corresponding NumPy function's optional arguments
Optional arguments are only passed in to ``__array_function__`` if they
were explicitly used in the NumPy function call.
+.. note::
+
+ Just like the case for builtin special methods like ``__add__``, properly
+ written ``__array_function__`` methods should always return
+ ``NotImplemented`` when an unknown type is encountered. Otherwise, it will
+ be impossible to correctly override NumPy functions from another object
+ if the operation also includes one of your objects.
+
+Avoiding nested ``__array_function__`` overrides
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The special ``__skip_array_function__`` attribute found on NumPy functions that
+support overrides with ``__array_function__`` allows for calling these
+functions without any override checks.
+
+``__skip_array_function__`` always points back to the original NumPy-array
+specific implementation of a function. These functions do not check for
+``__array_function__`` overrides, and instead usually coerce all of their
+array-like arguments to NumPy arrays.
+
+.. note::
+
+ ``__skip_array_function__`` was not included as part of the initial
+ opt-in-only preview of ``__array_function__`` in NumPy 1.16.
+
+Defaulting to NumPy's coercive implementations
+''''''''''''''''''''''''''''''''''''''''''''''
+
+Some projects may prefer to default to NumPy's implementation, rather than
+explicitly defining implementing a supported API. This allows for incrementally
+overriding NumPy's API in projects that already support it implicitly by
+allowing their objects to be converted into NumPy arrays (e.g., because they
+implemented special methods such as ``__array__``). We don't recommend this
+for most new projects ("Explicit is better than implicit"), but in some cases
+it is the most expedient option.
+
+Adapting the previous example:
+
+.. code:: python
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ # It is still best practice to defer to unrecognized types
+ if not all(issubclass(t, (MyArray, np.ndarray)) for t in types):
+ return NotImplemented
+
+ my_func = HANDLED_FUNCTIONS.get(func)
+ if my_func is None:
+ return func.__skip_array_function__(*args, **kwargs)
+ return my_func(*args, **kwargs)
+
+ def __array__(self, dtype):
+ # convert this object into a NumPy array
+
+Now, if a NumPy function that isn't explicitly handled is called on
+``MyArray`` object, the operation will act (almost) as if MyArray's
+``__array_function__`` method never existed.
+
+Explicitly reusing NumPy's implementation
+'''''''''''''''''''''''''''''''''''''''''
+
+``__skip_array_function__`` is also convenient for cases where an explicit
+set of NumPy functions should still use NumPy's implementation, by
+calling ``func.__skip__array_function__(*args, **kwargs)`` inside
+``__array_function__`` instead of ``func(*args, **kwargs)`` (which would
+lead to infinite recursion). For example, to explicitly reuse NumPy's
+``array_repr()`` function on a custom array type:
+
+.. code:: python
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ ...
+ if func is np.array_repr:
+ return np.array_repr.__skip_array_function__(*args, **kwargs)
+ ...
+
Necessary changes within the NumPy codebase itself
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -300,6 +378,13 @@ In particular:
- If all ``__array_function__`` methods return ``NotImplemented``,
NumPy will raise ``TypeError``.
+If no ``__array_function__`` methods exist, NumPy will default to calling its
+own implementation, intended for use on NumPy arrays. This case arises, for
+example, when all array-like arguments are Python numbers or lists.
+(NumPy arrays do have a ``__array_function__`` method, given below, but it
+always returns ``NotImplemented`` if any argument other than a NumPy array
+subclass implements ``__array_function__``.)
+
One deviation from the current behavior of ``__array_ufunc__`` is that NumPy
will only call ``__array_function__`` on the *first* argument of each unique
type. This matches Python's
@@ -310,31 +395,41 @@ between these two dispatch protocols, we should
`also update <https://github.com/numpy/numpy/issues/11306>`_
``__array_ufunc__`` to match this behavior.
-Special handling of ``numpy.ndarray``
-'''''''''''''''''''''''''''''''''''''
+The ``__array_function__`` method on ``numpy.ndarray``
+''''''''''''''''''''''''''''''''''''''''''''''''''''''
The use cases for subclasses with ``__array_function__`` are the same as those
-with ``__array_ufunc__``, so ``numpy.ndarray`` should also define a
-``__array_function__`` method mirroring ``ndarray.__array_ufunc__``:
+with ``__array_ufunc__``, so ``numpy.ndarray`` also defines a
+``__array_function__`` method.
+
+``ndarray.__array_function__`` is a trivial case of the "Defaulting to NumPy's
+implementation" strategy described above: *every* NumPy function on NumPy
+arrays is defined by calling NumPy's own implementation if there are other
+overrides:
.. code:: python
def __array_function__(self, func, types, args, kwargs):
- # Cannot handle items that have __array_function__ other than our own.
- for t in types:
- if (hasattr(t, '__array_function__') and
- t.__array_function__ is not ndarray.__array_function__):
- return NotImplemented
-
- # Arguments contain no overrides, so we can safely call the
- # overloaded function again.
- return func(*args, **kwargs)
-
-To avoid infinite recursion, the dispatch rules for ``__array_function__`` need
-also the same special case they have for ``__array_ufunc__``: any arguments with
-an ``__array_function__`` method that is identical to
-``numpy.ndarray.__array_function__`` are not be called as
-``__array_function__`` implementations.
+ if not all(issubclass(t, ndarray) for t in types):
+ # Defer to any non-subclasses that implement __array_function__
+ return NotImplemented
+ return func.__skip_array_function__(*args, **kwargs)
+
+This method matches NumPy's dispatching rules, so for most part it is
+possible to pretend that ``ndarray.__array_function__`` does not exist.
+
+The ``__array_function__`` protocol always calls subclasses before
+superclasses, so if any ``ndarray`` subclasses are involved in an operation,
+they will get the chance to override it, just as if any other argument
+overrides ``__array_function__``. However, the default behavior in an operation
+that combines a base NumPy array and a subclass is different: if the subclass
+returns ``NotImplemented``, NumPy's implementation of the function will be
+called instead of raising an exception. This is appropriate since subclasses
+are `expected to be substitutable <https://en.wikipedia.org/wiki/Liskov_substitution_principle>`_.
+
+Notice that the ``__skip_array_function__`` function attribute allows us
+to avoid the special cases for NumPy arrays that were needed in the
+``__array_ufunc__`` protocol.
Changes within NumPy functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -347,12 +442,12 @@ without effect if no arguments implement the ``__array_function__``
protocol.
In most cases, these functions should written using the
-``array_function_dispatch`` decorator, which also associates dispatcher
-functions:
+``array_function_dispatch`` decorator. Error checking aside, here's what the
+core implementation looks like:
.. code:: python
- def array_function_dispatch(dispatcher):
+ def array_function_dispatch(dispatcher, module=None):
"""Wrap a function for dispatch with the __array_function__ protocol."""
def decorator(implementation):
@functools.wraps(implementation)
@@ -360,58 +455,76 @@ functions:
relevant_args = dispatcher(*args, **kwargs)
return implement_array_function(
implementation, public_api, relevant_args, args, kwargs)
+ if module is not None:
+ public_api.__module__ = module
+ public_api.__skip_array_function__ = implementation
return public_api
return decorator
# example usage
- def _broadcast_to_dispatcher(array, shape, subok=None):
+ def broadcast_to(array, shape, subok=None):
return (array,)
- @array_function_dispatch(_broadcast_to_dispatcher)
+ @array_function_dispatch(broadcast_to, module='numpy')
def broadcast_to(array, shape, subok=False):
... # existing definition of np.broadcast_to
Using a decorator is great! We don't need to change the definitions of
existing NumPy functions, and only need to write a few additional lines
-for the dispatcher function. We could even reuse a single dispatcher for
-families of functions with the same signature (e.g., ``sum`` and ``prod``).
-For such functions, the largest change could be adding a few lines to the
-docstring to note which arguments are checked for overloads.
+to define dispatcher function. We originally thought that we might want to
+implement dispatching for some NumPy functions without the decorator, but
+so far it seems to cover every case.
-It's particularly worth calling out the decorator's use of
+Within NumPy's implementation, it's worth calling out the decorator's use of
``functools.wraps``:
- This ensures that the wrapped function has the same name and docstring as
the wrapped NumPy function.
- On Python 3, it also ensures that the decorator function copies the original
function signature, which is important for introspection based tools such as
- auto-complete. If we care about preserving function signatures on Python 2,
- for the `short while longer <http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html>`_
- that NumPy supports Python 2.7, we do could do so by adding a vendored
- dependency on the (single-file, BSD licensed)
- `decorator library <https://github.com/micheles/decorator>`_.
+ auto-complete.
- Finally, it ensures that the wrapped function
`can be pickled <http://gael-varoquaux.info/programming/decoration-in-python-done-right-decorating-and-pickling.html>`_.
-In a few cases, it would not make sense to use the ``array_function_dispatch``
-decorator directly, but override implementation in terms of
-``implement_array_function`` should still be straightforward.
-
-- Functions written entirely in C (e.g., ``np.concatenate``) can't use
- decorators, but they could still use a C equivalent of
- ``implement_array_function``. If performance is not a
- concern, they could also be easily wrapped with a small Python wrapper.
-- ``np.einsum`` does complicated argument parsing to handle two different
- function signatures. It would probably be best to avoid the overhead of
- parsing it twice in the typical case of no overrides.
-
-Fortunately, in each of these cases so far, the functions already has a generic
-signature of the form ``*args, **kwargs``, which means we don't need to worry
-about potential inconsistency between how functions are called and what we pass
-to ``__array_function__``. (In C, arguments for all Python functions are parsed
-from a tuple ``*args`` and dict ``**kwargs``.) This shouldn't stop us from
-writing overrides for functions with non-generic signatures that can't use the
-decorator, but we should consider these cases carefully.
+The example usage illustrates several best practices for writing dispatchers
+relevant to NumPy contributors:
+
+- We gave the "dispatcher" function ``broadcast_to`` the exact same name and
+ arguments as the "implementation" function. The matching arguments are
+ required, because the function generated by ``array_function_dispatch`` will
+ call the dispatcher in *exactly* the same way as it was called. The matching
+ function name isn't strictly necessary, but ensures that Python reports the
+ original function name in error messages if invalid arguments are used, e.g.,
+ ``TypeError: broadcast_to() got an unexpected keyword argument``.
+
+- We passed the ``module`` argument, which in turn sets the ``__module__``
+ attribute on the generated function. This is for the benefit of better error
+ messages, here for errors raised internally by NumPy when no implementation
+ is found, e.g.,
+ ``TypeError: no implementation found for 'numpy.broadcast_to'``. Setting
+ ``__module__`` to the canonical location in NumPy's public API encourages
+ users to use NumPy's public API for identifying functions in
+ ``__array_function__``.
+
+- The dispatcher is a function that returns a tuple, rather than an equivalent
+ (and equally valid) generator using ``yield``:
+
+ .. code:: python
+
+ # example usage
+ def broadcast_to(array, shape, subok=None):
+ yield array
+
+ This is no accident: NumPy's implementation of dispatch for
+ ``__array_function__`` is fastest when dispatcher functions return a builtin
+ sequence type (``tuple`` or ``list``).
+
+ On a related note, it's perfectly fine for dispatchers to return arguments
+ even if in some cases you *know* that they cannot have an
+ ``__array_function__`` method. This can arise for functions with default
+ arguments (e.g., ``None``) or complex signatures. NumPy's dispatching logic
+ sorts out these cases very quickly, so it generally is not worth the trouble
+ of parsing them on your own.
.. note::
@@ -426,10 +539,10 @@ An important virtue of this approach is that it allows for adding new
optional arguments to NumPy functions without breaking code that already
relies on ``__array_function__``.
-This is not a theoretical concern. The implementation of overrides *within*
-functions like ``np.sum()`` rather than defining a new function capturing
-``*args`` and ``**kwargs`` necessitated some awkward gymnastics to ensure that
-the new ``keepdims`` argument is only passed in cases where it is used, e.g.,
+This is not a theoretical concern. NumPy's older, haphazard implementation of
+overrides *within* functions like ``np.sum()`` necessitated some awkward
+gymnastics when we decided to add new optional arguments, e.g., the new
+``keepdims`` argument is only passed in cases where it is used:
.. code:: python
@@ -439,11 +552,12 @@ the new ``keepdims`` argument is only passed in cases where it is used, e.g.,
kwargs['keepdims'] = keepdims
return array.sum(..., **kwargs)
-This also makes it possible to add optional arguments to ``__array_function__``
-implementations incrementally and only in cases where it makes sense. For
-example, a library implementing immutable arrays would not be required to
-explicitly include an unsupported ``out`` argument. Doing this properly for all
-optional arguments is somewhat onerous, e.g.,
+For ``__array_function__`` implementors, this also means that it is possible
+to implement even existing optional arguments incrementally, and only in cases
+where it makes sense. For example, a library implementing immutable arrays
+would not be required to explicitly include an unsupported ``out`` argument in
+the function signature. This can be somewhat onerous to implement properly,
+e.g.,
.. code:: python
@@ -486,6 +600,36 @@ concerned about performance differences measured in microsecond(s) on NumPy
functions, because it's difficult to do *anything* in Python in less than a
microsecond.
+For rare cases where NumPy functions are called in performance critical inner
+loops on small arrays or scalars, it is possible to avoid the overhead of
+dispatching by calling the versions of NumPy functions skipping
+``__array_function__`` checks available in the ``__skip_array_function__``
+attribute. For example:
+
+.. code:: python
+
+ dot = getattr(np.dot, '__skip_array_function__', np.dot)
+
+ def naive_matrix_power(x, n):
+ x = np.array(x)
+ for _ in range(n):
+ dot(x, x, out=x)
+ return x
+
+NumPy will use this internally to minimize overhead for NumPy functions
+defined in terms of other NumPy functions, but
+**we do not recommend it for most users**:
+
+- The specific implementation of overrides is still provisional, so the
+ ``__skip_array_function__`` attribute on particular functions could be
+ removed in any NumPy release without warning.
+ For this reason, access to ``__skip_array_function__`` attribute outside of
+ ``__array_function__`` methods should *always* be guarded by using
+ ``getattr()`` with a default value.
+- In cases where this makes a difference, you will get far greater speed-ups
+ rewriting your inner loops in a compiled language, e.g., with Cython or
+ Numba.
+
Use outside of NumPy
~~~~~~~~~~~~~~~~~~~~
@@ -553,7 +697,7 @@ Backward compatibility
----------------------
This proposal does not change existing semantics, except for those arguments
-that currently have ``__array_function__`` methods, which should be rare.
+that currently have ``__array_function__`` attributes, which should be rare.
Alternatives
@@ -631,7 +775,7 @@ would be straightforward to write a shim for a default
Implementations in terms of a limited core API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The internal implementations of some NumPy functions is extremely simple.
+The internal implementation of some NumPy functions is extremely simple.
For example:
- ``np.stack()`` is implemented in only a few lines of code by combining
@@ -665,36 +809,32 @@ nearly every public function in NumPy's API. This does not preclude the future
possibility of rewriting NumPy functions in terms of simplified core
functionality with ``__array_function__`` and a protocol and/or base class for
ensuring that arrays expose methods and properties like ``numpy.ndarray``.
-However, to work well this would require the possibility of implementing
-*some* but not all functions with ``__array_function__``, e.g., as described
-in the next section.
Coercion to a NumPy array as a catch-all fallback
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With the current design, classes that implement ``__array_function__``
-to overload at least one function implicitly declare an intent to
-implement the entire NumPy API. It's not possible to implement *only*
-``np.concatenate()`` on a type, but fall back to NumPy's default
-behavior of casting with ``np.asarray()`` for all other functions.
-
-This could present a backwards compatibility concern that would
-discourage libraries from adopting ``__array_function__`` in an
-incremental fashion. For example, currently most numpy functions will
-implicitly convert ``pandas.Series`` objects into NumPy arrays, behavior
-that assuredly many pandas users rely on. If pandas implemented
-``__array_function__`` only for ``np.concatenate``, unrelated NumPy
-functions like ``np.nanmean`` would suddenly break on pandas objects by
-raising TypeError.
-
-With ``__array_ufunc__``, it's possible to alleviate this concern by
-casting all arguments to numpy arrays and re-calling the ufunc, but the
-heterogeneous function signatures supported by ``__array_function__``
-make it impossible to implement this generic fallback behavior for
-``__array_function__``.
-
-We could resolve this issue by change the handling of return values in
-``__array_function__`` in either of two possible ways:
+to overload at least one function can opt-out of overriding other functions
+by using the ``__skip_array_function__`` function, as described above under
+"Defaulting to NumPy's implementation."
+
+However, this still results in different behavior than not implementing
+``__array_function__`` in at least one edge case. If multiple objects implement
+``__array_function__`` but don't know about each other NumPy will raise
+``TypeError`` if all methods return ``NotImplemented``, whereas if no arguments
+defined ``__array_function__`` methods it would attempt to coerce all of them
+to NumPy arrays.
+
+Alternatively, this could be "fixed" by writing a ``__array_function__``
+method that always calls ``__skip_array_function__()`` instead of returning
+``NotImplemented`` for some functions, but that would result in a type
+whose implementation cannot be overridden by other arguments -- like NumPy
+arrays themselves prior to the introduction of this protocol.
+
+Either way, it is not possible to *exactly* maintain the current behavior of
+all NumPy functions if at least one more function is overridden. If preserving
+this behavior is important, we could potentially solve it by changing the
+handling of return values in ``__array_function__`` in either of two ways:
1. Change the meaning of all arguments returning ``NotImplemented`` to indicate
that all arguments should be coerced to NumPy arrays and the operation
@@ -710,21 +850,23 @@ We could resolve this issue by change the handling of return values in
preclude reliable use of NumPy's high level API on these objects.
2. Use another sentinel value of some sort, e.g.,
``np.NotImplementedButCoercible``, to indicate that a class implementing part
- of NumPy's higher level array API is coercible as a fallback. This is a more
- appealing option.
-
-With either approach, we would need to define additional rules for *how*
-coercible array arguments are coerced. The only sane rule would be to treat
-these return values as equivalent to not defining an
-``__array_function__`` method at all, which means that NumPy functions would
-fall-back to their current behavior of coercing all array-like arguments.
-
-It is not yet clear to us yet if we need an optional like
-``NotImplementedButCoercible``, so for now we propose to defer this issue.
-We can always implement ``np.NotImplementedButCoercible`` at some later time if
-it proves critical to the NumPy community in the future. Importantly, we don't
-think this will stop critical libraries that desire to implement most of the
-high level NumPy API from adopting this proposal.
+ of NumPy's higher level array API is coercible as a fallback. If all
+ arguments return ``NotImplementedButCoercible``, arguments would be coerced
+ and the operation would be retried.
+
+ Unfortunately, correct behavior after encountering
+ ``NotImplementedButCoercible`` is not always obvious. Particularly
+ challenging is the "mixed" case where some arguments return
+ ``NotImplementedButCoercible`` and others return ``NotImplemented``.
+ Would dispatching be retried after only coercing the "coercible" arguments?
+ If so, then conceivably we could end up looping through the dispatching
+ logic an arbitrary number of times. Either way, the dispatching rules would
+ definitely get more complex and harder to reason about.
+
+At present, neither of these alternatives looks like a good idea. Reusing
+``__skip_array_function__()`` looks like it should suffice for most purposes.
+Arguably this loss in flexibility is a virtue: fallback implementations often
+result in unpredictable and undesired behavior.
A magic decorator that inspects type annotations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -823,7 +965,8 @@ There are two other arguments that we think *might* be important to pass to
- Access to the non-dispatched implementation (i.e., before wrapping with
``array_function_dispatch``) in ``ndarray.__array_function__`` would allow
us to drop special case logic for that method from
- ``implement_array_function``.
+ ``implement_array_function``. *Update: This has been implemented, as the
+   ``__skip_array_function__`` attribute.*
- Access to the ``dispatcher`` function passed into
``array_function_dispatch()`` would allow ``__array_function__``
implementations to determine the list of "array-like" arguments in a generic
@@ -860,7 +1003,7 @@ a descriptor.
Given the complexity and the limited use cases, we are also deferring on this
issue for now, but we are confident that ``__array_function__`` could be
-expanded to accomodate these use cases in the future if need be.
+expanded to accommodate these use cases in the future if need be.
Discussion
----------
@@ -877,7 +1020,7 @@ it was discussed at a `NumPy developer sprint
Berkeley Institute for Data Science (BIDS) <https://bids.berkeley.edu/>`_.
Detailed discussion of this proposal itself can be found on the
-`the mailing list <https://mail.python.org/pipermail/numpy-discussion/2018-June/078127.html>`_ and relvant pull requests
+`the mailing list <https://mail.python.org/pipermail/numpy-discussion/2018-June/078127.html>`_ and relevant pull requests
(`1 <https://github.com/numpy/numpy/pull/11189>`_,
`2 <https://github.com/numpy/numpy/pull/11303#issuecomment-396638175>`_,
`3 <https://github.com/numpy/numpy/pull/11374>`_)
diff --git a/doc/neps/nep-0019-rng-policy.rst b/doc/neps/nep-0019-rng-policy.rst
index f50897b0f..46dcbd0d7 100644
--- a/doc/neps/nep-0019-rng-policy.rst
+++ b/doc/neps/nep-0019-rng-policy.rst
@@ -6,6 +6,7 @@ NEP 19 — Random Number Generator Policy
:Status: Accepted
:Type: Standards Track
:Created: 2018-05-24
+:Updated: 2019-05-21
:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html
Abstract
@@ -91,7 +92,8 @@ those contributors simply walked away.
Implementation
--------------
-Work on a proposed new PRNG subsystem is already underway in the randomgen_
+Work on a proposed new Pseudo Random Number Generator (PRNG) subsystem is
+already underway in the randomgen_
project. The specifics of the new design are out of scope for this NEP and up
for much discussion, but we will discuss general policies that will guide the
evolution of whatever code is adopted. We will also outline just a few of the
@@ -119,37 +121,38 @@ Gaussian variate generation to the faster `Ziggurat algorithm
discouraged improvement would be tweaking the Ziggurat tables just a little bit
for a small performance improvement.
-Any new design for the RNG subsystem will provide a choice of different core
+Any new design for the random subsystem will provide a choice of different core
uniform PRNG algorithms. A promising design choice is to make these core
uniform PRNGs their own lightweight objects with a minimal set of methods
-(randomgen_ calls them “basic RNGs”). The broader set of non-uniform
+(randomgen_ calls them “BitGenerators”). The broader set of non-uniform
distributions will be its own class that holds a reference to one of these core
uniform PRNG objects and simply delegates to the core uniform PRNG object when
-it needs uniform random numbers. To borrow an example from randomgen_, the
-class ``MT19937`` is a basic RNG that implements the classic Mersenne Twister
-algorithm. The class ``RandomGenerator`` wraps around the basic RNG to provide
+it needs uniform random numbers (randomgen_ calls this the Generator). To
+borrow an example from randomgen_, the
+class ``MT19937`` is a BitGenerator that implements the classic Mersenne Twister
+algorithm. The class ``Generator`` wraps around the BitGenerator to provide
all of the non-uniform distribution methods::
# This is not the only way to instantiate this object.
# This is just handy for demonstrating the delegation.
- >>> brng = MT19937(seed)
- >>> rg = RandomGenerator(brng)
+ >>> bg = MT19937(seed)
+ >>> rg = Generator(bg)
>>> x = rg.standard_normal(10)
-We will be more strict about a select subset of methods on these basic RNG
+We will be more strict about a select subset of methods on these BitGenerator
objects. They MUST guarantee stream-compatibility for a specified set
of methods which are chosen to make it easier to compose them to build other
distributions and which are needed to abstract over the implementation details
of the variety of core PRNG algorithms. Namely,
* ``.bytes()``
- * ``.random_uintegers()``
- * ``.random_sample()``
+ * ``integers`` (formerly ``.random_uintegers()``)
+ * ``random`` (formerly ``.random_sample()``)
-The distributions class (``RandomGenerator``) SHOULD have all of the same
+The distributions class (``Generator``) SHOULD have all of the same
distribution methods as ``RandomState`` with close-enough function signatures
such that almost all code that currently works with ``RandomState`` instances
-will work with ``RandomGenerator`` instances (ignoring the precise stream
+will work with ``Generator`` instances (ignoring the precise stream
values). Some variance will be allowed for integer distributions: in order to
avoid some of the cross-platform problems described above, these SHOULD be
rewritten to work with ``uint64`` numbers on all platforms.
@@ -183,9 +186,10 @@ reproducible across numpy versions.
This legacy distributions class MUST be accessible under the name
``numpy.random.RandomState`` for backwards compatibility. All current ways of
instantiating ``numpy.random.RandomState`` with a given state should
-instantiate the Mersenne Twister basic RNG with the same state. The legacy
-distributions class MUST be capable of accepting other basic RNGs. The purpose
-here is to ensure that one can write a program with a consistent basic RNG
+instantiate the Mersenne Twister BitGenerator with the same state. The legacy
+distributions class MUST be capable of accepting other BitGenerators. The
+purpose
+here is to ensure that one can write a program with a consistent BitGenerator
state with a mixture of libraries that may or may not have upgraded from
``RandomState``. Instances of the legacy distributions class MUST respond
``True`` to ``isinstance(rg, numpy.random.RandomState)`` because there is
@@ -209,27 +213,27 @@ consistently and usefully, but a very common usage is in unit tests where many
of the problems of global state are less likely.
This NEP does not propose removing these functions or changing them to use the
-less-stable ``RandomGenerator`` distribution implementations. Future NEPs
+less-stable ``Generator`` distribution implementations. Future NEPs
might.
Specifically, the initial release of the new PRNG subsystem SHALL leave these
convenience functions as aliases to the methods on a global ``RandomState``
-that is initialized with a Mersenne Twister basic RNG object. A call to
-``numpy.random.seed()`` will be forwarded to that basic RNG object. In
+that is initialized with a Mersenne Twister BitGenerator object. A call to
+``numpy.random.seed()`` will be forwarded to that BitGenerator object. In
addition, the global ``RandomState`` instance MUST be accessible in this
initial release by the name ``numpy.random.mtrand._rand``: Robert Kern long ago
promised ``scikit-learn`` that this name would be stable. Whoops.
-In order to allow certain workarounds, it MUST be possible to replace the basic
-RNG underneath the global ``RandomState`` with any other basic RNG object (we
-leave the precise API details up to the new subsystem). Calling
+In order to allow certain workarounds, it MUST be possible to replace the
+BitGenerator underneath the global ``RandomState`` with any other BitGenerator
+object (we leave the precise API details up to the new subsystem). Calling
``numpy.random.seed()`` thereafter SHOULD just pass the given seed to the
-current basic RNG object and not attempt to reset the basic RNG to the Mersenne
-Twister. The set of ``numpy.random.*`` convenience functions SHALL remain the
-same as they currently are. They SHALL be aliases to the ``RandomState``
-methods and not the new less-stable distributions class (``RandomGenerator``,
-in the examples above). Users who want to get the fastest, best distributions
-can follow best practices and instantiate generator objects explicitly.
+current BitGenerator object and not attempt to reset the BitGenerator to the
+Mersenne Twister. The set of ``numpy.random.*`` convenience functions SHALL
+remain the same as they currently are. They SHALL be aliases to the
+``RandomState`` methods and not the new less-stable distributions class
+(``Generator``, in the examples above). Users who want to get the fastest, best
+distributions can follow best practices and instantiate generator objects explicitly.
This NEP does not propose that these requirements remain in perpetuity. After
we have experience with the new PRNG subsystem, we can and should revisit these
@@ -298,8 +302,8 @@ positive improvement to the downstream project, just avoiding being broken.
Furthermore, under this old proposal, we would have had a quite lengthy
deprecation period where ``RandomState`` existed alongside the new system of
-basic RNGs and distribution classes. Leaving the implementation of
-``RandomState`` fixed meant that it could not use the new basic RNG state
+BitGenerator and Generator classes. Leaving the implementation of
+``RandomState`` fixed meant that it could not use the new BitGenerator state
objects. Developing programs that use a mixture of libraries that have and
have not upgraded would require managing two sets of PRNG states. This would
notionally have been time-limited, but we intended the deprecation to be very
@@ -308,9 +312,9 @@ long.
The current proposal solves all of these problems. All current usages of
``RandomState`` will continue to work in perpetuity, though some may be
discouraged through documentation. Unit tests can continue to use the full
-complement of ``RandomState`` methods. Mixed ``RandomState/RandomGenerator``
-code can safely share the common basic RNG state. Unmodified ``RandomState``
-code can make use of the new features of alternative basic RNGs like settable
+complement of ``RandomState`` methods. Mixed ``RandomState/Generator``
+code can safely share the common BitGenerator state. Unmodified ``RandomState``
+code can make use of the new features of alternative BitGenerators like settable
streams.
diff --git a/doc/release/1.17.0-notes.rst b/doc/release/1.17.0-notes.rst
index 71ad17673..5a27b8046 100644
--- a/doc/release/1.17.0-notes.rst
+++ b/doc/release/1.17.0-notes.rst
@@ -33,6 +33,16 @@ The internal use of these functions has been refactored and there are better
alternatives. Replace ``exec_command`` with `subprocess.Popen` and
``temp_file_name`` with `tempfile.mkstemp`.
+Writeable flag of C-API wrapped arrays
+--------------------------------------
+When an array is created from the C-API to wrap a pointer to data, the only
+indication we have of the read-write nature of the data is the ``writeable``
+flag set during creation. It is dangerous to force the flag to writeable.
+In the future it will not be possible to switch the writeable flag to ``True``
+from python.
+This deprecation should not affect many users since arrays created in such
+a manner are very rare in practice and only available through the NumPy C-API.
+
Future Changes
==============
@@ -66,6 +76,16 @@ zero::
>>> np.zeros(10)//1
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+``MaskedArray.mask`` now returns a view of the mask, not the mask itself
+------------------------------------------------------------------------
+Returning the mask itself was unsafe, as it could be reshaped in place which
+would violate expectations of the masked array code. Its behavior is now
+consistent with the ``.data`` attribute, which also returns a view.
+
+The underlying mask can still be accessed with ``._mask`` if it is needed.
+Tests that contain ``assert x.mask is not y.mask`` or similar will need to be
+updated.
+
Do not lookup ``__buffer__`` attribute in `numpy.frombuffer`
------------------------------------------------------------
Looking up ``__buffer__`` attribute in `numpy.frombuffer` was undocumented and
@@ -83,6 +103,17 @@ The functions ``np.load``, and ``np.lib.format.read_array`` take an
`allow_pickle` keyword which now defaults to ``False`` in response to
`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+Potential changes to the random stream
+--------------------------------------
+Due to bugs in the application of log to random floating point numbers,
+the stream may change when sampling from ``np.random.beta``, ``np.random.binomial``,
+``np.random.laplace``, ``np.random.logistic``, ``np.random.logseries`` or
+``np.random.multinomial`` if a 0 is generated in the underlying MT19937 random stream.
+There is a 1 in :math:`10^{53}` chance of this occurring, and so the probability that
+the stream changes for any given seed is extremely small. If a 0 is encountered in the
+underlying generator, then the incorrect value produced (either ``np.inf``
+or ``np.nan``) is now dropped.
+
C API changes
=============
@@ -90,6 +121,38 @@ C API changes
New Features
============
+libFLAME
+--------
+Support for building NumPy with the libFLAME linear algebra package as the LAPACK
+implementation, see
+`libFLAME <https://www.cs.utexas.edu/~flame/web/libFLAME.html>`_ for details.
+
+User-defined BLAS detection order
+---------------------------------
+``numpy.distutils`` now uses an environment variable, comma-separated and case
+insensitive, to determine the detection order for BLAS libraries.
+By default ``NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas``.
+However, to force the use of OpenBLAS simply do::
+
+ NPY_BLAS_ORDER=openblas python setup.py build
+
+which forces the use of OpenBLAS.
+This may be helpful for users who have an MKL installation but wish to try
+out different implementations.
+
+User-defined LAPACK detection order
+-----------------------------------
+``numpy.distutils`` now uses an environment variable, comma-separated and case
+insensitive, to determine the detection order for LAPACK libraries.
+By default ``NPY_LAPACK_ORDER=mkl,openblas,flame,atlas,accelerate,lapack``.
+However, to force the use of OpenBLAS simply do::
+
+ NPY_LAPACK_ORDER=openblas python setup.py build
+
+which forces the use of OpenBLAS.
+This may be helpful for users who have an MKL installation but wish to try
+out different implementations.
+
``np.ufunc.reduce`` and related functions now accept a ``where`` mask
---------------------------------------------------------------------
``np.ufunc.reduce``, ``np.sum``, ``np.prod``, ``np.min``, ``np.max`` all
@@ -110,6 +173,9 @@ random data. The algorithm is stable and requires O(n/2) working space. For
details of the algorithm, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+In addition, for very small dtypes, radix sort is used instead of timsort. In
+general, we attempt to use the fastest possible implementation.
+
``np.unpackbits`` now accepts a ``count`` parameter
---------------------------------------------------
``count`` allows subsetting the number of bits that will be unpacked up-front,
@@ -149,6 +215,17 @@ Floating point scalars implement ``as_integer_ratio`` to match the builtin float
This returns a (numerator, denominator) pair, which can be used to construct a
`fractions.Fraction`.
+``.npy`` files support unicode field names
+------------------------------------------
+A new format version of 3.0 has been introduced, which enables structured types
+with non-latin1 field names. This is used automatically when needed.
+
+`numpy.packbits` and `numpy.unpackbits` accept an ``order`` keyword
+-------------------------------------------------------------------
+The ``order`` keyword defaults to ``big``, and will order the **bits**
+accordingly. For ``'big'`` 3 will become ``[0, 0, 0, 0, 0, 0, 1, 1]``, and
+``[1, 1, 0, 0, 0, 0, 0, 0]`` for ``little``.
+
Improvements
============
@@ -171,6 +248,14 @@ maintains `O(N log N)` run time complexity instead of deteriorating towards
`O(N*N)` for prime lengths. Also, accuracy for real-valued FFTs with near-prime
lengths has improved and is on par with complex-valued FFTs.
+Performance improvements for integer sorts
+------------------------------------------
+
+``sort``, ``argsort``, ``ndarray.sort`` and ``ndarray.argsort`` now use radix
+sort as the default stable sort for integers and booleans. This is faster than
+the old default, mergesort, in the vast majority of cases.
+
+
Further improvements to ``ctypes`` support in ``np.ctypeslib``
--------------------------------------------------------------
A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
@@ -252,6 +337,15 @@ methods when called on object arrays, making them compatible with
In general, this handles object arrays more gracefully, and avoids floating-
point operations if exact arithmetic types are used.
+Support of object arrays in ``np.matmul``
+-----------------------------------------
+It is now possible to use ``np.matmul`` (or the ``@`` operator) with object arrays.
+For instance, it is now possible to do::
+
+ from fractions import Fraction
+ a = np.array([[Fraction(1, 2), Fraction(1, 3)], [Fraction(1, 3), Fraction(1, 2)]])
+ b = a @ a
+
Changes
=======
@@ -282,6 +376,31 @@ was accidental. The old behavior can be retained with
``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
``arr['a']``.
+``clip`` now uses a ufunc under the hood
+----------------------------------------
+This means that registering clip functions for custom dtypes in C via
+`descr->f->fastclip` is deprecated - they should use the ufunc registration
+mechanism instead, attaching to the ``np.core.umath.clip`` ufunc.
+
+It also means that ``clip`` accepts ``where`` and ``casting`` arguments,
+and can be overridden with ``__array_ufunc__``.
+
+A consequence of this change is that some behaviors of the old ``clip`` have
+been deprecated:
+
+* Passing ``nan`` to mean "do not clip" as one or both bounds. This didn't work
+ in all cases anyway, and can be better handled by passing infinities of the
+ appropriate sign.
+* Using "unsafe" casting by default when an ``out`` argument is passed. Using
+ ``casting="unsafe"`` explicitly will silence this warning.
+
+Additionally, there are some corner cases with behavior changes:
+
+* Passing ``max < min`` has changed to be more consistent across dtypes, but
+ should not be relied upon.
+* Scalar ``min`` and ``max`` take part in promotion rules like they do in all
+ other ufuncs.
+
``__array_interface__`` offset now works as documented
------------------------------------------------------
The interface may use an ``offset`` value that was mistakenly ignored.
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 7d6942f75..aeb55ca03 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -291,9 +291,14 @@ From scratch
.. c:function:: PyObject* PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
Create a new uninitialized array of type, *typenum*, whose size in
- each of *nd* dimensions is given by the integer array, *dims*.
- This function cannot be used to create a flexible-type array (no
- itemsize given).
+ each of *nd* dimensions is given by the integer array, *dims*. The memory
+ for the array is uninitialized (unless typenum is :c:data:`NPY_OBJECT`
+ in which case each element in the array is set to NULL). The
+ *typenum* argument allows specification of any of the builtin
+ data-types such as :c:data:`NPY_FLOAT` or :c:data:`NPY_LONG`. The
+ memory for the array can be set to zero if desired using
+ :c:func:`PyArray_FILLWBYTE` (return_object, 0). This function cannot be
+ used to create a flexible-type array (no itemsize given).
.. c:function:: PyObject* PyArray_SimpleNewFromData( \
int nd, npy_intp* dims, int typenum, void* data)
@@ -302,7 +307,13 @@ From scratch
pointer. The array flags will have a default that the data area is
well-behaved and C-style contiguous. The shape of the array is
given by the *dims* c-array of length *nd*. The data-type of the
- array is indicated by *typenum*.
+ array is indicated by *typenum*. If data comes from another
+ reference-counted Python object, the reference count on this object
+ should be increased after the pointer is passed in, and the base member
+ of the returned ndarray should point to the Python object that owns
+ the data. This will ensure that the provided memory is not
+ freed while the returned array is in existence. To free memory as soon
+ as the ndarray is deallocated, set the OWNDATA flag on the returned ndarray.
.. c:function:: PyObject* PyArray_SimpleNewFromDescr( \
int nd, npy_intp* dims, PyArray_Descr* descr)
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index a9ec496c5..a13e1160a 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -155,9 +155,10 @@ The default order for the libraries are:
1. MKL
2. OpenBLAS
-3. ATLAS
-4. Accelerate (MacOS)
-5. LAPACK (NetLIB)
+3. libFLAME
+4. ATLAS
+5. Accelerate (MacOS)
+6. LAPACK (NetLIB)
If you wish to build against OpenBLAS but you also have MKL available one
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst
index b70036c89..3961325fb 100644
--- a/doc/source/user/c-info.how-to-extend.rst
+++ b/doc/source/user/c-info.how-to-extend.rst
@@ -504,7 +504,7 @@ writeable). The syntax is
Creating a brand-new ndarray
----------------------------
-Quite often new arrays must be created from within extension-module
+Quite often, new arrays must be created from within extension-module
code. Perhaps an output array is needed and you don't want the caller
to have to supply it. Perhaps only a temporary array is needed to hold
an intermediate calculation. Whatever the need there are simple ways
@@ -512,42 +512,9 @@ to get an ndarray object of whatever data-type is needed. The most
general function for doing this is :c:func:`PyArray_NewFromDescr`. All array
creation functions go through this heavily re-used code. Because of
its flexibility, it can be somewhat confusing to use. As a result,
-simpler forms exist that are easier to use.
-
-:c:func:`PyArray_SimpleNew`
-
- This function allocates new memory and places it in an ndarray
- with *nd* dimensions whose shape is determined by the array of
- at least *nd* items pointed to by *dims*. The memory for the
- array is uninitialized (unless typenum is :c:data:`NPY_OBJECT` in
- which case each element in the array is set to NULL). The
- *typenum* argument allows specification of any of the builtin
- data-types such as :c:data:`NPY_FLOAT` or :c:data:`NPY_LONG`. The
- memory for the array can be set to zero if desired using
- :c:func:`PyArray_FILLWBYTE` (return_object, 0).
-
-:c:func:`PyArray_SimpleNewFromData`
-
- Sometimes, you want to wrap memory allocated elsewhere into an
- ndarray object for downstream use. This routine makes it
- straightforward to do that. The first three arguments are the same
- as in :c:func:`PyArray_SimpleNew`, the final argument is a pointer to a
- block of contiguous memory that the ndarray should use as it's
- data-buffer which will be interpreted in C-style contiguous
- fashion. A new reference to an ndarray is returned, but the
- ndarray will not own its data. When this ndarray is deallocated,
- the pointer will not be freed.
-
- You should ensure that the provided memory is not freed while the
- returned array is in existence. The easiest way to handle this is
- if data comes from another reference-counted Python object. The
- reference count on this object should be increased after the
- pointer is passed in, and the base member of the returned ndarray
- should point to the Python object that owns the data. Then, when
- the ndarray is deallocated, the base-member will be DECREF'd
- appropriately. If you want the memory to be freed as soon as the
- ndarray is deallocated then simply set the OWNDATA flag on the
- returned ndarray.
+simpler forms exist that are easier to use. These forms are part of the
+:c:func:`PyArray_SimpleNew` family of functions, which simplify the interface
+by providing default values for common use cases.
Getting at ndarray memory and accessing elements of the ndarray
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 4d4d055ec..7834dd39d 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -13,6 +13,13 @@ _old_fpu_mode = None
_collect_results = {}
+def pytest_configure(config):
+ config.addinivalue_line("markers",
+ "valgrind_error: Tests that are known to error under valgrind.")
+ config.addinivalue_line("markers",
+ "slow: Tests that are very slow.")
+
+
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 52ab9c994..7a7a433c0 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2614,7 +2614,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
- a.argsort(axis=-1, kind='quicksort', order=None)
+ a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
@@ -2771,7 +2771,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
- a.clip(min=None, max=None, out=None)
+ a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
@@ -3800,7 +3800,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
- a.sort(axis=-1, kind='quicksort', order=None)
+ a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
@@ -4618,10 +4618,12 @@ add_newdoc('numpy.core', 'ufunc',
number of outputs; use `None` for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
- Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone. Note that if
- an uninitialized return array is created via the default ``out=None``,
- then the elements where the values are False will remain uninitialized.
+ This condition is broadcast over the input. At locations where the
+ condition is True, the `out` array will be set to the ufunc result.
+ Elsewhere, the `out` array will retain its original value.
+ Note that if an uninitialized `out` array is created via the default
+ ``out=None``, locations within it where the condition is False will
+ remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 3ab64f7a1..ba6f7d111 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -11,6 +11,7 @@ from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core._asarray import asanyarray
from numpy.core import numerictypes as nt
+from numpy.core import _exceptions
from numpy._globals import _NoValue
# save those O(100) nanoseconds!
@@ -55,6 +56,80 @@ def _count_reduce_items(arr, axis):
items *= arr.shape[ax]
return items
+# Numpy 1.17.0, 2019-02-24
+# Various clip behavior deprecations, marked with _clip_dep as a prefix.
+
+def _clip_dep_is_scalar_nan(a):
+ # guarded to protect circular imports
+ from numpy.core.fromnumeric import ndim
+ if ndim(a) != 0:
+ return False
+ try:
+ return um.isnan(a)
+ except TypeError:
+ return False
+
+def _clip_dep_is_byte_swapped(a):
+ if isinstance(a, mu.ndarray):
+ return not a.dtype.isnative
+ return False
+
+def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
+ # normal path
+ if casting is not None:
+ return ufunc(*args, out=out, casting=casting, **kwargs)
+
+ # try to deal with broken casting rules
+ try:
+ return ufunc(*args, out=out, **kwargs)
+ except _exceptions._UFuncOutputCastingError as e:
+ # Numpy 1.17.0, 2019-02-24
+ warnings.warn(
+ "Converting the output of clip from {!r} to {!r} is deprecated. "
+ "Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
+ "correct the type of the variables.".format(e.from_, e.to),
+ DeprecationWarning,
+ stacklevel=2
+ )
+ return ufunc(*args, out=out, casting="unsafe", **kwargs)
+
+def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
+ if min is None and max is None:
+ raise ValueError("One of max or min must be given")
+
+ # Numpy 1.17.0, 2019-02-24
+ # This deprecation probably incurs a substantial slowdown for small arrays,
+ # it will be good to get rid of it.
+ if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
+ using_deprecated_nan = False
+ if _clip_dep_is_scalar_nan(min):
+ min = -float('inf')
+ using_deprecated_nan = True
+ if _clip_dep_is_scalar_nan(max):
+ max = float('inf')
+ using_deprecated_nan = True
+ if using_deprecated_nan:
+ warnings.warn(
+ "Passing `np.nan` to mean no clipping in np.clip has always "
+ "been unreliable, and is now deprecated. "
+ "In future, this will always return nan, like it already does "
+ "when min or max are arrays that contain nan. "
+ "To skip a bound, pass either None or an np.inf of an "
+ "appropriate sign.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ if min is None:
+ return _clip_dep_invoke_with_casting(
+ um.minimum, a, max, out=out, casting=casting, **kwargs)
+ elif max is None:
+ return _clip_dep_invoke_with_casting(
+ um.maximum, a, min, out=out, casting=casting, **kwargs)
+ else:
+ return _clip_dep_invoke_with_casting(
+ um.clip, a, min, max, out=out, casting=casting, **kwargs)
+
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index e17523451..bf1747272 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -511,6 +511,13 @@ defdict = {
TD(noobj),
TD(O, f='npy_ObjectMin')
),
+'clip':
+ Ufunc(3, 1, ReorderableNone,
+ docstrings.get('numpy.core.umath.clip'),
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
+ TD(noobj),
+ [TypeDescription('O', 'npy_ObjectClip', 'OOO', 'O')]
+ ),
'fmax':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmax'),
@@ -697,6 +704,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
None,
+ TD('e', f='exp', astype={'e':'f'}),
TD('f', simd=[('avx2', 'f'), ('avx512f', 'f')]),
TD(inexact, f='exp', astype={'e':'f'}),
TD(P, f='exp'),
@@ -719,6 +727,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
None,
+ TD('e', f='log', astype={'e':'f'}),
TD('f', simd=[('avx2', 'f'), ('avx512f', 'f')]),
TD(inexact, f='log', astype={'e':'f'}),
TD(P, f='log'),
@@ -922,6 +931,7 @@ defdict = {
docstrings.get('numpy.core.umath.matmul'),
"PyUFunc_SimpleUniformOperationTypeResolver",
TD(notimes_or_obj),
+ TD(O),
signature='(n?,k),(k,m?)->(n?,m?)',
),
}
@@ -961,6 +971,9 @@ arity_lookup = {
'O': 'OO_O',
'P': 'OO_O_method',
},
+ (3, 1): {
+ 'O': 'OOO_O',
+ }
}
#for each name
@@ -1137,6 +1150,7 @@ def make_code(funcdict, filename):
#include "ufunc_type_resolution.h"
#include "loops.h"
#include "matmul.h"
+ #include "clip.h"
%s
static int
@@ -1154,7 +1168,6 @@ def make_code(funcdict, filename):
if __name__ == "__main__":
filename = __file__
- fid = open('__umath_generated.c', 'w')
code = make_code(defdict, filename)
- fid.write(code)
- fid.close()
+ with open('__umath_generated.c', 'w') as fid:
+ fid.write(code)
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 8b1a5a3db..2a5646bd7 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -26,8 +26,12 @@ subst = {
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
- Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone.
+ This condition is broadcast over the input. At locations where the
+ condition is True, the `out` array will be set to the ufunc result.
+ Elsewhere, the `out` array will retain its original value.
+ Note that if an uninitialized `out` array is created via the default
+ ``out=None``, locations within it where the condition is False will
+ remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
@@ -42,14 +46,20 @@ subst = {
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
- if name[0] != '_' and name != 'matmul':
- # matmul is special, it does not use the OUT_SCALAR replacement strings
+ skip = (
+ # gufuncs do not use the OUT_SCALAR replacement strings
+ 'matmul',
+ # clip has 3 inputs, which is not handled by this
+ 'clip',
+ )
+ if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
+
for k, v in subst.items():
doc = doc.replace('$' + k, v)
@@ -2535,6 +2545,46 @@ add_newdoc('numpy.core.umath', 'fmin',
""")
+add_newdoc('numpy.core.umath', 'clip',
+ """
+ Clip (limit) the values in an array.
+
+ Given an interval, values outside the interval are clipped to
+ the interval edges. For example, if an interval of ``[0, 1]``
+ is specified, values smaller than 0 become 0, and values larger
+ than 1 become 1.
+
+ Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing elements to clip.
+ a_min : array_like
+ Minimum value.
+ a_max : array_like
+ Maximum value.
+ out : ndarray, optional
+ The results will be placed in this array. It may be the input
+ array for in-place clipping. `out` must be of the right shape
+ to hold the output. Its type is preserved.
+ $PARAMS
+
+ See Also
+ --------
+ numpy.clip :
+ Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
+ dispatching to one of `~numpy.core.umath.clip`,
+ `~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
+
+ Returns
+ -------
+ clipped_array : ndarray
+ An array with the elements of `a`, but where values
+ < `a_min` are replaced with `a_min`, and those > `a_max`
+ with `a_max`.
+ """)
+
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index 3fd7d14c4..d7ecce1b4 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -2124,7 +2124,7 @@ class chararray(ndarray):
def __rmod__(self, other):
return NotImplemented
- def argsort(self, axis=-1, kind='quicksort', order=None):
+ def argsort(self, axis=-1, kind=None, order=None):
"""
Return the indices that sort the array lexicographically.
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index b4d721940..202802e2c 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -824,7 +824,7 @@ def _sort_dispatcher(a, axis=None, kind=None, order=None):
@array_function_dispatch(_sort_dispatcher)
-def sort(a, axis=-1, kind='quicksort', order=None):
+def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
@@ -837,8 +837,8 @@ def sort(a, axis=-1, kind='quicksort', order=None):
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
- and 'mergesort' use timsort under the covers and, in general, the
- actual implementation will vary with data type. The 'mergesort' option
+ and 'mergesort' use timsort or radix sort under the covers and, in general,
+ the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
@@ -914,7 +914,8 @@ def sort(a, axis=-1, kind='quicksort', order=None):
'stable' automatically choses the best stable sorting algorithm
for the data type being sorted. It, along with 'mergesort' is
- currently mapped to timsort. API forward compatibility currently limits the
+ currently mapped to timsort or radix sort depending on the
+ data type. API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
@@ -925,7 +926,8 @@ def sort(a, axis=-1, kind='quicksort', order=None):
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For details of timsort, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
-
+ 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
+ O(n) sort instead of O(n log n).
Examples
--------
@@ -974,7 +976,7 @@ def _argsort_dispatcher(a, axis=None, kind=None, order=None):
@array_function_dispatch(_argsort_dispatcher)
-def argsort(a, axis=-1, kind='quicksort', order=None):
+def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
@@ -997,8 +999,6 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
.. versionchanged:: 1.15.0.
The 'stable' option was added.
-
-
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
@@ -1279,7 +1279,7 @@ def searchsorted(a, v, side='left', sorter=None):
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
- This function is a faster version of the builtin python `bisect.bisect_left`
+ This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
@@ -1961,12 +1961,12 @@ def compress(condition, a, axis=None, out=None):
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
-def _clip_dispatcher(a, a_min, a_max, out=None):
+def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
-def clip(a, a_min, a_max, out=None):
+def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
@@ -1975,6 +1975,9 @@ def clip(a, a_min, a_max, out=None):
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
+ Equivalent to but faster than ``np.maximum(a_min, np.minimum(a, a_max))``.
+ No check is performed to ensure ``a_min < a_max``.
+
Parameters
----------
a : array_like
@@ -1992,6 +1995,11 @@ def clip(a, a_min, a_max, out=None):
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
+ **kwargs
+ For other keyword-only arguments, see the
+ :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -2020,7 +2028,7 @@ def clip(a, a_min, a_max, out=None):
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
- return _wrapfunc(a, 'clip', a_min, a_max, out=out)
+ return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
@@ -2137,7 +2145,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
@@ -3561,5 +3569,5 @@ def rank(a):
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
- VisibleDeprecationWarning, stacklevel=2)
+ VisibleDeprecationWarning, stacklevel=3)
return ndim(a)
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index a839ae402..8006dd9b5 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -8,6 +8,7 @@ by importing from the extension module.
import functools
import warnings
+import sys
from . import overrides
from . import _multiarray_umath
@@ -1113,9 +1114,9 @@ def putmask(a, mask, values):
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
-def packbits(a, axis=None):
+def packbits(a, axis=None, bitorder='big'):
"""
- packbits(a, axis=None)
+ packbits(a, axis=None, bitorder='big')
Packs the elements of a binary-valued array into bits in a uint8 array.
@@ -1129,6 +1130,13 @@ def packbits(a, axis=None):
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
+ bitorder : {'big', 'little'}, optional
+ The order of the input bits. 'big' will mimic bin(val),
+ ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
+ reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
+ Defaults to 'big'.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -1164,9 +1172,9 @@ def packbits(a, axis=None):
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
-def unpackbits(a, axis=None, count=None):
+def unpackbits(a, axis=None, count=None, bitorder='big'):
"""
- unpackbits(a, axis=None, count=None)
+ unpackbits(a, axis=None, count=None, bitorder='big')
Unpacks elements of a uint8 array into a binary-valued output array.
@@ -1194,6 +1202,14 @@ def unpackbits(a, axis=None, count=None):
.. versionadded:: 1.17.0
+ bitorder : {'big', 'little'}, optional
+ The order of the returned bits. 'big' will mimic bin(val),
+ ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
+ the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
+ Defaults to 'big'.
+
+ .. versionadded:: 1.17.0
+
Returns
-------
unpacked : ndarray, uint8 type
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 4b59d730d..55e6c1cad 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1139,7 +1139,7 @@ def roll(a, shift, axis=None):
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> np.roll(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
-
+
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
@@ -1606,11 +1606,11 @@ little_endian = (sys.byteorder == 'little')
@set_module('numpy')
-def indices(dimensions, dtype=int):
+def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
- Compute an array where the subarrays contain index values 0,1,...
+ Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
@@ -1619,28 +1619,38 @@ def indices(dimensions, dtype=int):
The shape of the grid.
dtype : dtype, optional
Data type of the result.
+ sparse : boolean, optional
+ Return a sparse representation of the grid instead of a dense
+ representation. Default is False.
+
+ .. versionadded:: 1.17
Returns
-------
- grid : ndarray
- The array of grid indices,
- ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+ grid : one ndarray or tuple of ndarrays
+ If sparse is False:
+ Returns one array of grid indices,
+ ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+ If sparse is True:
+ Returns a tuple of arrays, with
+ ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
+ dimensions[i] in the ith place
See Also
--------
- mgrid, meshgrid
+ mgrid, ogrid, meshgrid
Notes
-----
- The output shape is obtained by prepending the number of dimensions
- in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
- ``(r0, ..., rN-1)`` of length ``N``, the output shape is
- ``(N,r0,...,rN-1)``.
+ The output shape in the dense case is obtained by prepending the number
+ of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
+ is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+ ``(N, r0, ..., rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
- grid[k,i0,i1,...,iN-1] = ik
+ grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
@@ -1665,15 +1675,36 @@ def indices(dimensions, dtype=int):
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
+ If sparse is set to true, the grid will be returned in a sparse
+ representation.
+
+ >>> i, j = np.indices((2, 3), sparse=True)
+ >>> i.shape
+ (2, 1)
+ >>> j.shape
+ (1, 3)
+ >>> i # row indices
+ array([[0],
+ [1]])
+ >>> j # column indices
+ array([[0, 1, 2]])
+
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
- res = empty((N,)+dimensions, dtype=dtype)
+ if sparse:
+ res = tuple()
+ else:
+ res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
- res[i] = arange(dim, dtype=dtype).reshape(
+ idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
+ if sparse:
+ res = res + (idx,)
+ else:
+ res[i] = idx
return res
@@ -2002,7 +2033,8 @@ def load(file):
"np.core.numeric.load is deprecated, use pickle.load instead",
DeprecationWarning, stacklevel=2)
if isinstance(file, type("")):
- file = open(file, "rb")
+ with open(file, "rb") as file_pointer:
+ return pickle.load(file_pointer)
return pickle.load(file)
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 9f91adc83..ad4d1c721 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -1,6 +1,7 @@
"""Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
+import textwrap
from numpy.core._multiarray_umath import (
add_docstring, implement_array_function, _get_implementing_args)
@@ -143,15 +144,36 @@ def array_function_dispatch(dispatcher, module=None, verify=True,
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
+ # Equivalently, we could define this function directly instead of using
+ # exec. This version has the advantage of giving the helper function a
+ # more interpretable name. Otherwise, the original function does not
+ # show up at all in many cases, e.g., if it's written in C or if the
+ # dispatcher gets an invalid keyword argument.
+ source = textwrap.dedent("""
@functools.wraps(implementation)
- def public_api(*args, **kwargs):
+ def {name}(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return implement_array_function(
- implementation, public_api, relevant_args, args, kwargs)
+ implementation, {name}, relevant_args, args, kwargs)
+ """).format(name=implementation.__name__)
+
+ source_object = compile(
+ source, filename='<__array_function__ internals>', mode='exec')
+ scope = {
+ 'implementation': implementation,
+ 'dispatcher': dispatcher,
+ 'functools': functools,
+ 'implement_array_function': implement_array_function,
+ }
+ exec(source_object, scope)
+
+ public_api = scope[implementation.__name__]
if module is not None:
public_api.__module__ = module
+ public_api.__skip_array_function__ = implementation
+
return public_api
return decorator
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 45b4fb3c7..62147d22b 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -710,6 +710,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'timsort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'npysort', 'radixsort.c.src'),
join('src', 'common', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'common', 'npy_binsearch.h.src'),
@@ -905,6 +906,8 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'matmul.h.src'),
join('src', 'umath', 'matmul.c.src'),
+ join('src', 'umath', 'clip.h.src'),
+ join('src', 'umath', 'clip.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'cpuid.c'),
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index 5f139cffb..bacd27473 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -4,6 +4,9 @@
#include <npy_config.h>
#include "mem_overlap.h"
+/* For PyArray_ macros used below */
+#include "numpy/ndarrayobject.h"
+
/*
* NOTE: This API should remain private for the time being, to allow
* for further refinement. I think the 'aligned' mechanism
diff --git a/numpy/core/src/common/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c
index 561f4b825..c580e0cce 100644
--- a/numpy/core/src/common/npy_longdouble.c
+++ b/numpy/core/src/common/npy_longdouble.c
@@ -6,6 +6,7 @@
#include "numpy/ndarraytypes.h"
#include "numpy/npy_math.h"
#include "npy_pycompat.h"
+#include "numpyos.h"
/*
* Heavily derived from PyLong_FromDouble
@@ -94,3 +95,84 @@ done:
Py_DECREF(l_chunk_size);
return v;
}
+
+/* Helper function to get unicode(PyLong).encode('utf8') */
+static PyObject *
+_PyLong_Bytes(PyObject *long_obj) {
+ PyObject *bytes;
+#if defined(NPY_PY3K)
+ PyObject *unicode = PyObject_Str(long_obj);
+ if (unicode == NULL) {
+ return NULL;
+ }
+ bytes = PyUnicode_AsUTF8String(unicode);
+ Py_DECREF(unicode);
+#else
+ bytes = PyObject_Str(long_obj);
+#endif
+ return bytes;
+}
+
+
+/**
+ * TODO: currently a hack that converts the long through a string. This is
+ * correct, but slow.
+ *
+ * Another approach would be to do this numerically, in a similar way to
+ * PyLong_AsDouble.
+ * However, in order to respect rounding modes correctly, this needs to know
+ * the size of the mantissa, which is platform-dependent.
+ */
+NPY_VISIBILITY_HIDDEN npy_longdouble
+npy_longdouble_from_PyLong(PyObject *long_obj) {
+ npy_longdouble result = 1234;
+ char *end;
+ char *cstr;
+ PyObject *bytes;
+
+ /* convert the long to a string */
+ bytes = _PyLong_Bytes(long_obj);
+ if (bytes == NULL) {
+ return -1;
+ }
+
+ cstr = PyBytes_AsString(bytes);
+ if (cstr == NULL) {
+ goto fail;
+ }
+ end = NULL;
+
+ /* convert the string to a long double and capture errors */
+ errno = 0;
+ result = NumPyOS_ascii_strtold(cstr, &end);
+ if (errno == ERANGE) {
+ /* strtold returns INFINITY of the correct sign. */
+ if (PyErr_Warn(PyExc_RuntimeWarning,
+ "overflow encountered in conversion from python long") < 0) {
+ goto fail;
+ }
+ }
+ else if (errno) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not parse python long as longdouble: %s (%s)",
+ cstr,
+ strerror(errno));
+ goto fail;
+ }
+
+ /* Extra characters at the end of the string, or nothing parsed */
+ if (end == cstr || *end != '\0') {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not parse long as longdouble: %s",
+ cstr);
+ goto fail;
+ }
+
+ /* finally safe to decref now that we're done with `end` */
+ Py_DECREF(bytes);
+ return result;
+
+fail:
+ Py_DECREF(bytes);
+ return -1;
+}
diff --git a/numpy/core/src/common/npy_longdouble.h b/numpy/core/src/common/npy_longdouble.h
index 036b53070..01db06de7 100644
--- a/numpy/core/src/common/npy_longdouble.h
+++ b/numpy/core/src/common/npy_longdouble.h
@@ -14,4 +14,14 @@
NPY_VISIBILITY_HIDDEN PyObject *
npy_longdouble_to_PyLong(npy_longdouble ldval);
+/* Convert a python `long` integer to a npy_longdouble
+ *
+ * This performs the same task as PyLong_AsDouble, but for long doubles
+ * which have a greater range.
+ *
+ * Returns -1 if an error occurs.
+ */
+NPY_VISIBILITY_HIDDEN npy_longdouble
+npy_longdouble_from_PyLong(PyObject *long_obj);
+
#endif
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
index 521f0fee5..16a105499 100644
--- a/numpy/core/src/common/npy_sort.h.src
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -44,6 +44,17 @@ int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
/**end repeat**/
+/**begin repeat
+ *
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong#
+ */
+
+int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
+int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+
+/**end repeat**/
+
/*
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index c26bd16ac..9061c0518 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -847,6 +847,29 @@ get_buffer_info(PyObject *NPY_UNUSED(self), PyObject *args)
#undef GET_PYBUF_FLAG
+/*
+ * Return a new array object wrapping existing C-allocated (dummy) data.
+ * Such an array does not own its data (must not free it), but because it
+ * wraps C data, it also has no base object. Used to test arr.flags.writeable
+ * setting behaviour.
+ */
+static PyObject*
+get_c_wrapping_array(PyObject* NPY_UNUSED(self), PyObject* arg)
+{
+ int writeable, flags;
+ npy_intp zero = 0;
+
+ writeable = PyObject_IsTrue(arg);
+ if (error_converting(writeable)) {
+ return NULL;
+ }
+
+ flags = writeable ? NPY_ARRAY_WRITEABLE : 0;
+ /* Create an empty array (which points to a random place) */
+ return PyArray_NewFromDescr(&PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ 1, &zero, NULL, &zero, flags, NULL);
+}
+
/*
* Test C-api level item getting.
@@ -1932,6 +1955,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"get_buffer_info",
get_buffer_info,
METH_VARARGS, NULL},
+ {"get_c_wrapping_array",
+ get_c_wrapping_array,
+ METH_O, NULL},
{"array_indexing",
array_indexing,
METH_VARARGS, NULL},
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index e62b32ab2..02078306c 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -173,7 +173,7 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
}
}
- implementation = PyObject_GetAttr(func, npy_ma_str_wrapped);
+ implementation = PyObject_GetAttr(func, npy_ma_str_skip_array_function);
if (implementation == NULL) {
return NULL;
}
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 49819ca4a..5f7bcb8f7 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -30,6 +30,7 @@
#include <emmintrin.h>
#endif
+#include "npy_longdouble.h"
#include "numpyos.h"
#include <string.h>
@@ -328,6 +329,17 @@ string_to_long_double(PyObject*op)
npy_longdouble temp;
PyObject* b;
+ /* Convert python long objects to a longdouble, without precision or range
+ * loss via a double.
+ */
+ if ((PyLong_Check(op) && !PyBool_Check(op))
+#if !defined(NPY_PY3K)
+ || (PyInt_Check(op) && !PyBool_Check(op))
+#endif
+ ) {
+ return npy_longdouble_from_PyLong(op);
+ }
+
if (PyUnicode_Check(op)) {
b = PyUnicode_AsUTF8String(op);
if (!b) {
@@ -3798,176 +3810,6 @@ static void
/*
*****************************************************************************
- ** FASTCLIP **
- *****************************************************************************
- */
-
-#define _LESS_THAN(a, b) ((a) < (b))
-#define _GREATER_THAN(a, b) ((a) > (b))
-
-/*
- * In fastclip, 'b' was already checked for NaN, so the half comparison
- * only needs to check 'a' for NaN.
- */
-
-#define _HALF_LESS_THAN(a, b) (!npy_half_isnan(a) && npy_half_lt_nonan(a, b))
-#define _HALF_GREATER_THAN(a, b) (!npy_half_isnan(a) && npy_half_lt_nonan(b, a))
-
-/**begin repeat
- *
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * DATETIME, TIMEDELTA#
- * #type = npy_bool,
- * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_datetime, npy_timedelta#
- * #isfloat = 0*11, 1*4, 0*2#
- * #isnan = nop*11, npy_half_isnan, npy_isnan*3, nop*2#
- * #lt = _LESS_THAN*11, _HALF_LESS_THAN, _LESS_THAN*5#
- * #gt = _GREATER_THAN*11, _HALF_GREATER_THAN, _GREATER_THAN*5#
- */
-static void
-@name@_fastclip(@type@ *in, npy_intp ni, @type@ *min, @type@ *max, @type@ *out)
-{
- npy_intp i;
- @type@ max_val = 0, min_val = 0;
-
- if (max != NULL) {
- max_val = *max;
-#if @isfloat@
- /* NaNs result in no clipping, so optimize the case away */
- if (@isnan@(max_val)) {
- if (min == NULL) {
- memmove(out, in, ni * sizeof(@type@));
- return;
- }
- max = NULL;
- }
-#endif
- }
- if (min != NULL) {
- min_val = *min;
-#if @isfloat@
- if (@isnan@(min_val)) {
- if (max == NULL) {
- memmove(out, in, ni * sizeof(@type@));
- return;
- }
- min = NULL;
- }
-#endif
- }
- if (max == NULL) {
- for (i = 0; i < ni; i++) {
- if (@lt@(in[i], min_val)) {
- out[i] = min_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else if (min == NULL) {
- for (i = 0; i < ni; i++) {
- if (@gt@(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else {
- /*
- * Visual Studio 2015 loop vectorizer handles NaN in an unexpected
- * manner, see: https://github.com/numpy/numpy/issues/7601
- */
- #if (_MSC_VER == 1900)
- #pragma loop( no_vector )
- #endif
- for (i = 0; i < ni; i++) {
- if (@lt@(in[i], min_val)) {
- out[i] = min_val;
- }
- else if (@gt@(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
-}
-/**end repeat**/
-
-#undef _LESS_THAN
-#undef _GREATER_THAN
-#undef _HALF_LESS_THAN
-#undef _HALF_GREATER_THAN
-
-/**begin repeat
- *
- * #name = CFLOAT, CDOUBLE, CLONGDOUBLE#
- * #type = npy_cfloat, npy_cdouble, npy_clongdouble#
- */
-static void
-@name@_fastclip(@type@ *in, npy_intp ni, @type@ *min, @type@ *max, @type@ *out)
-{
- npy_intp i;
- @type@ max_val, min_val;
-
- if (max != NULL) {
- max_val = *max;
- }
- if (min != NULL) {
- min_val = *min;
- }
- if (max == NULL) {
- for (i = 0; i < ni; i++) {
- if (PyArray_CLT(in[i],min_val)) {
- out[i] = min_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else if (min == NULL) {
- for (i = 0; i < ni; i++) {
- if (PyArray_CGT(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else {
- for (i = 0; i < ni; i++) {
- if (PyArray_CLT(in[i], min_val)) {
- out[i] = min_val;
- }
- else if (PyArray_CGT(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
-}
-
-/**end repeat**/
-
-#define OBJECT_fastclip NULL
-
-
-/*
- *****************************************************************************
** FASTPUTMASK **
*****************************************************************************
*/
@@ -4405,6 +4247,7 @@ static PyArray_Descr @from@_Descr = {
* npy_half, npy_float, npy_double, npy_longdouble,
* npy_cfloat, npy_cdouble, npy_clongdouble,
* PyObject *, npy_datetime, npy_timedelta#
+ * #rsort = 1*5, 0*16#
* #NAME = Bool,
* Byte, UByte, Short, UShort, Int, UInt,
* Long, ULong, LongLong, ULongLong,
@@ -4461,12 +4304,20 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
{
quicksort_@suff@,
heapsort_@suff@,
- timsort_@suff@
+ #if @rsort@
+ radixsort_@suff@
+ #else
+ timsort_@suff@
+ #endif
},
{
aquicksort_@suff@,
aheapsort_@suff@,
- atimsort_@suff@
+ #if @rsort@
+ aradixsort_@suff@
+ #else
+ atimsort_@suff@
+ #endif
},
#else
{
@@ -4480,7 +4331,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
(PyArray_ScalarKindFunc*)NULL,
NULL,
NULL,
- (PyArray_FastClipFunc*)@from@_fastclip,
+ (PyArray_FastClipFunc*)NULL,
(PyArray_FastPutmaskFunc*)@from@_fastputmask,
(PyArray_FastTakeFunc*)@from@_fasttake,
(PyArray_ArgFunc*)@from@_argmin
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 90ee2c5b2..1d72a5227 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -918,6 +918,27 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
}
func = PyArray_DESCR(self)->f->fastclip;
+ if (func == NULL) {
+ if (min == NULL) {
+ return PyObject_CallFunctionObjArgs(n_ops.minimum, self, max, out, NULL);
+ }
+ else if (max == NULL) {
+ return PyObject_CallFunctionObjArgs(n_ops.maximum, self, min, out, NULL);
+ }
+ else {
+ return PyObject_CallFunctionObjArgs(n_ops.clip, self, min, max, out, NULL);
+ }
+ }
+
+ /* NumPy 1.17.0, 2019-02-24 */
+ if (DEPRECATE(
+ "->f->fastclip is deprecated. Use PyUFunc_RegisterLoopForDescr to "
+ "attach a custom loop to np.core.umath.clip, np.minimum, and "
+ "np.maximum") < 0) {
+ return NULL;
+ }
+ /* everything below can be removed once this deprecation completes */
+
if (func == NULL
|| (min != NULL && !PyArray_CheckAnyScalar(min))
|| (max != NULL && !PyArray_CheckAnyScalar(max))
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 52694d491..8e167a37e 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -598,7 +598,7 @@ _zerofill(PyArrayObject *ret)
NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap)
{
- PyObject *base=PyArray_BASE(ap);
+ PyObject *base = PyArray_BASE(ap);
#if defined(NPY_PY3K)
Py_buffer view;
#else
@@ -606,23 +606,38 @@ _IsWriteable(PyArrayObject *ap)
Py_ssize_t n;
#endif
- /* If we own our own data, then no-problem */
- if ((base == NULL) || (PyArray_FLAGS(ap) & NPY_ARRAY_OWNDATA)) {
+ /*
+ * C-data wrapping arrays may not own their data while not having a base;
+ * WRITEBACKIFCOPY arrays have a base, but do own their data.
+ */
+ if (base == NULL || PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)) {
+ /*
+ * This is somewhat unsafe for directly wrapped non-writable C-arrays,
+ * which do not know whether the memory area is writable or not and
+ * do not own their data (but have no base).
+ * It would be better if this returned PyArray_ISWRITEABLE(ap).
+ * Since it is hard to deprecate, this is deprecated only on the Python
+      * side, but not in PyArray_UpdateFlags.
+ */
return NPY_TRUE;
}
+
/*
- * Get to the final base object
- * If it is a writeable array, then return TRUE
- * If we can find an array object
- * or a writeable buffer object as the final base object
+ * Get to the final base object.
+ * If it is a writeable array, then return True if we can
+ * find an array object or a writeable buffer object as
+ * the final base object.
*/
+ while (PyArray_Check(base)) {
+ ap = (PyArrayObject *)base;
+ base = PyArray_BASE(ap);
- while(PyArray_Check(base)) {
- if (PyArray_CHKFLAGS((PyArrayObject *)base, NPY_ARRAY_OWNDATA)) {
- return (npy_bool) (PyArray_ISWRITEABLE((PyArrayObject *)base));
+ if (base == NULL || PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)) {
+ return (npy_bool) PyArray_ISWRITEABLE(ap);
}
- base = PyArray_BASE((PyArrayObject *)base);
+ assert(!PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA));
}
+
#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 8e70d6cf1..dc79bfa09 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -1511,7 +1511,8 @@ pack_inner(const char *inptr,
npy_intp in_stride,
char *outptr,
npy_intp n_out,
- npy_intp out_stride)
+ npy_intp out_stride,
+ char order)
{
/*
* Loop through the elements of inptr.
@@ -1531,9 +1532,13 @@ pack_inner(const char *inptr,
vn_out -= (vn_out & 1);
for (index = 0; index < vn_out; index += 2) {
unsigned int r;
- /* swap as packbits is "big endian", note x86 can load unaligned */
- npy_uint64 a = npy_bswap8(*(npy_uint64*)inptr);
- npy_uint64 b = npy_bswap8(*(npy_uint64*)(inptr + 8));
+ npy_uint64 a = *(npy_uint64*)inptr;
+ npy_uint64 b = *(npy_uint64*)(inptr + 8);
+ if (order == 'b') {
+ a = npy_bswap8(a);
+ b = npy_bswap8(b);
+ }
+ /* note x86 can load unaligned */
__m128i v = _mm_set_epi64(_m_from_int64(b), _m_from_int64(a));
/* false -> 0x00 and true -> 0xFF (there is no cmpneq) */
v = _mm_cmpeq_epi8(v, zero);
@@ -1553,30 +1558,45 @@ pack_inner(const char *inptr,
if (remain == 0) { /* assumes n_in > 0 */
remain = 8;
}
- /* don't reset index to handle remainder of above block */
+ /* Don't reset index. Just handle remainder of above block */
for (; index < n_out; index++) {
- char build = 0;
+ unsigned char build = 0;
int i, maxi;
npy_intp j;
maxi = (index == n_out - 1) ? remain : 8;
- for (i = 0; i < maxi; i++) {
- build <<= 1;
- for (j = 0; j < element_size; j++) {
- build |= (inptr[j] != 0);
+ if (order == 'b') {
+ for (i = 0; i < maxi; i++) {
+ build <<= 1;
+ for (j = 0; j < element_size; j++) {
+ build |= (inptr[j] != 0);
+ }
+ inptr += in_stride;
+ }
+ if (index == n_out - 1) {
+ build <<= 8 - remain;
}
- inptr += in_stride;
}
- if (index == n_out - 1) {
- build <<= 8 - remain;
+ else
+ {
+ for (i = 0; i < maxi; i++) {
+ build >>= 1;
+ for (j = 0; j < element_size; j++) {
+ build |= (inptr[j] != 0) ? 128 : 0;
+ }
+ inptr += in_stride;
+ }
+ if (index == n_out - 1) {
+ build >>= 8 - remain;
+ }
}
- *outptr = build;
+ *outptr = (char)build;
outptr += out_stride;
}
}
static PyObject *
-pack_bits(PyObject *input, int axis)
+pack_bits(PyObject *input, int axis, char order)
{
PyArrayObject *inp;
PyArrayObject *new = NULL;
@@ -1661,7 +1681,7 @@ pack_bits(PyObject *input, int axis)
pack_inner(PyArray_ITER_DATA(it), PyArray_ITEMSIZE(new),
PyArray_DIM(new, axis), PyArray_STRIDE(new, axis),
PyArray_ITER_DATA(ot), PyArray_DIM(out, axis),
- PyArray_STRIDE(out, axis));
+ PyArray_STRIDE(out, axis), order);
PyArray_ITER_NEXT(it);
PyArray_ITER_NEXT(ot);
}
@@ -1681,10 +1701,17 @@ fail:
}
static PyObject *
-unpack_bits(PyObject *input, int axis, PyObject *count_obj)
+unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order)
{
static int unpack_init = 0;
- static char unpack_lookup[256][8];
+ /*
+     * lookup table for bitorder big as it has been around longer
+ * bitorder little is handled via byteswapping in the loop
+ */
+ static union {
+ npy_uint8 bytes[8];
+ npy_uint64 uint64;
+ } unpack_lookup_big[256];
PyArrayObject *inp;
PyArrayObject *new = NULL;
PyArrayObject *out = NULL;
@@ -1770,24 +1797,18 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
goto fail;
}
- /* setup lookup table under GIL, big endian 0..256 as bytes */
+ /*
+ * setup lookup table under GIL, 256 8 byte blocks representing 8 bits
+ * expanded to 1/0 bytes
+ */
if (unpack_init == 0) {
- npy_uint64 j;
- npy_uint64 * unpack_lookup_64 = (npy_uint64 *)unpack_lookup;
+ npy_intp j;
for (j=0; j < 256; j++) {
- npy_uint64 v = 0;
- v |= (npy_uint64)((j & 1) == 1);
- v |= (npy_uint64)((j & 2) == 2) << 8;
- v |= (npy_uint64)((j & 4) == 4) << 16;
- v |= (npy_uint64)((j & 8) == 8) << 24;
- v |= (npy_uint64)((j & 16) == 16) << 32;
- v |= (npy_uint64)((j & 32) == 32) << 40;
- v |= (npy_uint64)((j & 64) == 64) << 48;
- v |= (npy_uint64)((j & 128) == 128) << 56;
-#if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
- v = npy_bswap8(v);
-#endif
- unpack_lookup_64[j] = v;
+ npy_intp k;
+ for (k=0; k < 8; k++) {
+ npy_uint8 v = (j & (1 << k)) == (1 << k);
+ unpack_lookup_big[j].bytes[7 - k] = v;
+ }
}
unpack_init = 1;
}
@@ -1816,14 +1837,32 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
if (out_stride == 1) {
/* for unity stride we can just copy out of the lookup table */
- for (index = 0; index < in_n; index++) {
- memcpy(outptr, unpack_lookup[*inptr], 8);
- outptr += 8;
- inptr += in_stride;
+ if (order == 'b') {
+ for (index = 0; index < in_n; index++) {
+ npy_uint64 v = unpack_lookup_big[*inptr].uint64;
+ memcpy(outptr, &v, 8);
+ outptr += 8;
+ inptr += in_stride;
+ }
+ }
+ else {
+ for (index = 0; index < in_n; index++) {
+ npy_uint64 v = unpack_lookup_big[*inptr].uint64;
+ if (order != 'b') {
+ v = npy_bswap8(v);
+ }
+ memcpy(outptr, &v, 8);
+ outptr += 8;
+ inptr += in_stride;
+ }
}
/* Clean up the tail portion */
if (in_tail) {
- memcpy(outptr, unpack_lookup[*inptr], in_tail);
+ npy_uint64 v = unpack_lookup_big[*inptr].uint64;
+ if (order != 'b') {
+ v = npy_bswap8(v);
+ }
+ memcpy(outptr, &v, in_tail);
}
/* Add padding */
else if (out_pad) {
@@ -1831,21 +1870,33 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
}
}
else {
- unsigned char mask;
-
- for (index = 0; index < in_n; index++) {
- for (mask = 128; mask; mask >>= 1) {
- *outptr = ((mask & (*inptr)) != 0);
+ if (order == 'b') {
+ for (index = 0; index < in_n; index++) {
+ for (i = 0; i < 8; i++) {
+ *outptr = ((*inptr & (128 >> i)) != 0);
+ outptr += out_stride;
+ }
+ inptr += in_stride;
+ }
+ /* Clean up the tail portion */
+ for (i = 0; i < in_tail; i++) {
+ *outptr = ((*inptr & (128 >> i)) != 0);
outptr += out_stride;
}
- inptr += in_stride;
}
- /* Clean up the tail portion */
- mask = 128;
- for (i = 0; i < in_tail; i++) {
- *outptr = ((mask & (*inptr)) != 0);
- outptr += out_stride;
- mask >>= 1;
+ else {
+ for (index = 0; index < in_n; index++) {
+ for (i = 0; i < 8; i++) {
+ *outptr = ((*inptr & (1 << i)) != 0);
+ outptr += out_stride;
+ }
+ inptr += in_stride;
+ }
+ /* Clean up the tail portion */
+ for (i = 0; i < in_tail; i++) {
+ *outptr = ((*inptr & (1 << i)) != 0);
+ outptr += out_stride;
+ }
}
/* Add padding */
for (index = 0; index < out_pad; index++) {
@@ -1853,6 +1904,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
outptr += out_stride;
}
}
+
PyArray_ITER_NEXT(it);
PyArray_ITER_NEXT(ot);
}
@@ -1876,13 +1928,26 @@ io_pack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *obj;
int axis = NPY_MAXDIMS;
- static char *kwlist[] = {"a", "axis", NULL};
+ static char *kwlist[] = {"in", "axis", "bitorder", NULL};
+ char c = 'b';
+ const char * order_str = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:pack" , kwlist,
- &obj, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&s:pack" , kwlist,
+ &obj, PyArray_AxisConverter, &axis, &order_str)) {
return NULL;
}
- return pack_bits(obj, axis);
+ if (order_str != NULL) {
+ if (strncmp(order_str, "little", 6) == 0)
+ c = 'l';
+ else if (strncmp(order_str, "big", 3) == 0)
+ c = 'b';
+ else {
+ PyErr_SetString(PyExc_ValueError,
+ "'order' must be either 'little' or 'big'");
+ return NULL;
+ }
+ }
+ return pack_bits(obj, axis, c);
}
@@ -1892,12 +1957,20 @@ io_unpack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
PyObject *obj;
int axis = NPY_MAXDIMS;
PyObject *count = Py_None;
- static char *kwlist[] = {"a", "axis", "count", NULL};
+ static char *kwlist[] = {"in", "axis", "count", "bitorder", NULL};
+ const char * c = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:unpack" , kwlist,
- &obj, PyArray_AxisConverter, &axis,
- &count)) {
+ if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&Os:unpack" , kwlist,
+ &obj, PyArray_AxisConverter, &axis, &count, &c)) {
+ return NULL;
+ }
+ if (c == NULL) {
+ c = "b";
+ }
+ if (c[0] != 'l' && c[0] != 'b') {
+ PyErr_SetString(PyExc_ValueError,
+ "'order' must begin with 'l' or 'b'");
return NULL;
}
- return unpack_bits(obj, axis, count);
+ return unpack_bits(obj, axis, count, c[0]);
}
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 9afcfd86d..a370874a6 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -393,6 +393,11 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
char *str;
PyObject *tmp = NULL;
+ if (obj == Py_None) {
+ *sortkind = NPY_QUICKSORT;
+ return NPY_SUCCEED;
+ }
+
if (PyUnicode_Check(obj)) {
obj = tmp = PyUnicode_AsASCIIString(obj);
if (obj == NULL) {
@@ -401,6 +406,8 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
}
*sortkind = NPY_QUICKSORT;
+
+
str = PyBytes_AsString(obj);
if (!str) {
Py_XDECREF(tmp);
@@ -424,7 +431,7 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
* That maintains backwards compatibility while
* allowing other types of stable sorts to be used.
*/
- *sortkind = NPY_STABLESORT;
+ *sortkind = NPY_MERGESORT;
}
else if (str[0] == 's' || str[0] == 'S') {
/*
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 6065c1df4..f4d2513ca 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -1126,7 +1126,7 @@ fail:
NPY_NO_EXPORT int
PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
- PyArray_SortFunc *sort;
+ PyArray_SortFunc *sort = NULL;
int n = PyArray_NDIM(op);
if (check_and_adjust_axis(&axis, n) < 0) {
@@ -1143,6 +1143,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
}
sort = PyArray_DESCR(op)->f->sort[which];
+
if (sort == NULL) {
if (PyArray_DESCR(op)->f->compare) {
switch (which) {
@@ -1284,16 +1285,11 @@ NPY_NO_EXPORT PyObject *
PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
PyArrayObject *op2;
- PyArray_ArgSortFunc *argsort;
+ PyArray_ArgSortFunc *argsort = NULL;
PyObject *ret;
- if (which < 0 || which >= NPY_NSORTS) {
- PyErr_SetString(PyExc_ValueError,
- "not a valid sort kind");
- return NULL;
- }
-
argsort = PyArray_DESCR(op)->f->argsort[which];
+
if (argsort == NULL) {
if (PyArray_DESCR(op)->f->compare) {
switch (which) {
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 9fcdc91b2..029671e4a 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1262,10 +1262,14 @@ PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...)
int i, ntot, err=0;
ntot = n + nadd;
- if (ntot < 1 || ntot > NPY_MAXARGS) {
+ if (ntot < 0) {
PyErr_Format(PyExc_ValueError,
- "Need at least 1 and at most %d "
- "array objects.", NPY_MAXARGS);
+ "n and nadd arguments must be non-negative", NPY_MAXARGS);
+ return NULL;
+ }
+ if (ntot > NPY_MAXARGS) {
+ PyErr_Format(PyExc_ValueError,
+ "At most %d array objects are supported.", NPY_MAXARGS);
return NULL;
}
multi = PyArray_malloc(sizeof(PyArrayMultiIterObject));
@@ -1328,10 +1332,14 @@ PyArray_MultiIterNew(int n, ...)
int i, err = 0;
- if (n < 1 || n > NPY_MAXARGS) {
+ if (n < 0) {
+ PyErr_Format(PyExc_ValueError,
+ "n argument must be non-negative", NPY_MAXARGS);
+ return NULL;
+ }
+ if (n > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
- "Need at least 1 and at most %d "
- "array objects.", NPY_MAXARGS);
+ "At most %d array objects are supported.", NPY_MAXARGS);
return NULL;
}
@@ -1388,7 +1396,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *k
PyArrayMultiIterObject *multi;
PyObject *arr;
- if (kwds != NULL) {
+ if (kwds != NULL && PyDict_Size(kwds) > 0) {
PyErr_SetString(PyExc_ValueError,
"keyword arguments not accepted.");
return NULL;
@@ -1409,13 +1417,12 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *k
++n;
}
}
- if (n < 1 || n > NPY_MAXARGS) {
- if (PyErr_Occurred()) {
- return NULL;
- }
+ if (PyErr_Occurred()) {
+ return NULL;
+ }
+ if (n > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
- "Need at least 1 and at most %d "
- "array objects.", NPY_MAXARGS);
+ "At most %d array objects are supported.", NPY_MAXARGS);
return NULL;
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 0ddec2995..385fc4381 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -2399,21 +2399,7 @@ array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- PyObject *min = NULL, *max = NULL;
- PyArrayObject *out = NULL;
- static char *kwlist[] = {"min", "max", "out", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO&:clip", kwlist,
- &min,
- &max,
- PyArray_OutputConverter, &out)) {
- return NULL;
- }
- if (max == NULL && min == NULL) {
- PyErr_SetString(PyExc_ValueError, "One of max or min must be given.");
- return NULL;
- }
- return PyArray_Return((PyArrayObject *)PyArray_Clip(self, min, max, out));
+ NPY_FORWARD_NDARRAY_METHOD("_clip");
}
@@ -2546,6 +2532,22 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds)
if (write_flag != Py_None) {
if (PyObject_IsTrue(write_flag)) {
if (_IsWriteable(self)) {
+ /*
+ * _IsWritable (and PyArray_UpdateFlags) allows flipping this,
+ * although the C-Api user who created the array may have
+ * chosen to make it non-writable for a good reason, so
+ * deprecate.
+ */
+ if ((PyArray_BASE(self) == NULL) &&
+ !PyArray_CHKFLAGS(self, NPY_ARRAY_OWNDATA) &&
+ !PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) {
+ /* 2017-05-03, NumPy 1.17.0 */
+ if (DEPRECATE("making a non-writeable array writeable "
+ "is deprecated for arrays without a base "
+ "which do not own their data.") < 0) {
+ return NULL;
+ }
+ }
PyArray_ENABLEFLAGS(self, NPY_ARRAY_WRITEABLE);
}
else {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 52412b827..e15ab5172 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4498,7 +4498,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_prepare = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_wrapped = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_skip_array_function = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
@@ -4514,7 +4514,8 @@ intern_strings(void)
npy_ma_str_array_wrap = PyUString_InternFromString("__array_wrap__");
npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__");
npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__");
- npy_ma_str_wrapped = PyUString_InternFromString("__wrapped__");
+ npy_ma_str_skip_array_function = PyUString_InternFromString(
+ "__skip_array_function__");
npy_ma_str_order = PyUString_InternFromString("order");
npy_ma_str_copy = PyUString_InternFromString("copy");
npy_ma_str_dtype = PyUString_InternFromString("dtype");
@@ -4524,7 +4525,7 @@ intern_strings(void)
return npy_ma_str_array && npy_ma_str_array_prepare &&
npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
- npy_ma_str_ufunc && npy_ma_str_wrapped &&
+ npy_ma_str_ufunc && npy_ma_str_skip_array_function &&
npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index 6d33c3295..5cf082fbb 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -6,7 +6,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_prepare;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_wrapped;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_skip_array_function;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 420501ce2..0ceb994ef 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -113,6 +113,7 @@ _PyArray_SetNumericOps(PyObject *dict)
SET(rint);
SET(conjugate);
SET(matmul);
+ SET(clip);
return 0;
}
@@ -179,6 +180,7 @@ _PyArray_GetNumericOps(void)
GET(rint);
GET(conjugate);
GET(matmul);
+ GET(clip);
return dict;
fail:
diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h
index 33a7cf872..643241b3d 100644
--- a/numpy/core/src/multiarray/number.h
+++ b/numpy/core/src/multiarray/number.h
@@ -40,6 +40,7 @@ typedef struct {
PyObject *rint;
PyObject *conjugate;
PyObject *matmul;
+ PyObject *clip;
} NumericOps;
extern NPY_NO_EXPORT NumericOps n_ops;
diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src
new file mode 100644
index 000000000..c1435bd96
--- /dev/null
+++ b/numpy/core/src/npysort/radixsort.c.src
@@ -0,0 +1,229 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "npy_sort.h"
+#include "npysort_common.h"
+#include <stdlib.h>
+
+/*
+ *****************************************************************************
+ ** INTEGER SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG#
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong#
+ * #type = npy_ubyte, npy_ubyte, npy_ubyte, npy_ushort, npy_ushort, npy_uint,
+ * npy_uint, npy_ulong, npy_ulong, npy_ulonglong, npy_ulonglong#
+ * #sign = 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
+ * #floating = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0#
+ */
+
+// Reference: https://github.com/eloj/radix-sorting#-key-derivation
+#if @sign@
+ // Floating-point is currently disabled.
+ // Floating-point tests succeed for double and float on macOS but not on Windows/Linux.
+ // Basic sorting tests succeed but others relying on sort fail.
+  // Open question: possibly related to floating-point normalisation or multiple NaN representations; needs investigation.
+ #if @floating@
+ // For floats, we invert the key if the sign bit is set, else we invert the sign bit.
+ #define KEY_OF(x) ((x) ^ (-((x) >> (sizeof(@type@) * 8 - 1)) | ((@type@)1 << (sizeof(@type@) * 8 - 1))))
+ #else
+ // For signed ints, we flip the sign bit so the negatives are below the positives.
+ #define KEY_OF(x) ((x) ^ ((@type@)1 << (sizeof(@type@) * 8 - 1)))
+ #endif
+#else
+ // For unsigned ints, the key is as-is
+ #define KEY_OF(x) (x)
+#endif
+
+static inline npy_ubyte
+nth_byte_@suff@(@type@ key, npy_intp l) {
+ return (key >> (l << 3)) & 0xFF;
+}
+
+@type@*
+radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
+{
+ npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
+ npy_intp i, l;
+ @type@ key0 = KEY_OF(arr[0]);
+ npy_intp ncols = 0;
+ npy_ubyte cols[sizeof(@type@)];
+
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[i]);
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ cnt[l][nth_byte_@suff@(k, l)]++;
+ }
+ }
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ if (cnt[l][nth_byte_@suff@(key0, l)] != num) {
+ cols[ncols++] = l;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ npy_intp a = 0;
+ for (i = 0; i < 256; i++) {
+ npy_intp b = cnt[cols[l]][i];
+ cnt[cols[l]][i] = a;
+ a += b;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ @type@* temp;
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[i]);
+ npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++;
+ aux[dst] = arr[i];
+ }
+
+ temp = aux;
+ aux = arr;
+ arr = temp;
+ }
+
+ return arr;
+}
+
+int
+radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
+{
+ void *sorted;
+ @type@ *aux;
+ @type@ *arr = start;
+ @type@ k1, k2;
+ npy_bool all_sorted = 1;
+
+ if (num < 2) {
+ return 0;
+ }
+
+ k1 = KEY_OF(arr[0]);
+ for (npy_intp i = 1; i < num; i++) {
+ k2 = KEY_OF(arr[i]);
+ if (k1 > k2) {
+ all_sorted = 0;
+ break;
+ }
+ k1 = k2;
+ }
+
+ if (all_sorted) {
+ return 0;
+ }
+
+ aux = malloc(num * sizeof(@type@));
+ if (aux == NULL) {
+ return -NPY_ENOMEM;
+ }
+
+ sorted = radixsort0_@suff@(start, aux, num);
+ if (sorted != start) {
+ memcpy(start, sorted, num * sizeof(@type@));
+ }
+
+ free(aux);
+ return 0;
+}
+
+npy_intp*
+aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
+{
+ npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
+ npy_intp i, l;
+ @type@ key0 = KEY_OF(arr[0]);
+ npy_intp ncols = 0;
+ npy_ubyte cols[sizeof(@type@)];
+
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[i]);
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ cnt[l][nth_byte_@suff@(k, l)]++;
+ }
+ }
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ if (cnt[l][nth_byte_@suff@(key0, l)] != num) {
+ cols[ncols++] = l;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ npy_intp a = 0;
+ for (i = 0; i < 256; i++) {
+ npy_intp b = cnt[cols[l]][i];
+ cnt[cols[l]][i] = a;
+ a += b;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ npy_intp* temp;
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[tosort[i]]);
+ npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++;
+ aux[dst] = tosort[i];
+ }
+
+ temp = aux;
+ aux = tosort;
+ tosort = temp;
+ }
+
+ return tosort;
+}
+
+int
+aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(varr))
+{
+ npy_intp *sorted;
+ npy_intp *aux;
+ @type@ *arr = start;
+ @type@ k1, k2;
+ npy_bool all_sorted = 1;
+
+ if (num < 2) {
+ return 0;
+ }
+
+ k1 = KEY_OF(arr[0]);
+ for (npy_intp i = 1; i < num; i++) {
+ k2 = KEY_OF(arr[i]);
+ if (k1 > k2) {
+ all_sorted = 0;
+ break;
+ }
+ k1 = k2;
+ }
+
+ if (all_sorted) {
+ return 0;
+ }
+
+ aux = malloc(num * sizeof(npy_intp));
+ if (aux == NULL) {
+ return -NPY_ENOMEM;
+ }
+
+ sorted = aradixsort0_@suff@(start, aux, tosort, num);
+ if (sorted != tosort) {
+ memcpy(tosort, sorted, num * sizeof(npy_intp));
+ }
+
+ free(aux);
+ return 0;
+}
+
+#undef KEY_OF
+
+/**end repeat**/
diff --git a/numpy/core/src/umath/clip.c.src b/numpy/core/src/umath/clip.c.src
new file mode 100644
index 000000000..30fa3d2b3
--- /dev/null
+++ b/numpy/core/src/umath/clip.c.src
@@ -0,0 +1,119 @@
+/**
+ * This module provides the inner loops for the clip ufunc
+ */
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+
+#include "numpy/halffloat.h"
+#include "numpy/npy_math.h"
+#include "numpy/ndarraytypes.h"
+#include "numpy/npy_common.h"
+#include "numpy/utils.h"
+#include "fast_loop_macros.h"
+
+/*
+ * Produce macros that perform nan/nat-propagating min and max
+ */
+
+/**begin repeat
+ * #name = BOOL,
+ * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG#
+ */
+#define _NPY_@name@_MIN(a, b) PyArray_MIN(a, b)
+#define _NPY_@name@_MAX(a, b) PyArray_MAX(a, b)
+/**end repeat**/
+
+#define _NPY_HALF_MIN(a, b) (npy_half_isnan(a) || npy_half_le(a, b) ? (a) : (b))
+#define _NPY_HALF_MAX(a, b) (npy_half_isnan(a) || npy_half_ge(a, b) ? (a) : (b))
+
+/**begin repeat
+ * #name = FLOAT, DOUBLE, LONGDOUBLE#
+ */
+#define _NPY_@name@_MIN(a, b) (npy_isnan(a) ? (a) : PyArray_MIN(a, b))
+#define _NPY_@name@_MAX(a, b) (npy_isnan(a) ? (a) : PyArray_MAX(a, b))
+/**end repeat**/
+
+/**begin repeat
+ * #name = CFLOAT, CDOUBLE, CLONGDOUBLE#
+ */
+#define _NPY_@name@_MIN(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CLT(a, b) ? (a) : (b))
+#define _NPY_@name@_MAX(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CGT(a, b) ? (a) : (b))
+/**end repeat**/
+
+/**begin repeat
+ * #name = DATETIME, TIMEDELTA#
+ */
+#define _NPY_@name@_MIN(a, b) ( \
+ (a) == NPY_DATETIME_NAT ? (a) : \
+ (b) == NPY_DATETIME_NAT ? (b) : \
+ (a) < (b) ? (a) : (b) \
+)
+#define _NPY_@name@_MAX(a, b) ( \
+ (a) == NPY_DATETIME_NAT ? (a) : \
+ (b) == NPY_DATETIME_NAT ? (b) : \
+ (a) > (b) ? (a) : (b) \
+)
+/**end repeat**/
+
+/**begin repeat
+ *
+ * #name = BOOL,
+ * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG,
+ * HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * DATETIME, TIMEDELTA#
+ * #type = npy_bool,
+ * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_half, npy_float, npy_double, npy_longdouble,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
+ * npy_datetime, npy_timedelta#
+ */
+
+#define _NPY_CLIP(x, min, max) \
+ _NPY_@name@_MIN(_NPY_@name@_MAX((x), (min)), (max))
+
+NPY_NO_EXPORT void
+@name@_clip(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ if (steps[1] == 0 && steps[2] == 0) {
+ /* min and max are constant throughout the loop, the most common case */
+ /* NOTE: it may be possible to optimize these checks for nan */
+ @type@ min_val = *(@type@ *)args[1];
+ @type@ max_val = *(@type@ *)args[2];
+
+ char *ip1 = args[0], *op1 = args[3];
+ npy_intp is1 = steps[0], os1 = steps[3];
+ npy_intp n = dimensions[0];
+
+        /* contiguous case: body is identical, but the known element-size strides let the compiler vectorize */
+ if (is1 == sizeof(@type@) && os1 == sizeof(@type@)) {
+ for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
+ *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val);
+ }
+ }
+ else {
+ for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
+ *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val);
+ }
+ }
+ }
+ else {
+ TERNARY_LOOP {
+ *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, *(@type@ *)ip2, *(@type@ *)ip3);
+ }
+ }
+ npy_clear_floatstatus_barrier((char*)dimensions);
+}
+
+// clean up the macros we defined above
+#undef _NPY_CLIP
+#undef _NPY_@name@_MAX
+#undef _NPY_@name@_MIN
+
+/**end repeat**/
diff --git a/numpy/core/src/umath/clip.h.src b/numpy/core/src/umath/clip.h.src
new file mode 100644
index 000000000..d77971ad7
--- /dev/null
+++ b/numpy/core/src/umath/clip.h.src
@@ -0,0 +1,18 @@
+#ifndef _NPY_UMATH_CLIP_H_
+#define _NPY_UMATH_CLIP_H_
+
+
+/**begin repeat
+ *
+ * #name = BOOL,
+ * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG,
+ * HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * DATETIME, TIMEDELTA#
+ */
+NPY_NO_EXPORT void
+@name@_clip(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat**/
+
+#endif
diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h
index 7a1ed66bc..ae6d69a3e 100644
--- a/numpy/core/src/umath/fast_loop_macros.h
+++ b/numpy/core/src/umath/fast_loop_macros.h
@@ -58,6 +58,14 @@
npy_intp i;\
for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2)
+/** (ip1, ip2, ip3) -> (op1) */
+#define TERNARY_LOOP\
+ char *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3];\
+ npy_intp is1 = steps[0], is2 = steps[1], is3 = steps[2], os1 = steps[3];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1)
+
/** @} */
/* unary loop input and output contiguous */
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index 2acae3c37..c2732f925 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -259,6 +259,17 @@ npy_ObjectLCM(PyObject *i1, PyObject *i2)
return PyNumber_Absolute(tmp);
}
+
+static PyObject *
+npy_ObjectClip(PyObject *arr, PyObject *min, PyObject *max) {
+ PyObject *o = npy_ObjectMax(arr, min);
+ if (o == NULL) {
+ return NULL;
+ }
+ Py_SETREF(o, npy_ObjectMin(o, max));
+ return o;
+}
+
/*
*****************************************************************************
** COMPLEX FUNCTIONS **
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index ff3b36428..f84d74efe 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -425,6 +425,28 @@ PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
}
}
+NPY_NO_EXPORT void
+PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+{
+ ternaryfunc f = (ternaryfunc)func;
+ TERNARY_LOOP {
+ PyObject *in1 = *(PyObject **)ip1;
+ PyObject *in2 = *(PyObject **)ip2;
+ PyObject *in3 = *(PyObject **)ip3;
+ PyObject **out = (PyObject **)op1;
+ PyObject *ret = f(
+ in1 ? in1 : Py_None,
+ in2 ? in2 : Py_None,
+ in3 ? in3 : Py_None
+ );
+ if (ret == NULL) {
+ return;
+ }
+ Py_XDECREF(*out);
+ *out = ret;
+ }
+}
+
/*UFUNC_API*/
NPY_NO_EXPORT void
PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
@@ -1599,21 +1621,22 @@ FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSE
NPY_NO_EXPORT NPY_GCC_OPT_3 void
FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
+ if (!run_unary_@isa@_@func@_FLOAT(args, dimensions, steps)) {
+ UNARY_LOOP {
+ /*
+ * We use the AVX function to compute exp/log for scalar elements as well.
+ * This is needed to ensure the output of strided and non-strided
+ * cases match. SIMD code handles strided input cases, but not
+ * strided output.
+ */
#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
- @ISA@_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0]);
+ @ISA@_@func@_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0]);
#else
- /*
- * This is the path it would take if ISA was runtime detected, but not
- * compiled for. It fixes the error on clang6.0 which fails to compile
- * AVX512F version. Not sure if I like this idea, if during runtime it
- * detects AXV512F, it will end up running the scalar version instead
- * of AVX2.
- */
- UNARY_LOOP {
- const npy_float in1 = *(npy_float *)ip1;
- *(npy_float *)op1 = @scalarf@(in1);
- }
+ const npy_float in1 = *(npy_float *)ip1;
+ *(npy_float *)op1 = @scalarf@(in1);
#endif
+ }
+ }
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 8dd3170e3..7f05a693a 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -549,6 +549,9 @@ OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *
NPY_NO_EXPORT void
OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func);
+
/*
*****************************************************************************
** END LOOPS **
diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src
index 0cb3c82ad..480c0c72f 100644
--- a/numpy/core/src/umath/matmul.c.src
+++ b/numpy/core/src/umath/matmul.c.src
@@ -267,19 +267,88 @@ NPY_NO_EXPORT void
/**end repeat**/
+
+NPY_NO_EXPORT void
+OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+ void *_ip2, npy_intp is2_n, npy_intp is2_p,
+ void *_op, npy_intp os_m, npy_intp os_p,
+ npy_intp dm, npy_intp dn, npy_intp dp)
+{
+ char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+ npy_intp ib1_n = is1_n * dn;
+ npy_intp ib2_n = is2_n * dn;
+ npy_intp ib2_p = is2_p * dp;
+ npy_intp ob_p = os_p * dp;
+
+ PyObject *product, *sum_of_products = NULL;
+
+ for (npy_intp m = 0; m < dm; m++) {
+ for (npy_intp p = 0; p < dp; p++) {
+ if ( 0 == dn ) {
+ sum_of_products = PyLong_FromLong(0);
+ if (sum_of_products == NULL) {
+ return;
+ }
+ }
+
+ for (npy_intp n = 0; n < dn; n++) {
+ PyObject *obj1 = *(PyObject**)ip1, *obj2 = *(PyObject**)ip2;
+ if (obj1 == NULL) {
+ obj1 = Py_None;
+ }
+ if (obj2 == NULL) {
+ obj2 = Py_None;
+ }
+
+ product = PyNumber_Multiply(obj1, obj2);
+ if (product == NULL) {
+ Py_XDECREF(sum_of_products);
+ return;
+ }
+
+ if (n == 0) {
+ sum_of_products = product;
+ }
+ else {
+ Py_SETREF(sum_of_products, PyNumber_Add(sum_of_products, product));
+ Py_DECREF(product);
+ if (sum_of_products == NULL) {
+ return;
+ }
+ }
+
+ ip2 += is2_n;
+ ip1 += is1_n;
+ }
+
+ *((PyObject **)op) = sum_of_products;
+ ip1 -= ib1_n;
+ ip2 -= ib2_n;
+ op += os_p;
+ ip2 += is2_p;
+ }
+ op -= ob_p;
+ ip2 -= ib2_p;
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
+
+
/**begin repeat
* #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF,
* CFLOAT, CDOUBLE, CLONGDOUBLE,
* UBYTE, USHORT, UINT, ULONG, ULONGLONG,
* BYTE, SHORT, INT, LONG, LONGLONG,
- * BOOL#
+ * BOOL, OBJECT#
* #typ = npy_float,npy_double,npy_longdouble, npy_half,
* npy_cfloat, npy_cdouble, npy_clongdouble,
* npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
* npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_bool#
- * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11#
- * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*12#
+ * npy_bool,npy_object#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*12#
+ * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13#
*/
@@ -398,5 +467,3 @@ NPY_NO_EXPORT void
}
/**end repeat**/
-
-
diff --git a/numpy/core/src/umath/matmul.h.src b/numpy/core/src/umath/matmul.h.src
index 16be7675b..a664b1b4e 100644
--- a/numpy/core/src/umath/matmul.h.src
+++ b/numpy/core/src/umath/matmul.h.src
@@ -3,7 +3,7 @@
* CFLOAT, CDOUBLE, CLONGDOUBLE,
* UBYTE, USHORT, UINT, ULONG, ULONGLONG,
* BYTE, SHORT, INT, LONG, LONGLONG,
- * BOOL#
+ * BOOL, OBJECT#
**/
NPY_NO_EXPORT void
@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index b94a5a0f7..6ddd6fcc6 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -51,6 +51,15 @@ abs_ptrdiff(char *a, char *b)
((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \
((abs_ptrdiff(args[1], args[0]) == 0))))
+/*
+ * output must be contiguous; input may be strided
+ */
+#define IS_OUTPUT_BLOCKABLE_UNARY(esize, vsize) \
+ (steps[1] == (esize) && \
+ (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \
+ ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \
+ ((abs_ptrdiff(args[1], args[0]) == 0))))
+
#define IS_BLOCKABLE_REDUCE(esize, vsize) \
(steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \
npy_is_aligned(args[1], (esize)) && \
@@ -122,20 +131,36 @@ abs_ptrdiff(char *a, char *b)
/**begin repeat
* #ISA = AVX2, AVX512F#
+ * #isa = avx2, avx512f#
+ * #REGISTER_SIZE = 32, 64#
*/
/* prototypes */
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
/**begin repeat1
* #func = exp, log#
*/
-static void
-@ISA@_@func@_FLOAT(npy_float *, npy_float *, const npy_intp n);
+#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE void
+@ISA@_@func@_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp stride);
+#endif
-/**end repeat1**/
+static NPY_INLINE int
+run_unary_@isa@_@func@_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
+ @ISA@_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0]);
+ return 1;
+ }
+ else
+ return 0;
#endif
+ return 0;
+}
+
+/**end repeat1**/
/**end repeat**/
@@ -1111,15 +1136,24 @@ avx2_get_full_load_mask(void)
}
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_partial_load_mask(const npy_int num_elem, const npy_int total_elem)
+avx2_get_partial_load_mask(const npy_int num_lanes, const npy_int total_elem)
{
float maskint[16] = {-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,
1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
- float* addr = maskint + total_elem - num_elem;
+ float* addr = maskint + total_elem - num_lanes;
return _mm256_loadu_ps(addr);
}
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
+avx2_masked_gather(__m256 src,
+ npy_float* addr,
+ __m256i vindex,
+ __m256 mask)
+{
+ return _mm256_mask_i32gather_ps(src, addr, vindex, mask, 4);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
avx2_masked_load(__m256 mask, npy_float* addr)
{
return _mm256_maskload_ps(addr, _mm256_cvtps_epi32(mask));
@@ -1205,6 +1239,15 @@ avx512_get_partial_load_mask(const npy_int num_elem, const npy_int total_elem)
}
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_masked_gather(__m512 src,
+ npy_float* addr,
+ __m512i vindex,
+ __mmask16 kmask)
+{
+ return _mm512_mask_i32gather_ps(src, kmask, vindex, addr, 4);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
avx512_masked_load(__mmask16 mask, npy_float* addr)
{
return _mm512_maskz_loadu_ps(mask, (__m512 *)addr);
@@ -1294,11 +1337,19 @@ static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS
static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
-@ISA@_exp_FLOAT(npy_float * op, npy_float * ip, const npy_intp array_size)
+@ISA@_exp_FLOAT(npy_float * op,
+ npy_float * ip,
+ const npy_intp array_size,
+ const npy_intp steps)
{
+ const npy_intp stride = steps/sizeof(npy_float);
const npy_int num_lanes = @BYTES@/sizeof(npy_float);
npy_float xmax = 88.72283935546875f;
npy_float xmin = -87.3365478515625f;
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
/* Load up frequently used constants */
@vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_HIGHf);
@@ -1317,6 +1368,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF);
@vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f);
@vtype@ poly, num_poly, denom_poly, quadrant;
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
@vtype@i exponent;
@mask@ xmax_mask, xmin_mask, nan_mask, inf_mask;
@@ -1326,10 +1378,18 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
while (num_remaining_elements > 0) {
- if (num_remaining_elements < num_lanes)
+ if (num_remaining_elements < num_lanes) {
load_mask = @isa@_get_partial_load_mask(num_remaining_elements,
- num_lanes);
- @vtype@ x = @isa@_masked_load(load_mask, ip);
+ num_lanes);
+ }
+
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load(load_mask, ip);
+ }
+ else {
+ x = @isa@_masked_gather(zeros_f, ip, vindex, load_mask);
+ }
xmax_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmax), _CMP_GE_OQ);
xmin_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmin), _CMP_LE_OQ);
@@ -1380,13 +1440,14 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@masked_store@(op, @cvtps_epi32@(load_mask), poly);
- ip += num_lanes;
+ ip += num_lanes*stride;
op += num_lanes;
num_remaining_elements -= num_lanes;
}
- if (@mask_to_int@(overflow_mask))
+ if (@mask_to_int@(overflow_mask)) {
npy_set_floatstatus_overflow();
+ }
}
/*
@@ -1404,9 +1465,17 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
*/
static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
-@ISA@_log_FLOAT(npy_float * op, npy_float * ip, const npy_intp array_size)
+@ISA@_log_FLOAT(npy_float * op,
+ npy_float * ip,
+ const npy_intp array_size,
+ const npy_intp steps)
{
+ const npy_intp stride = steps/sizeof(npy_float);
const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
/* Load up frequently used constants */
@vtype@ log_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_LOGf);
@@ -1427,6 +1496,7 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF);
@vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f);
@vtype@ ones_f = _mm@vsize@_set1_ps(1.0f);
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)indexarr);
@vtype@ poly, num_poly, denom_poly, exponent;
@mask@ inf_mask, nan_mask, sqrt2_mask, zero_mask, negx_mask;
@@ -1437,10 +1507,18 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
while (num_remaining_elements > 0) {
- if (num_remaining_elements < num_lanes)
+ if (num_remaining_elements < num_lanes) {
load_mask = @isa@_get_partial_load_mask(num_remaining_elements,
- num_lanes);
- @vtype@ x_in = @isa@_masked_load(load_mask, ip);
+ num_lanes);
+ }
+
+ @vtype@ x_in;
+ if (stride == 1) {
+ x_in = @isa@_masked_load(load_mask, ip);
+ }
+ else {
+ x_in = @isa@_masked_gather(zeros_f, ip, vindex, load_mask);
+ }
negx_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_LT_OQ);
zero_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_EQ_OQ);
@@ -1490,15 +1568,17 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@masked_store@(op, @cvtps_epi32@(load_mask), poly);
- ip += num_lanes;
+ ip += num_lanes*stride;
op += num_lanes;
num_remaining_elements -= num_lanes;
}
- if (@mask_to_int@(invalid_mask))
+ if (@mask_to_int@(invalid_mask)) {
npy_set_floatstatus_invalid();
- if (@mask_to_int@(divide_by_zero_mask))
+ }
+ if (@mask_to_int@(divide_by_zero_mask)) {
npy_set_floatstatus_divbyzero();
+ }
}
#endif
/**end repeat**/
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index cf50d5d5c..ee4197f8f 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import warnings
import pytest
import numpy as np
@@ -205,3 +206,28 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_fromstring_foreign_value(self):
b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
assert_array_equal(b[0], 1)
+
+@pytest.mark.parametrize("int_val", [
+ # cases discussed in gh-10723
+ # and gh-9968
+ 2 ** 1024, 0])
+def test_longdouble_from_int(int_val):
+ # for issue gh-9968
+ str_val = str(int_val)
+ # we'll expect a RuntimeWarning on platforms
+ # with np.longdouble equivalent to np.double
+ # for large integer input
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ # can be inf==inf on some platforms
+ assert np.longdouble(int_val) == np.longdouble(str_val)
+ # we can't directly compare the int and
+ # max longdouble value on all platforms
+ if np.allclose(np.finfo(np.longdouble).max,
+ np.finfo(np.double).max) and w:
+ assert w[0].category is RuntimeWarning
+
+@pytest.mark.parametrize("bool_val", [
+ True, False])
+def test_longdouble_from_bool(bool_val):
+ assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index b29daa675..0894d881d 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -143,6 +143,47 @@ class TestFlags(object):
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
+ def test_writeable_from_c_data(self):
+ # Test that the writeable flag can be changed for an array wrapping
+ # low level C-data, but not owning its data.
+ # Also see that this is deprecated to change from python.
+ from numpy.core._multiarray_tests import get_c_wrapping_array
+
+ arr_writeable = get_c_wrapping_array(True)
+ assert not arr_writeable.flags.owndata
+ assert arr_writeable.flags.writeable
+ view = arr_writeable[...]
+
+ # Toggling the writeable flag works on the view:
+ view.flags.writeable = False
+ assert not view.flags.writeable
+ view.flags.writeable = True
+ assert view.flags.writeable
+ # Flag can be unset on the arr_writeable:
+ arr_writeable.flags.writeable = False
+
+ arr_readonly = get_c_wrapping_array(False)
+ assert not arr_readonly.flags.owndata
+ assert not arr_readonly.flags.writeable
+
+ for arr in [arr_writeable, arr_readonly]:
+ view = arr[...]
+ view.flags.writeable = False # make sure it is readonly
+ arr.flags.writeable = False
+ assert not arr.flags.writeable
+
+ with assert_raises(ValueError):
+ view.flags.writeable = True
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ with assert_raises(DeprecationWarning):
+ arr.flags.writeable = True
+
+ with assert_warns(DeprecationWarning):
+ arr.flags.writeable = True
+
+
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
@@ -1616,16 +1657,31 @@ class TestMethods(object):
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
- a = np.arange(101)
- b = a[::-1].copy()
- for kind in self.sort_kinds:
- msg = "scalar sort, kind=%s" % kind
- c = a.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
- c = b.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
+ # Test unsigned dtypes and nonnegative numbers
+ for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.longdouble]:
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype)
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # Test signed dtypes and negative numbers as well
+ for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64, np.longdouble]:
+ a = np.arange(-50, 51, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype)
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
@@ -1881,12 +1937,14 @@ class TestMethods(object):
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
- a = np.arange(101)
- b = a[::-1].copy()
- for kind in self.sort_kinds:
- msg = "scalar argsort, kind=%s" % kind
- assert_equal(a.copy().argsort(kind=kind), a, msg)
- assert_equal(b.copy().argsort(kind=kind), b, msg)
+
+ for dtype in [np.int32, np.uint32, np.float32]:
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype)
+ assert_equal(a.copy().argsort(kind=kind), a, msg)
+ assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
@@ -3685,17 +3743,6 @@ class TestSubscripting(object):
class TestPickling(object):
- def test_highest_available_pickle_protocol(self):
- try:
- import pickle5
- except ImportError:
- pickle5 = None
-
- if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
- assert pickle.HIGHEST_PROTOCOL >= 5
- else:
- assert pickle.HIGHEST_PROTOCOL < 5
-
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
reason=('this tests the error messages when trying to'
'protocol 5 although it is not available'))
@@ -4270,7 +4317,11 @@ class TestClip(object):
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
- x.clip(clip_min, clip_max, x)
+ # The tests that call us pass clip_min and clip_max that
+ # might not fit in the destination dtype. They were written
+ # assuming the previous unsafe casting, which now must be
+ # passed explicitly to avoid a warning.
+ x.clip(clip_min, clip_max, x, casting='unsafe')
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
@@ -4289,7 +4340,7 @@ class TestClip(object):
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
- 'int', 1024, -120, 100.5, inplace=inplace)
+ 'int', 1024, -120, 100, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
@@ -5735,7 +5786,7 @@ class MatmulCommon(object):
"""
# Should work with these types. Will want to add
# "O" at some point
- types = "?bhilqBHILQefdgFDG"
+ types = "?bhilqBHILQefdgFDGO"
def test_exceptions(self):
dims = [
@@ -5786,8 +5837,9 @@ class MatmulCommon(object):
assert_(res.dtype == dt)
# vector vector returns scalars
- res = self.matmul(v, v)
- assert_(type(res) is np.dtype(dt).type)
+ if dt != "O":
+ res = self.matmul(v, v)
+ assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
@@ -6038,7 +6090,52 @@ class TestMatmul(MatmulCommon):
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
-
+
+ def test_matmul_object(self):
+ import fractions
+
+ f = np.vectorize(fractions.Fraction)
+ def random_ints():
+ return np.random.randint(1, 1000, size=(10, 3, 3))
+ M1 = f(random_ints(), random_ints())
+ M2 = f(random_ints(), random_ints())
+
+ M3 = self.matmul(M1, M2)
+
+ [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
+
+ assert_allclose(N3, self.matmul(N1, N2))
+
+ def test_matmul_object_type_scalar(self):
+ from fractions import Fraction as F
+ v = np.array([F(2,3), F(5,7)])
+ res = self.matmul(v, v)
+ assert_(type(res) is F)
+
+ def test_matmul_empty(self):
+ a = np.empty((3, 0), dtype=object)
+ b = np.empty((0, 3), dtype=object)
+ c = np.zeros((3, 3))
+ assert_array_equal(np.matmul(a, b), c)
+
+ def test_matmul_exception_multiply(self):
+ # test that matmul fails if `__mul__` is missing
+ class add_not_multiply():
+ def __add__(self, other):
+ return self
+ a = np.full((3,3), add_not_multiply())
+ with assert_raises(TypeError):
+ b = np.matmul(a, a)
+
+ def test_matmul_exception_add(self):
+ # test that matmul fails if `__add__` is missing
+ class multiply_not_add():
+ def __mul__(self, other):
+ return self
+ a = np.full((3,3), multiply_not_add())
+ with assert_raises(TypeError):
+ b = np.matmul(a, a)
+
if sys.version_info[:2] >= (3, 5):
@@ -7776,13 +7873,6 @@ class TestWritebackIfCopy(object):
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
- def test_clip_with_out(self):
- mat = np.eye(5)
- out = np.eye(5, dtype='i2')
- res = np.clip(mat, a_min=-10, a_max=0, out=out)
- assert_(res is out)
- assert_equal(np.sum(out), 0)
-
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_insert
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 1c53f9372..c62eb257b 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -13,7 +13,7 @@ from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- HAS_REFCOUNT
+ assert_warns, HAS_REFCOUNT
)
@@ -1336,11 +1336,17 @@ class TestClip(object):
self.nr = 5
self.nc = 3
- def fastclip(self, a, m, M, out=None):
+ def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
- return a.clip(m, M)
+ if casting is None:
+ return a.clip(m, M)
+ else:
+ return a.clip(m, M, casting=casting)
else:
- return a.clip(m, M, out)
+ if casting is None:
+ return a.clip(m, M, out)
+ else:
+ return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
# use slow-clip
@@ -1378,6 +1384,20 @@ class TestClip(object):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
+
+ @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
+ def test_ones_pathological(self, dtype):
+ # for preservation of behavior described in
+ # gh-12519; amin > amax behavior may still change
+ # in the future
+ arr = np.ones(10, dtype=dtype)
+ expected = np.zeros(10, dtype=dtype)
+ actual = np.clip(arr, 1, 0)
+ if dtype == 'O':
+ assert actual.tolist() == expected.tolist()
+ else:
+ assert_equal(actual, expected)
+
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
@@ -1476,14 +1496,21 @@ class TestClip(object):
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
- def test_simple_int32_inout(self):
+ @pytest.mark.parametrize("casting", [None, "unsafe"])
+ def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ if casting is None:
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac, casting=casting)
+ else:
+ # explicitly passing "unsafe" will silence warning
+ self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1505,7 +1532,9 @@ class TestClip(object):
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1516,7 +1545,9 @@ class TestClip(object):
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1692,7 +1723,9 @@ class TestClip(object):
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1714,7 +1747,9 @@ class TestClip(object):
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1725,7 +1760,9 @@ class TestClip(object):
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1778,11 +1815,94 @@ class TestClip(object):
def test_clip_nan(self):
d = np.arange(7.)
- assert_equal(d.clip(min=np.nan), d)
- assert_equal(d.clip(max=np.nan), d)
- assert_equal(d.clip(min=np.nan, max=np.nan), d)
- assert_equal(d.clip(min=-2, max=np.nan), d)
- assert_equal(d.clip(min=np.nan, max=10), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan, max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=-2, max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan, max=10), d)
+
+ def test_object_clip(self):
+ a = np.arange(10, dtype=object)
+ actual = np.clip(a, 1, 5)
+ expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
+ assert actual.tolist() == expected.tolist()
+
+ def test_clip_all_none(self):
+ a = np.arange(10, dtype=object)
+ with assert_raises_regex(ValueError, 'max or min'):
+ np.clip(a, None, None)
+
+ def test_clip_invalid_casting(self):
+ a = np.arange(10, dtype=object)
+ with assert_raises_regex(ValueError,
+ 'casting must be one of'):
+ self.fastclip(a, 1, 8, casting="garbage")
+
+ @pytest.mark.parametrize("amin, amax", [
+ # two scalars
+ (1, 0),
+ # mix scalar and array
+ (1, np.zeros(10)),
+ # two arrays
+ (np.ones(10), np.zeros(10)),
+ ])
+ def test_clip_value_min_max_flip(self, amin, amax):
+ a = np.arange(10, dtype=np.int64)
+ # requirement from ufunc_docstrings.py
+ expected = np.minimum(np.maximum(a, amin), amax)
+ actual = np.clip(a, amin, amax)
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("arr, amin, amax, exp", [
+ # for a bug in npy_ObjectClip, based on a
+ # case produced by hypothesis
+ (np.zeros(10, dtype=np.int64),
+ 0,
+ -2**64+1,
+ np.full(10, -2**64+1, dtype=object)),
+ # for bugs in NPY_TIMEDELTA_MAX, based on a case
+ # produced by hypothesis
+ (np.zeros(10, dtype='m8') - 1,
+ 0,
+ 0,
+ np.zeros(10, dtype='m8')),
+ ])
+ def test_clip_problem_cases(self, arr, amin, amax, exp):
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, exp)
+
+ @pytest.mark.xfail(reason="no scalar nan propagation yet")
+ @pytest.mark.parametrize("arr, amin, amax", [
+ # problematic scalar nan case from hypothesis
+ (np.zeros(10, dtype=np.int64),
+ np.array(np.nan),
+ np.zeros(10, dtype=np.int32)),
+ ])
+ def test_clip_scalar_nan_propagation(self, arr, amin, amax):
+ # enforcement of scalar nan propagation for comparisons
+ # called through clip()
+ expected = np.minimum(np.maximum(arr, amin), amax)
+ with assert_warns(DeprecationWarning):
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
+
+ @pytest.mark.xfail(reason="propagation doesn't match spec")
+ @pytest.mark.parametrize("arr, amin, amax", [
+ (np.array([1] * 10, dtype='m8'),
+ np.timedelta64('NaT'),
+ np.zeros(10, dtype=np.int32)),
+ ])
+ def test_NaT_propagation(self, arr, amin, amax):
+ # NOTE: the expected function spec doesn't
+ # propagate NaT, but clip() now does
+ expected = np.minimum(np.maximum(arr, amin), amax)
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
class TestAllclose(object):
@@ -2656,6 +2776,47 @@ def test_outer_out_param():
assert_equal(np.outer(arr2, arr3, out2), out2)
+class TestIndices(object):
+
+ def test_simple(self):
+ [x, y] = np.indices((4, 3))
+ assert_array_equal(x, np.array([[0, 0, 0],
+ [1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]]))
+ assert_array_equal(y, np.array([[0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2]]))
+
+ def test_single_input(self):
+ [x] = np.indices((4,))
+ assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+ [x] = np.indices((4,), sparse=True)
+ assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+ def test_scalar_input(self):
+ assert_array_equal([], np.indices(()))
+ assert_array_equal([], np.indices((), sparse=True))
+ assert_array_equal([[]], np.indices((0,)))
+ assert_array_equal([[]], np.indices((0,), sparse=True))
+
+ def test_sparse(self):
+ [x, y] = np.indices((4,3), sparse=True)
+ assert_array_equal(x, np.array([[0], [1], [2], [3]]))
+ assert_array_equal(y, np.array([[0, 1, 2]]))
+
+ @pytest.mark.parametrize("dtype", [np.int, np.float32, np.float64])
+ @pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
+ def test_return_type(self, dtype, dims):
+ inds = np.indices(dims, dtype=dtype)
+ assert_(inds.dtype == dtype)
+
+ for arr in np.indices(dims, dtype=dtype, sparse=True):
+ assert_(arr.dtype == dtype)
+
+
class TestRequire(object):
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
@@ -2736,6 +2897,8 @@ class TestBroadcast(object):
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
np.empty((5, 1, 7))]
mits = [np.broadcast(*arrs),
+ np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
+ np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
for mit in mits:
@@ -2760,12 +2923,24 @@ class TestBroadcast(object):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
- if j < 1 or j > 32:
+ if j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
assert_equal(mit.numiter, j)
+ def test_broadcast_error_kwargs(self):
+ #gh-13455
+ arrs = [np.empty((5, 6, 7))]
+ mit = np.broadcast(*arrs)
+ mit2 = np.broadcast(*arrs, **{})
+ assert_equal(mit.shape, mit2.shape)
+ assert_equal(mit.ndim, mit2.ndim)
+ assert_equal(mit.nd, mit2.nd)
+ assert_equal(mit.numiter, mit2.numiter)
+ assert_(mit.iters[0].base is mit2.iters[0].base)
+
+ assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
class TestKeepdims(object):
@@ -2788,7 +2963,7 @@ class TestTensordot(object):
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
-
+
def test_zero_dimensional(self):
# gh-12130
arr_0d = np.array(1)
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 6e9610fff..7f02399b2 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -2,6 +2,7 @@ from __future__ import division, absolute_import, print_function
import inspect
import sys
+from unittest import mock
import numpy as np
from numpy.testing import (
@@ -10,7 +11,6 @@ from numpy.core.overrides import (
_get_implementing_args, array_function_dispatch,
verify_matching_signatures)
from numpy.compat import pickle
-import pytest
def _return_not_implemented(self, *args, **kwargs):
@@ -190,12 +190,18 @@ class TestNDArrayArrayFunction(object):
result = np.concatenate((array, override_sub))
assert_equal(result, expected.view(OverrideSub))
+ def test_skip_array_function(self):
+ assert_(dispatched_one_arg.__skip_array_function__
+ is dispatched_one_arg.__wrapped__)
+
def test_no_wrapper(self):
+ # This shouldn't happen unless a user intentionally calls
+ # __array_function__ with invalid arguments, but check that we raise
+ # an appropriate error all the same.
array = np.array(1)
- func = dispatched_one_arg.__wrapped__
- with assert_raises_regex(AttributeError, '__wrapped__'):
- array.__array_function__(func=func,
- types=(np.ndarray,),
+ func = dispatched_one_arg.__skip_array_function__
+ with assert_raises_regex(AttributeError, '__skip_array_function__'):
+ array.__array_function__(func=func, types=(np.ndarray,),
args=(array,), kwargs={})
@@ -378,3 +384,49 @@ class TestNumPyFunctions(object):
return 'yes'
assert_equal(np.sum(MyArray()), 'yes')
+
+ def test_sum_implementation_on_list(self):
+ assert_equal(np.sum.__skip_array_function__([1, 2, 3]), 6)
+
+ def test_sum_on_mock_array(self):
+
+ # We need a proxy for mocks because __array_function__ is only looked
+ # up in the class dict
+ class ArrayProxy:
+ def __init__(self, value):
+ self.value = value
+ def __array_function__(self, *args, **kwargs):
+ return self.value.__array_function__(*args, **kwargs)
+ def __array__(self, *args, **kwargs):
+ return self.value.__array__(*args, **kwargs)
+
+ proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
+ proxy.value.__array_function__.return_value = 1
+ result = np.sum(proxy)
+ assert_equal(result, 1)
+ proxy.value.__array_function__.assert_called_once_with(
+ np.sum, (ArrayProxy,), (proxy,), {})
+ proxy.value.__array__.assert_not_called()
+
+ proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
+ proxy.value.__array__.return_value = np.array(2)
+ result = np.sum.__skip_array_function__(proxy)
+ assert_equal(result, 2)
+ # TODO: switch to proxy.value.__array__.assert_called() and
+ # proxy.value.__array_function__.assert_not_called() once we drop
+ # Python 3.5 support.
+ ((called_method_name, _, _),) = proxy.value.mock_calls
+ assert_equal(called_method_name, '__array__')
+
+ def test_sum_forwarding_implementation(self):
+
+ class MyArray(object):
+
+ def sum(self, axis, out):
+ return 'summed'
+
+ def __array_function__(self, func, types, args, kwargs):
+ return func.__skip_array_function__(*args, **kwargs)
+
+ # note: the internal implementation of np.sum() calls the .sum() method
+ assert_equal(np.sum(MyArray()), 'summed')
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index b6b68d922..caeea39f4 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1936,3 +1936,56 @@ class TestUfunc(object):
assert not np.isinf(nat)
except TypeError:
pass # ok, just not implemented
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+ if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+ '''
+ Check all ufuncs that the correct type is returned. Avoid
+ object and boolean types since many operations are not defined for
+ for them.
+
+ Choose the shape so even dot and matmul will succeed
+ '''
+ for typ in ufunc.types:
+ # types is a list of strings like ii->i
+ if 'O' in typ or '?' in typ:
+ continue
+ inp, out = typ.split('->')
+ args = [np.ones((3, 3), t) for t in inp]
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings("always")
+ res = ufunc(*args)
+ if isinstance(res, tuple):
+ outs = tuple(out)
+ assert len(res) == len(outs)
+ for r, t in zip(res, outs):
+ assert r.dtype == np.dtype(t)
+ else:
+ assert res.dtype == np.dtype(out)
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+ if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_noncontiguous(ufunc):
+ '''
+ Check that contiguous and non-contiguous calls to ufuncs
+ have the same results for values in range(9)
+ '''
+ for typ in ufunc.types:
+ # types is a list of strings like ii->i
+ if any(set('O?mM') & set(typ)):
+ # bool, object, datetime are too irregular for this simple test
+ continue
+ inp, out = typ.split('->')
+ args_c = [np.empty(6, t) for t in inp]
+ args_n = [np.empty(18, t)[::3] for t in inp]
+ for a in args_c:
+ a.flat = range(1,7)
+ for a in args_n:
+ a.flat = range(1,7)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings("always")
+ res_c = ufunc(*args_c)
+ res_n = ufunc(*args_n)
+ assert_equal(res_c, res_n)
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 2c963b5eb..cd2034d9c 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -13,7 +13,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_allclose, assert_no_warnings, suppress_warnings,
+ assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
_gen_alignment_data
)
@@ -678,6 +678,31 @@ class TestSpecialFloats(object):
assert_raises(FloatingPointError, np.log, np.float32(-np.inf))
assert_raises(FloatingPointError, np.log, np.float32(-1.0))
+class TestExpLogFloat32(object):
+ def test_exp_float32(self):
+ np.random.seed(42)
+ x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=2.6)
+
+ def test_log_float32(self):
+ np.random.seed(42)
+ x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=3.9)
+
+ def test_strided_exp_log_float32(self):
+ np.random.seed(42)
+ strides = np.random.randint(low=-100, high=100, size=100)
+ sizes = np.random.randint(low=1, high=2000, size=100)
+ for ii in sizes:
+ x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
+ exp_true = np.exp(x_f32)
+ log_true = np.log(x_f32)
+ for jj in strides:
+ assert_equal(np.exp(x_f32[::jj]), exp_true[::jj])
+ assert_equal(np.log(x_f32[::jj]), log_true[::jj])
+
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index b33e315b4..3bcb7b884 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -267,22 +267,21 @@ include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
def resolve_includes(source):
d = os.path.dirname(source)
- fid = open(source)
- lines = []
- for line in fid:
- m = include_src_re.match(line)
- if m:
- fn = m.group('name')
- if not os.path.isabs(fn):
- fn = os.path.join(d, fn)
- if os.path.isfile(fn):
- print('Including file', fn)
- lines.extend(resolve_includes(fn))
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ print('Including file', fn)
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
else:
lines.append(line)
- else:
- lines.append(line)
- fid.close()
return lines
def process_file(source):
@@ -331,6 +330,7 @@ def main():
except ValueError:
e = get_exception()
raise ValueError("In %s loop at %s" % (file, e))
+
outfile.write(writestr)
if __name__ == "__main__":
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
index 65c60c498..c5c1163c6 100644
--- a/numpy/distutils/from_template.py
+++ b/numpy/distutils/from_template.py
@@ -212,22 +212,21 @@ include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]sr
def resolve_includes(source):
d = os.path.dirname(source)
- fid = open(source)
- lines = []
- for line in fid:
- m = include_src_re.match(line)
- if m:
- fn = m.group('name')
- if not os.path.isabs(fn):
- fn = os.path.join(d, fn)
- if os.path.isfile(fn):
- print('Including file', fn)
- lines.extend(resolve_includes(fn))
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ print('Including file', fn)
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
else:
lines.append(line)
- else:
- lines.append(line)
- fid.close()
return lines
def process_file(source):
@@ -260,5 +259,6 @@ def main():
writestr = process_str(allstr)
outfile.write(writestr)
+
if __name__ == "__main__":
main()
diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py
index 2420798ab..fe8fd1b0f 100644
--- a/numpy/distutils/line_endings.py
+++ b/numpy/distutils/line_endings.py
@@ -11,7 +11,8 @@ def dos2unix(file):
print(file, "Directory!")
return
- data = open(file, "rb").read()
+ with open(file, "rb") as fp:
+ data = fp.read()
if '\0' in data:
print(file, "Binary!")
return
@@ -44,7 +45,8 @@ def unix2dos(file):
print(file, "Directory!")
return
- data = open(file, "rb").read()
+ with open(file, "rb") as fp:
+ data = fp.read()
if '\0' in data:
print(file, "Binary!")
return
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 1f4552aea..ba4ad4643 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -17,6 +17,7 @@ classes are available:
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
+ flame_info
blas_info
lapack_info
openblas_info
@@ -126,6 +127,8 @@ import os
import re
import copy
import warnings
+import subprocess
+
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
@@ -298,13 +301,12 @@ else:
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
- import subprocess as sp
tmp = None
try:
# Explicitly open/close file to avoid ResourceWarning when
# tests are run in debug mode Python 3.
tmp = open(os.devnull, 'w')
- p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
+ p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
@@ -388,6 +390,7 @@ def get_info(name, notfound_action=0):
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
+ 'flame': flame_info, # use lapack_opt instead
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
@@ -463,6 +466,13 @@ class AtlasNotFoundError(NotFoundError):
the ATLAS environment variable."""
+class FlameNotFoundError(NotFoundError):
+ """
+ FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [flame])."""
+
+
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
@@ -1590,7 +1600,7 @@ class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
# Default order of LAPACK checks
- lapack_order = ['mkl', 'openblas', 'atlas', 'accelerate', 'lapack']
+ lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack']
def _calc_info_mkl(self):
info = get_info('lapack_mkl')
@@ -1610,6 +1620,13 @@ class lapack_opt_info(system_info):
return True
return False
+ def _calc_info_flame(self):
+ info = get_info('flame')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
def _calc_info_atlas(self):
info = get_info('atlas_3_10_threads')
if not info:
@@ -2042,6 +2059,82 @@ class blis_info(blas_info):
include_dirs=incl_dirs)
self.set_info(**info)
+
+class flame_info(system_info):
+ """ Usage of libflame for LAPACK operations
+
+ This requires libflame to be compiled with lapack wrappers:
+
+ ./configure --enable-lapack2flame ...
+
+ Be aware that libflame 5.1.0 has some missing names in the shared library, so
+ if you have problems, try the static flame library.
+ """
+ section = 'flame'
+ _lib_names = ['flame']
+ notfounderror = FlameNotFoundError
+
+ def check_embedded_lapack(self, info):
+ """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """
+ c = customized_ccompiler()
+
+ tmpdir = tempfile.mkdtemp()
+ s = """void zungqr_();
+ int main(int argc, const char *argv[])
+ {
+ zungqr_();
+ return 0;
+ }"""
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ extra_args = info.get('extra_link_args', [])
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
+ return True
+ except distutils.ccompiler.LinkError:
+ return False
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ flame_libs = self.get_libs('libraries', self._lib_names)
+
+ info = self.check_libs2(lib_dirs, flame_libs, [])
+ if info is None:
+ return
+
+ if self.check_embedded_lapack(info):
+ # check if the user has supplied all information required
+ self.set_info(**info)
+ else:
+ # Try and get the BLAS lib to see if we can get it to work
+ blas_info = get_info('blas_opt')
+ if not blas_info:
+ # since we already failed once, this ain't going to work either
+ return
+
+ # Now we need to merge the two dictionaries
+ for key in blas_info:
+ if isinstance(blas_info[key], list):
+ info[key] = info.get(key, []) + blas_info[key]
+ elif isinstance(blas_info[key], tuple):
+ info[key] = info.get(key, ()) + blas_info[key]
+ else:
+ info[key] = info.get(key, '') + blas_info[key]
+
+ # Now check again
+ if self.check_embedded_lapack(info):
+ self.set_info(**info)
+
+
class accelerate_info(system_info):
section = 'accelerate'
_lib_names = ['accelerate', 'veclib']
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 47223151f..110337f92 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -269,7 +269,8 @@ def scaninputline(inputline):
options["f2py_wrapper_output"] = l
elif f == 1:
try:
- open(l).close()
+ with open(l):
+ pass
files.append(l)
except IOError as detail:
errmess('IOError: %s. Skipping file "%s".\n' %
@@ -333,9 +334,8 @@ def callcrackfortran(files, options):
if options['signsfile'][-6:] == 'stdout':
sys.stdout.write(pyf)
else:
- f = open(options['signsfile'], 'w')
- f.write(pyf)
- f.close()
+ with open(options['signsfile'], 'w') as f:
+ f.write(pyf)
if options["coutput"] is None:
for mod in postlist:
mod["coutput"] = "%smodule.c" % mod["name"]
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 23d36b2c2..6769f1b1f 100644
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -1257,82 +1257,77 @@ def buildmodule(m, um):
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
- f = open(fn, 'w')
- f.write(ar['modulebody'].replace('\t', 2 * ' '))
- f.close()
+ with open(fn, 'w') as f:
+ f.write(ar['modulebody'].replace('\t', 2 * ' '))
outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.rest')
- f = open(fn, 'w')
- f.write('.. -*- rest -*-\n')
- f.write('\n'.join(ar['restdoc']))
- f.close()
+ with open(fn, 'w') as f:
+ f.write('.. -*- rest -*-\n')
+ f.write('\n'.join(ar['restdoc']))
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.tex')
ret['ltx'] = fn
- f = open(fn, 'w')
- f.write(
- '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
- if 'shortlatex' not in options:
+ with open(fn, 'w') as f:
f.write(
- '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
- f.write('\n'.join(ar['latexdoc']))
- if 'shortlatex' not in options:
- f.write('\\end{document}')
- f.close()
+ '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
+ if 'shortlatex' not in options:
+ f.write(
+ '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
+ f.write('\n'.join(ar['latexdoc']))
+ if 'shortlatex' not in options:
+ f.write('\\end{document}')
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
- f = open(wn, 'w')
- f.write('C -*- fortran -*-\n')
- f.write(
- 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
- f.write(
- 'C It contains Fortran 77 wrappers to fortran functions.\n')
- lines = []
- for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
- if l and l[0] == ' ':
- while len(l) >= 66:
- lines.append(l[:66] + '\n &')
- l = l[66:]
- lines.append(l + '\n')
- else:
- lines.append(l + '\n')
- lines = ''.join(lines).replace('\n &\n', '\n')
- f.write(lines)
- f.close()
+ with open(wn, 'w') as f:
+ f.write('C -*- fortran -*-\n')
+ f.write(
+ 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
+ f.write(
+ 'C It contains Fortran 77 wrappers to fortran functions.\n')
+ lines = []
+ for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
+ if l and l[0] == ' ':
+ while len(l) >= 66:
+ lines.append(l[:66] + '\n &')
+ l = l[66:]
+ lines.append(l + '\n')
+ else:
+ lines.append(l + '\n')
+ lines = ''.join(lines).replace('\n &\n', '\n')
+ f.write(lines)
outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
ret['fsrc'] = wn
- f = open(wn, 'w')
- f.write('! -*- f90 -*-\n')
- f.write(
- '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
- f.write(
- '! It contains Fortran 90 wrappers to fortran functions.\n')
- lines = []
- for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
- if len(l) > 72 and l[0] == ' ':
- lines.append(l[:72] + '&\n &')
- l = l[72:]
- while len(l) > 66:
- lines.append(l[:66] + '&\n &')
- l = l[66:]
- lines.append(l + '\n')
- else:
- lines.append(l + '\n')
- lines = ''.join(lines).replace('\n &\n', '\n')
- f.write(lines)
- f.close()
+ with open(wn, 'w') as f:
+ f.write('! -*- f90 -*-\n')
+ f.write(
+ '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
+ f.write(
+ '! It contains Fortran 90 wrappers to fortran functions.\n')
+ lines = []
+ for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
+ if len(l) > 72 and l[0] == ' ':
+ lines.append(l[:72] + '&\n &')
+ l = l[72:]
+ while len(l) > 66:
+ lines.append(l[:66] + '&\n &')
+ l = l[66:]
+ lines.append(l + '\n')
+ else:
+ lines.append(l + '\n')
+ lines = ''.join(lines).replace('\n &\n', '\n')
+ f.write(lines)
outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 553c9371d..cd8700051 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -146,6 +146,13 @@ The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
@@ -162,7 +169,7 @@ import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
- asbytes, asstr, isfileobj, long, os_fspath, pickle
+ isfileobj, long, os_fspath, pickle
)
@@ -173,10 +180,16 @@ BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+ (1, 0): ('<H', 'latin1'),
+ (2, 0): ('<I', 'latin1'),
+ (3, 0): ('<I', 'utf8'),
+}
+
def _check_version(version):
- if version not in [(1, 0), (2, 0), None]:
- msg = "we only support format version (1,0) and (2, 0), not %s"
+ if version not in [(1, 0), (2, 0), (3, 0), None]:
+ msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
@@ -261,15 +274,19 @@ def dtype_to_descr(dtype):
def descr_to_dtype(descr):
'''
descr may be stored as dtype.descr, which is a list of
- (name, format, [shape]) tuples. Offsets are not explicitly saved, rather
- empty fields with name,format == '', '|Vn' are added as padding.
+ (name, format, [shape]) tuples where format may be a str or a tuple.
+ Offsets are not explicitly saved, rather empty fields with
+ name, format == '', '|Vn' are added as padding.
This function reverses the process, eliminating the empty padding fields.
'''
- if isinstance(descr, (str, dict)):
+ if isinstance(descr, str):
# No padding removal needed
return numpy.dtype(descr)
-
+ elif isinstance(descr, tuple):
+ # subtype, will always have a shape descr[1]
+ dt = descr_to_dtype(descr[0])
+ return numpy.dtype((dt, descr[1]))
fields = []
offset = 0
for field in descr:
@@ -322,6 +339,56 @@ def header_data_from_array_1_0(array):
d['descr'] = dtype_to_descr(array.dtype)
return d
+
+def _wrap_header(header, version):
+ """
+ Takes a stringified header, and attaches the prefix and padding to it
+ """
+ import struct
+ assert version is not None
+ fmt, encoding = _header_size_info[version]
+ if not isinstance(header, bytes): # always true on python 3
+ header = header.encode(encoding)
+ hlen = len(header) + 1
+ padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+ try:
+ header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+ except struct.error:
+ msg = "Header length {} too big for version={}".format(hlen, version)
+ raise ValueError(msg)
+
+ # Pad the header with spaces and a final newline such that the magic
+ # string, the header-length short and the header are aligned on a
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ return header_prefix + header + b' '*padlen + b'\n'
+
+
+def _wrap_header_guess_version(header):
+ """
+ Like `_wrap_header`, but chooses an appropriate version given the contents
+ """
+ try:
+ return _wrap_header(header, (1, 0))
+ except ValueError:
+ pass
+
+ try:
+ ret = _wrap_header(header, (2, 0))
+ except UnicodeEncodeError:
+ pass
+ else:
+ warnings.warn("Stored array in format 2.0. It can only be "
+ "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ return ret
+
+ header = _wrap_header(header, (3, 0))
+ warnings.warn("Stored array in format 3.0. It can only be "
+ "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+ return header
+
+
def _write_array_header(fp, d, version=None):
""" Write the header for an array and returns the version used
@@ -335,48 +402,19 @@ def _write_array_header(fp, d, version=None):
None means use oldest that works
explicit version will raise a ValueError if the format does not
allow saving this data. Default: None
- Returns
- -------
- version : tuple of int
- the file version which needs to be used to store the data
"""
- import struct
header = ["{"]
for key, value in sorted(d.items()):
# Need to use repr here, since we eval these when reading
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
- header = asbytes(_filter_header(header))
-
- hlen = len(header) + 1 # 1 for newline
- padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN)
- padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN)
-
- # Which version(s) we write depends on the total header size; v1 has a max of 65535
- if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)):
- version = (1, 0)
- header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1)
- topad = padlen_v1
- elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)):
- version = (2, 0)
- header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2)
- topad = padlen_v2
+ header = _filter_header(header)
+ if version is None:
+ header = _wrap_header_guess_version(header)
else:
- msg = "Header length %s too big for version=%s"
- msg %= (hlen, version)
- raise ValueError(msg)
-
- # Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a
- # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
- # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
- # offset must be page-aligned (i.e. the beginning of the file).
- header = header + b' '*topad + b'\n'
-
- fp.write(header_prefix)
+ header = _wrap_header(header, version)
fp.write(header)
- return version
def write_array_header_1_0(fp, d):
""" Write the header for an array using the 1.0 format.
@@ -479,7 +517,7 @@ def _filter_header(s):
Parameters
----------
- s : byte string
+ s : string
Npy file header.
Returns
@@ -497,7 +535,7 @@ def _filter_header(s):
tokens = []
last_token_was_number = False
# adding newline as python 2.7.5 workaround
- string = asstr(s) + "\n"
+ string = s + "\n"
for token in tokenize.generate_tokens(StringIO(string).readline):
token_type = token[0]
token_string = token[1]
@@ -519,16 +557,15 @@ def _read_array_header(fp, version):
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
- if version == (1, 0):
- hlength_type = '<H'
- elif version == (2, 0):
- hlength_type = '<I'
- else:
+ hinfo = _header_size_info.get(version)
+ if hinfo is None:
raise ValueError("Invalid version {!r}".format(version))
+ hlength_type, encoding = hinfo
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
+ header = header.decode(encoding)
# The header is a pretty-printed string representation of a literal
# Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
@@ -603,12 +640,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
"""
_check_version(version)
- used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
- version)
- # this warning can be removed when 1.9 has aged enough
- if version != (2, 0) and used_ver == (2, 0):
- warnings.warn("Stored array in format 2.0. It can only be"
- "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ _write_array_header(fp, header_data_from_array_1_0(array), version)
if array.itemsize == 0:
buffersize = 0
@@ -811,11 +843,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
# If we got here, then it should be safe to create the file.
fp = open(os_fspath(filename), mode+'b')
try:
- used_ver = _write_array_header(fp, d, version)
- # this warning can be removed when 1.9 has aged enough
- if version != (2, 0) and used_ver == (2, 0):
- warnings.warn("Stored array in format 2.0. It can only be"
- "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ _write_array_header(fp, d, version)
offset = fp.tell()
finally:
fp.close()
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 7fa51d683..2a6d39abc 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -682,7 +682,7 @@ def select(condlist, choicelist, default=0):
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
@@ -717,7 +717,7 @@ def select(condlist, choicelist, default=0):
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
@@ -2443,7 +2443,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
- RuntimeWarning, stacklevel=2)
+ RuntimeWarning, stacklevel=3)
fact = 0.0
X -= avg[:, None]
@@ -2522,7 +2522,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
c = cov(x, y, rowvar)
try:
d = diag(c)
@@ -4304,7 +4304,7 @@ def delete(arr, obj, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
- "from delete and raise an error", DeprecationWarning, stacklevel=2)
+ "from delete and raise an error", DeprecationWarning, stacklevel=3)
if wrap:
return wrap(arr)
else:
@@ -4373,7 +4373,7 @@ def delete(arr, obj, axis=None):
if obj.dtype == bool:
warnings.warn("in the future insert will treat boolean arrays and "
"array-likes as boolean index instead of casting it "
- "to integer", FutureWarning, stacklevel=2)
+ "to integer", FutureWarning, stacklevel=3)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
@@ -4401,7 +4401,7 @@ def delete(arr, obj, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
- "error in the future", DeprecationWarning, stacklevel=2)
+ "error in the future", DeprecationWarning, stacklevel=3)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
@@ -4412,13 +4412,13 @@ def delete(arr, obj, axis=None):
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
- "`numpy.delete`.", FutureWarning, stacklevel=2)
+ "`numpy.delete`.", FutureWarning, stacklevel=3)
obj = obj[positive_indices]
keep[obj, ] = False
@@ -4543,7 +4543,7 @@ def insert(arr, obj, values, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
- "from insert and raise an error", DeprecationWarning, stacklevel=2)
+ "from insert and raise an error", DeprecationWarning, stacklevel=3)
arr = arr.copy(order=arrorder)
arr[...] = values
if wrap:
@@ -4567,7 +4567,7 @@ def insert(arr, obj, values, axis=None):
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
- "integer", FutureWarning, stacklevel=2)
+ "integer", FutureWarning, stacklevel=3)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
@@ -4617,7 +4617,7 @@ def insert(arr, obj, values, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
- "error in the future", DeprecationWarning, stacklevel=2)
+ "error in the future", DeprecationWarning, stacklevel=3)
indices = indices.astype(intp)
indices[indices < 0] += N
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index ee9a3053c..5bcedc7f4 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -148,7 +148,8 @@ def _hist_bin_stone(x, range):
nbins_upper_bound = max(100, int(np.sqrt(n)))
nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
if nbins == nbins_upper_bound:
- warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
+ warnings.warn("The number of bins estimated may be suboptimal.",
+ RuntimeWarning, stacklevel=3)
return ptp_x / nbins
@@ -279,7 +280,7 @@ def _ravel_and_check_weights(a, weights):
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
- RuntimeWarning, stacklevel=2)
+ RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
@@ -888,7 +889,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
warnings.warn(
"The normed argument is ignored when density is provided. "
"In future passing both will result in an error.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
normed = None
if density:
@@ -904,7 +905,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
"density=True will produce the same result anyway. "
"The argument will be removed in a future version of "
"numpy.",
- np.VisibleDeprecationWarning, stacklevel=2)
+ np.VisibleDeprecationWarning, stacklevel=3)
# this normalization is incorrect, but
db = np.array(np.diff(bin_edges), float)
@@ -915,7 +916,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
warnings.warn(
"Passing normed=False is deprecated, and has no effect. "
"Consider passing the density argument instead.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
return n, bin_edges
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 77c851fcf..f497aca63 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -165,7 +165,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
c = np.isnan(arr1d)
s = np.nonzero(c)[0]
if s.size == arr1d.size:
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=5)
return arr1d[:0], True
elif s.size == 0:
return arr1d, overwrite_input
@@ -318,7 +319,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
# which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=3)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
@@ -330,7 +332,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
- warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=3)
return res
@@ -431,7 +434,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
# which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=3)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
@@ -443,7 +447,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
- warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=3)
return res
@@ -947,7 +952,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
isbad = (cnt == 0)
if isbad.any():
- warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
@@ -959,7 +964,7 @@ def _nanmedian1d(arr1d, overwrite_input=False):
See nanmedian for parameter usage
"""
arr1d, overwrite_input = _remove_nan_1d(arr1d,
- overwrite_input=overwrite_input)
+ overwrite_input=overwrite_input)
if arr1d.size == 0:
return np.nan
@@ -1002,7 +1007,8 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
a = np.ma.masked_array(a, np.isnan(a))
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=4)
if out is not None:
out[...] = m.filled(np.nan)
return out
@@ -1547,7 +1553,8 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
isbad = (dof <= 0)
if np.any(isbad):
- warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
+ warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
+ stacklevel=3)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 0a284d78f..1845305d1 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1549,7 +1549,8 @@ def fromregex(file, regexp, dtype, encoding=None):
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
- names=None, excludelist=None, deletechars=None,
+ names=None, excludelist=None,
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
@@ -1716,6 +1717,16 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
array((1, 1.3, b'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+ An example to show comments
+
+ >>> f = StringIO('''
+ ... text,# of chars
+ ... hello world,11
+ ... numpy,5''')
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
+
"""
if max_rows is not None:
if skip_footer:
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 1f08abf36..2c72f623c 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -634,7 +634,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
- warnings.warn(msg, RankWarning, stacklevel=3)
+ warnings.warn(msg, RankWarning, stacklevel=4)
if full:
return c, resids, rank, s, rcond
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index ccbcfad91..08a9cf09c 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -976,7 +976,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
# next cast to a packed format with all fields converted to new dtype
packed_fields = np.dtype({'names': names,
- 'formats': [(out_dtype, c) for c in counts]})
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
arr = arr.astype(packed_fields, copy=copy, casting=casting)
# finally is it safe to view the packed fields as the unstructured type
@@ -1069,7 +1069,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
# first view as a packed structured array of one dtype
packed_fields = np.dtype({'names': names,
- 'formats': [(arr.dtype, c) for c in counts]})
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
arr = np.ascontiguousarray(arr).view(packed_fields)
# next cast to an unpacked but flattened format with varied dtypes
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index ac2a25604..f2517b474 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -94,7 +94,7 @@ def take_along_axis(arr, indices, axis):
Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
J = indices.shape[axis] # Need not equal M
- out = np.empty(Nk + (J,) + Nk)
+ out = np.empty(Ni + (J,) + Nk)
for ii in ndindex(Ni):
for kk in ndindex(Nk):
@@ -532,8 +532,7 @@ def expand_dims(a, axis):
Returns
-------
res : ndarray
- Output array. The number of dimensions is one greater than that of
- the input array.
+ View of `a` with the number of dimensions increased by one.
See Also
--------
@@ -579,7 +578,7 @@ def expand_dims(a, axis):
# 2017-05-17, 1.13.0
warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
"deprecated and will raise an AxisError in the future.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
# When the deprecation period expires, delete this if block,
if axis < 0:
axis = axis + a.ndim + 1
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 0dc36e41c..fd401c57c 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -186,8 +186,6 @@ def _broadcast_shape(*args):
"""Returns the shape of the arrays that would result from broadcasting the
supplied arrays against each other.
"""
- if not args:
- return ()
# use the old-iterator because np.nditer does not handle size 0 arrays
# consistently
b = np.broadcast(*args[:32])
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 2ebd483d5..062c21725 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -287,6 +287,7 @@ from io import BytesIO
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_raises, assert_raises_regex,
+ assert_warns
)
from numpy.lib import format
@@ -411,6 +412,7 @@ record_arrays = [
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
]
@@ -628,6 +630,61 @@ def test_pickle_disallow():
assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
allow_pickle=False)
+@pytest.mark.parametrize('dt', [
+ np.dtype(np.dtype([('a', np.int8),
+ ('b', np.int16),
+ ('c', np.int32),
+ ], align=True),
+ (3,)),
+ np.dtype([('x', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8,
+ },
+ (3,)),
+ (4,),
+ )]),
+ np.dtype([('x',
+ ('<f8', (5,)),
+ (2,),
+ )]),
+ np.dtype([('x', np.dtype((
+ np.dtype((
+ np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8}),
+ (3,)
+ )),
+ (4,)
+ )))
+ ]),
+ np.dtype([
+ ('a', np.dtype((
+ np.dtype((
+ np.dtype((
+ np.dtype([
+ ('a', int),
+ ('b', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8})),
+ ]),
+ (3,),
+ )),
+ (4,),
+ )),
+ (5,),
+ )))
+ ]),
+ ])
+
+def test_descr_to_dtype(dt):
+ dt1 = format.descr_to_dtype(dt.descr)
+ assert_equal(dt1, dt)
+ arr1 = np.zeros(3, dt)
+ arr2 = roundtrip(arr1)
+ assert_array_equal(arr1, arr2)
def test_version_2_0():
f = BytesIO()
@@ -882,3 +939,27 @@ def test_empty_npz():
fname = os.path.join(tempdir, "nothing.npz")
np.savez(fname)
np.load(fname)
+
+
+def test_unicode_field_names():
+ # gh-7391
+ arr = np.array([
+ (1, 3),
+ (1, 2),
+ (1, 3),
+ (1, 2)
+ ], dtype=[
+ ('int', int),
+ (u'\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
+ ])
+ fname = os.path.join(tempdir, "unicode.npy")
+ with open(fname, 'wb') as f:
+ format.write_array(f, arr, version=(3, 0))
+ with open(fname, 'rb') as f:
+ arr2 = format.read_array(f)
+ assert_array_equal(arr, arr2)
+
+ # notifies the user that 3.0 is selected
+ with open(fname, 'wb') as f:
+ with assert_warns(UserWarning):
+ format.write_array(f, arr, version=None)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index a5ae2eeed..a3d4c6efb 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -948,7 +948,7 @@ class TestGradient(object):
assert_equal(type(out), type(x))
# And make sure that the output and input don't have aliased mask
# arrays
- assert_(x.mask is not out.mask)
+ assert_(x._mask is not out._mask)
# Also check that edge_order=2 doesn't alter the original mask
x2 = np.ma.arange(5)
x2[2] = np.ma.masked
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index c96b01d42..afaa526af 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -554,15 +554,11 @@ class TestHistogramOptimBinNums(object):
return a / (a + b)
ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
- for seed in range(256)]
+ for seed in range(10)]
# the average difference between the two methods decreases as the dataset size increases.
- assert_almost_equal(abs(np.mean(ll, axis=0) - 0.5),
- [0.1065248,
- 0.0968844,
- 0.0331818,
- 0.0178057],
- decimal=3)
+ avg = abs(np.mean(ll, axis=0) - 0.5)
+ assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
def test_simple_range(self):
"""
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 00d5ca827..95a465c36 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -2,7 +2,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
-
+import pytest
+from itertools import chain
def test_packbits():
# Copied from the docstring.
@@ -50,8 +51,8 @@ def test_packbits_empty_with_axis():
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
-
-def test_packbits_large():
+@pytest.mark.parametrize('bitorder', ('little', 'big'))
+def test_packbits_large(bitorder):
# test data large enough for 16 byte vectorization
a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
@@ -71,7 +72,7 @@ def test_packbits_large():
a = a.repeat(3)
for dtype in '?bBhHiIlLqQ':
arr = np.array(a, dtype=dtype)
- b = np.packbits(arr, axis=None)
+ b = np.packbits(arr, axis=None, bitorder=bitorder)
assert_equal(b.dtype, np.uint8)
r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
@@ -81,9 +82,10 @@ def test_packbits_large():
255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
129, 248, 227, 129, 199, 31, 128]
- assert_array_equal(b, r)
+ if bitorder == 'big':
+ assert_array_equal(b, r)
# equal for size being multiple of 8
- assert_array_equal(np.unpackbits(b)[:-4], a)
+ assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)
# check last byte of different remainders (16 byte vectorization)
b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
@@ -229,6 +231,20 @@ def test_unpackbits():
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]]))
+def test_pack_unpack_order():
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ b_little = np.unpackbits(a, axis=1, bitorder='little')
+ b_big = np.unpackbits(a, axis=1, bitorder='big')
+ assert_array_equal(b, b_big)
+ assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
+ assert_array_equal(b[:,::-1], b_little)
+ assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
+ assert_raises(ValueError, np.unpackbits, a, bitorder='r')
+ assert_raises(TypeError, np.unpackbits, a, bitorder=10)
+
+
def test_unpackbits_empty():
a = np.empty((0,), dtype=np.uint8)
@@ -268,8 +284,7 @@ def test_unpackbits_large():
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
-def test_unpackbits_count():
- # test complete invertibility of packbits and unpackbits with count
+class TestCount():
x = np.array([
[1, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 0, 0, 0],
@@ -279,53 +294,85 @@ def test_unpackbits_count():
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
], dtype=np.uint8)
-
padded1 = np.zeros(57, dtype=np.uint8)
padded1[:49] = x.ravel()
+ padded1b = np.zeros(57, dtype=np.uint8)
+ padded1b[:49] = x[::-1].copy().ravel()
+ padded2 = np.zeros((9, 9), dtype=np.uint8)
+ padded2[:7, :7] = x
- packed = np.packbits(x)
- for count in range(58):
- unpacked = np.unpackbits(packed, count=count)
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
+ def test_roundtrip(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ # test complete invertibility of packbits and unpackbits with count
+ packed = np.packbits(self.x, bitorder=bitorder)
+ unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
assert_equal(unpacked.dtype, np.uint8)
- assert_array_equal(unpacked, padded1[:count])
- for count in range(-1, -57, -1):
- unpacked = np.unpackbits(packed, count=count)
- assert_equal(unpacked.dtype, np.uint8)
- # count -1 because padded1 has 57 instead of 56 elements
- assert_array_equal(unpacked, padded1[:count-1])
- for kwargs in [{}, {'count': None}]:
+ assert_array_equal(unpacked, self.padded1[:cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ ])
+ def test_count(self, kwargs):
+ packed = np.packbits(self.x)
unpacked = np.unpackbits(packed, **kwargs)
assert_equal(unpacked.dtype, np.uint8)
- assert_array_equal(unpacked, padded1[:-1])
- assert_raises(ValueError, np.unpackbits, packed, count=-57)
-
- padded2 = np.zeros((9, 9), dtype=np.uint8)
- padded2[:7, :7] = x
-
- packed0 = np.packbits(x, axis=0)
- packed1 = np.packbits(x, axis=1)
- for count in range(10):
- unpacked0 = np.unpackbits(packed0, axis=0, count=count)
+ assert_array_equal(unpacked, self.padded1[:-1])
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ # delta==-1 when count<0 because one extra zero of padding
+ @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
+ def test_roundtrip_axis(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
+ unpacked0 = np.unpackbits(packed0, axis=0, count=count,
+ bitorder=bitorder)
assert_equal(unpacked0.dtype, np.uint8)
- assert_array_equal(unpacked0, padded2[:count, :x.shape[1]])
- unpacked1 = np.unpackbits(packed1, axis=1, count=count)
- assert_equal(unpacked1.dtype, np.uint8)
- assert_array_equal(unpacked1, padded2[:x.shape[1], :count])
- for count in range(-1, -9, -1):
- unpacked0 = np.unpackbits(packed0, axis=0, count=count)
- assert_equal(unpacked0.dtype, np.uint8)
- # count -1 because one extra zero of padding
- assert_array_equal(unpacked0, padded2[:count-1, :x.shape[1]])
- unpacked1 = np.unpackbits(packed1, axis=1, count=count)
+ assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
+ unpacked1 = np.unpackbits(packed1, axis=1, count=count,
+ bitorder=bitorder)
assert_equal(unpacked1.dtype, np.uint8)
- assert_array_equal(unpacked1, padded2[:x.shape[0], :count-1])
- for kwargs in [{}, {'count': None}]:
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ {'bitorder' : 'little'},
+ {'bitorder': 'little', 'count': None},
+ {'bitorder' : 'big'},
+ {'bitorder': 'big', 'count': None},
+ ])
+ def test_axis_count(self, kwargs):
+ packed0 = np.packbits(self.x, axis=0)
unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
assert_equal(unpacked0.dtype, np.uint8)
- assert_array_equal(unpacked0, padded2[:-1, :x.shape[1]])
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
+ else:
+ assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1)
unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
assert_equal(unpacked1.dtype, np.uint8)
- assert_array_equal(unpacked1, padded2[:x.shape[0], :-1])
- assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
- assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
-
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
+ else:
+ assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])
+
+ def test_bad_count(self):
+ packed0 = np.packbits(self.x, axis=0)
+ assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
+ packed1 = np.packbits(self.x, axis=1)
+ assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
+ packed = np.packbits(self.x)
+ assert_raises(ValueError, np.unpackbits, packed, count=-57)
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 112678294..f713fb64d 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -243,6 +243,15 @@ class TestRecFunctions(object):
assert_(dd.base is d)
assert_(ddd.base is d)
+ # including uniform fields with subarrays unpacked
+ d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
+ (8, [9, 10], [[11, 12], [13, 14]])],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 2b254b6c0..ac4b03a6c 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -541,6 +541,9 @@ def real_if_close(a, tol=100):
def _asscalar_dispatcher(a):
+ # 2018-10-10, 1.16
+ warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
+ 'a.item() instead', DeprecationWarning, stacklevel=3)
return (a,)
@@ -569,10 +572,6 @@ def asscalar(a):
>>> np.asscalar(np.array([24]))
24
"""
-
- # 2018-10-10, 1.16
- warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
- 'a.item() instead', DeprecationWarning, stacklevel=1)
return a.item()
#-----------------------------------------------------------------------------
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py
index e72a39e64..434586113 100644
--- a/numpy/linalg/lapack_lite/clapack_scrub.py
+++ b/numpy/linalg/lapack_lite/clapack_scrub.py
@@ -294,9 +294,8 @@ def scrubSource(source, nsteps=None, verbose=False):
if __name__ == '__main__':
filename = sys.argv[1]
outfilename = os.path.join(sys.argv[2], os.path.basename(filename))
- fo = open(filename, 'r')
- source = fo.read()
- fo.close()
+ with open(filename, 'r') as fo:
+ source = fo.read()
if len(sys.argv) > 3:
nsteps = int(sys.argv[3])
diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py
index 3b6ac70f0..87c27aab9 100644
--- a/numpy/linalg/lapack_lite/fortran.py
+++ b/numpy/linalg/lapack_lite/fortran.py
@@ -110,15 +110,14 @@ def getDependencies(filename):
"""For a Fortran source file, return a list of routines declared as EXTERNAL
in it.
"""
- fo = open(filename)
external_pat = re.compile(r'^\s*EXTERNAL\s', re.I)
routines = []
- for lineno, line in fortranSourceLines(fo):
- m = external_pat.match(line)
- if m:
- names = line = line[m.end():].strip().split(',')
- names = [n.strip().lower() for n in names]
- names = [n for n in names if n]
- routines.extend(names)
- fo.close()
+ with open(filename) as fo:
+ for lineno, line in fortranSourceLines(fo):
+ m = external_pat.match(line)
+ if m:
+ names = line = line[m.end():].strip().split(',')
+ names = [n.strip().lower() for n in names]
+ names = [n for n in names if n]
+ routines.extend(names)
return routines
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 29da77655..27ec62403 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -890,12 +890,12 @@ def qr(a, mode='reduced'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
@@ -1900,9 +1900,9 @@ def pinv(a, rcond=1e-15, hermitian=False):
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero. Broadcasts against the stack of matrices
+ Singular values less than or equal to
+ ``rcond * largest_singular_value`` are set to zero.
+ Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
@@ -2245,7 +2245,7 @@ def lstsq(a, b, rcond="warn"):
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
- FutureWarning, stacklevel=2)
+ FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 90aff6ec8..93eb4d87a 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3449,7 +3449,9 @@ class MaskedArray(ndarray):
# We could try to force a reshape, but that wouldn't work in some
# cases.
- return self._mask
+ # Return a view so that the dtype and shape cannot be changed in place
+ # This still preserves nomask by identity
+ return self._mask.view()
@mask.setter
def mask(self, value):
@@ -5354,7 +5356,7 @@ class MaskedArray(ndarray):
out.__setmask__(self._mask)
return out
- def argsort(self, axis=np._NoValue, kind='quicksort', order=None,
+ def argsort(self, axis=np._NoValue, kind=None, order=None,
endwith=True, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
@@ -5374,7 +5376,7 @@ class MaskedArray(ndarray):
Until then, the axis should be given explicitly when
``arr.ndim > 1``, to avoid a FutureWarning.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm.
+ The sorting algorithm used.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
@@ -5516,7 +5518,7 @@ class MaskedArray(ndarray):
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
- def sort(self, axis=-1, kind='quicksort', order=None,
+ def sort(self, axis=-1, kind=None, order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
@@ -5529,7 +5531,7 @@ class MaskedArray(ndarray):
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm. Default is 'quicksort'.
+ The sorting algorithm used.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
@@ -5537,7 +5539,7 @@ class MaskedArray(ndarray):
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
- When the array contains unmasked values at the same extremes of the
+ When the array contains unmasked values sorting at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : {var}, optional
@@ -5935,7 +5937,7 @@ class MaskedArray(ndarray):
returns bytes not strings.
"""
- return self.tobytes(fill_value, order='C')
+ return self.tobytes(fill_value, order=order)
def tobytes(self, fill_value=None, order='C'):
"""
@@ -6734,7 +6736,7 @@ def power(a, b, third=None):
argmin = _frommethod('argmin')
argmax = _frommethod('argmax')
-def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fill_value=None):
+def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.asanyarray(a)
@@ -6749,7 +6751,7 @@ def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fil
return a.argsort(axis=axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
-def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
+def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.array(a, copy=True, subok=True)
if axis is None:
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 1f80ba26d..fb3f1a810 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -374,12 +374,12 @@ class TestMaskedArray(object):
y2a = array(x1, mask=m, copy=1)
assert_(y2a._data.__array_interface__ != x1.__array_interface__)
- #assert_( y2a.mask is not m)
+ #assert_( y2a._mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
- #assert_( y2a.mask is not m)
+ #assert_( y2a._mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(allequal(y2a.mask, 0))
@@ -5203,3 +5203,10 @@ def test_fieldless_void():
mx = np.ma.array(x, mask=x)
assert_equal(mx.dtype, x.dtype)
assert_equal(mx.shape, x.shape)
+
+
+def test_mask_shape_assignment_does_not_break_masked():
+ a = np.ma.masked
+ b = np.ma.array(1, mask=a.mask)
+ b.shape = (1,)
+ assert_equal(a.mask.shape, ())
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 07bfb613f..1c523768d 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -270,7 +270,7 @@ class TestMa(object):
y1 = array(x1, mask=m)
assert_(y1._data is not x1)
assert_(allequal(x1, y1._data))
- assert_(y1.mask is m)
+ assert_(y1._mask is m)
y1a = array(y1, copy=0)
# For copy=False, one might expect that the array would just
@@ -280,19 +280,19 @@ class TestMa(object):
y1._mask.__array_interface__)
y2 = array(x1, mask=m3, copy=0)
- assert_(y2.mask is m3)
+ assert_(y2._mask is m3)
assert_(y2[2] is masked)
y2[2] = 9
assert_(y2[2] is not masked)
- assert_(y2.mask is m3)
+ assert_(y2._mask is m3)
assert_(allequal(y2.mask, 0))
y2a = array(x1, mask=m, copy=1)
- assert_(y2a.mask is not m)
+ assert_(y2a._mask is not m)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
- assert_(y2a.mask is not m)
+ assert_(y2a._mask is not m)
assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
@@ -318,14 +318,14 @@ class TestMa(object):
assert_(x[3] is masked)
assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
- assert_(x.mask is m)
+ assert_(x._mask is m)
assert_(x[3] is masked)
assert_(x[4] is not masked)
assert_(eq(x, [0, 10, 2, -1, 40]))
x = array(d, mask=m2, copy=True)
x.put([0, 1, 2], [-1, 100, 200])
- assert_(x.mask is not m2)
+ assert_(x._mask is not m2)
assert_(x[3] is masked)
assert_(x[4] is masked)
assert_(eq(x, [-1, 100, 200, 0, 0]))
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 54f1bda7d..b83873a5a 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -87,3 +87,7 @@ class TestRegression(object):
# See gh-12464. Indexing with empty list should give empty result.
ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
assert_array_equal(ma[[]], ma[:0])
+
+ def test_masked_array_tostring_fortran(self):
+ ma = np.ma.arange(4).reshape((2,2))
+ assert_array_equal(ma.tostring(order='F'), ma.T.tostring())
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index de397ccfa..2b1f5e7a9 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -198,9 +198,10 @@ double rk_beta(rk_state *state, double a, double b)
X = pow(U, 1.0/a);
Y = pow(V, 1.0/b);
- if ((X + Y) <= 1.0)
+ /* Reject if both U and V are 0.0, which is approx 1 in 10^106 */
+ if (((X + Y) <= 1.0) && ((U + V) > 0.0))
{
- if (X +Y > 0)
+ if (X + Y > 0)
{
return X / (X + Y);
}
@@ -329,13 +330,15 @@ long rk_binomial_btpe(rk_state *state, long n, double p)
Step30:
if (u > p3) goto Step40;
y = (long)floor(xl + log(v)/laml);
- if (y < 0) goto Step10;
+ /* Reject if v == 0.0 since cast of inf not well defined */
+ if ((y < 0) || (v == 0.0)) goto Step10;
v = v*(u-p2)*laml;
goto Step50;
Step40:
y = (long)floor(xr - log(v)/lamr);
- if (y > n) goto Step10;
+ /* Reject if v == 0.0 since cast of inf not well defined */
+ if ((y > n) || (v == 0.0)) goto Step10;
v = v*(u-p3)*lamr;
Step50:
@@ -666,12 +669,17 @@ double rk_laplace(rk_state *state, double loc, double scale)
double U;
U = rk_double(state);
- if (U < 0.5)
+ if (U >= 0.5)
+ {
+ U = loc - scale * log(2.0 - U - U);
+
+ } else if (U > 0.0)
{
U = loc + scale * log(U + U);
} else
{
- U = loc - scale * log(2.0 - U - U);
+ /* Reject if U == 0.0 */
+ return rk_laplace(state, loc, scale);
}
return U;
}
@@ -681,7 +689,9 @@ double rk_gumbel(rk_state *state, double loc, double scale)
double U;
U = 1.0 - rk_double(state);
- return loc - scale * log(-log(U));
+ if (U < 1.0)
+ return loc - scale * log(-log(U));
+ return rk_gumbel(state, loc, scale);
}
double rk_logistic(rk_state *state, double loc, double scale)
@@ -689,7 +699,9 @@ double rk_logistic(rk_state *state, double loc, double scale)
double U;
U = rk_double(state);
- return loc + scale * log(U/(1.0 - U));
+ if (U > 0.0)
+ return loc + scale * log(U/(1.0 - U));
+ return rk_logistic(state, loc, scale);
}
double rk_lognormal(rk_state *state, double mean, double sigma)
@@ -914,7 +926,8 @@ long rk_logseries(rk_state *state, double p)
q = 1.0 - exp(r*U);
if (V <= q*q) {
result = (long)floor(1 + log(V)/log(q));
- if (result < 1) {
+ /* Reject if V == 0.0 since cast of inf not well defined */
+ if ((result < 1) || (V == 0.0)) {
continue;
}
else {
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index c97c166aa..329d5cd68 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1235,7 +1235,7 @@ cdef class RandomState:
greater than or equal to low. The default value is 0.
high : float or array_like of floats
Upper boundary of the output interval. All values generated will be
- less than high. The default value is 1.0.
+ less than or equal to high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1270,7 +1270,14 @@ cdef class RandomState:
If ``high`` < ``low``, the results are officially undefined
and may eventually raise an error, i.e. do not rely on this
function to behave when passed arguments satisfying that
- inequality condition.
+ inequality condition. The ``high`` limit may be included in the
+ returned array of floats due to floating-point rounding in the
+ equation ``low + (high-low) * random_sample()``. For example:
+
+ >>> x = np.float32(5*0.99999999)
+ >>> x
+ 5.0
+
Examples
--------
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 711270072..121c7dd8c 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -515,7 +515,7 @@ class TestRandomDist(object):
def test_binomial(self):
np.random.seed(self.seed)
- actual = np.random.binomial(100.123, .456, size=(3, 2))
+ actual = np.random.binomial(100, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
@@ -612,7 +612,7 @@ class TestRandomDist(object):
def test_hypergeometric(self):
np.random.seed(self.seed)
- actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
@@ -923,6 +923,8 @@ class TestRandomDist(object):
def __int__(self):
raise TypeError
+ __index__ = __int__
+
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index c22348103..ee8eac9e8 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -20,7 +20,7 @@ from warnings import WarningMessage
import pprint
from numpy.core import(
- float32, empty, arange, array_repr, ndarray, isnat, array)
+ intp, float32, empty, arange, array_repr, ndarray, isnat, array)
from numpy.lib.utils import deprecate
if sys.version_info[0] >= 3:
@@ -301,6 +301,11 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
+ This function handles NaN comparisons as if NaN was a "normal" number.
+ That is, no assertion is raised if both objects have NaNs in the same
+ positions. This is in contrast to the IEEE standard on NaNs, which says
+ that NaN compared to anything must return False.
+
Parameters
----------
actual : array_like
@@ -328,6 +333,11 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
ACTUAL: 5
DESIRED: 6
+ The following comparison does not raise an exception. There are NaNs
+ in the inputs, but they are in the same positions.
+
+ >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
+
"""
__tracebackhide__ = True # Hide traceback for py.test
if isinstance(desired, dict):
@@ -784,18 +794,17 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
if isinstance(val, bool):
cond = val
- reduced = [0]
+ reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
- reduced = reduced.tolist()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
- mismatch = 100.0 * reduced.count(0) / ox.size
+ mismatch = 100. * (reduced.size - reduced.sum(dtype=intp)) / ox.size
remarks = ['Mismatch: {:.3g}%'.format(mismatch)]
with errstate(invalid='ignore', divide='ignore'):
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 9081f3d6e..643d143ee 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -1498,6 +1498,7 @@ class TestAssertNoGcCycles(object):
with assert_raises(AssertionError):
assert_no_gc_cycles(make_cycle)
+ @pytest.mark.slow
def test_fails(self):
"""
Test that in cases where the garbage cannot be collected, we raise an
diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py
index aa6f69f7e..f5560a099 100644
--- a/numpy/tests/test_warnings.py
+++ b/numpy/tests/test_warnings.py
@@ -44,7 +44,7 @@ if sys.version_info >= (3, 4):
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
- if "testing/tests/test_warnings.py" is self.__filename:
+ if "testing/tests/test_warnings.py" == self.__filename:
# This file
return
diff --git a/site.cfg.example b/site.cfg.example
index 1af19d353..b6b0175d6 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -153,6 +153,22 @@
# include_dirs = /home/username/blis/include/blis
# runtime_library_dirs = /home/username/blis/lib
+# libFLAME
+# --------
+# libFLAME (https://www.cs.utexas.edu/~flame/web/libFLAME.html) provides a
+# LAPACK interface. It's a relatively new library, its performance in some
+# cases seems to match that of MKL and OpenBLAS.
+# It hasn't been benchmarked with NumPy or SciPy yet.
+#
+# Notes on compiling libFLAME itself:
+# - the LAPACK interface (needed by NumPy) isn't built by default; please
+# configure with ``./configure --enable-lapack2flame``.
+#
+# [flame]
+# libraries = flame
+# library_dirs = /home/username/flame/lib
+# runtime_library_dirs = /home/username/flame/lib
+
# MKL
#----
# Intel MKL is Intel's very optimized yet proprietary implementation of BLAS and
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
index 61a09cd85..28afdea5d 100755
--- a/tools/pypy-test.sh
+++ b/tools/pypy-test.sh
@@ -11,7 +11,7 @@ sudo apt-get -yq install libatlas-base-dev liblapack-dev gfortran-5
F77=gfortran-5 F90=gfortran-5 \
# Download the proper OpenBLAS x64 precompiled library
-OPENBLAS=openblas-v0.3.5-manylinux1_x86_64.tar.gz
+OPENBLAS=openblas-v0.3.5-274-g6a8b4269-manylinux1_x86_64.tar.gz
echo getting $OPENBLAS
wget -q https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/$OPENBLAS -O openblas.tar.gz
mkdir -p openblas
@@ -46,14 +46,6 @@ pypy3/bin/pypy3 runtests.py --show-build-log -- -rsx \
--junitxml=junit/test-results.xml --durations 10
echo Make sure the correct openblas has been linked in
-# rework after merging PR #12790 or alternative
-TEST_GET_CONFIG="import numpy, ctypes, os
-dll = ctypes.CDLL(numpy.core._multiarray_umath.__file__)
-get_config = dll.openblas_get_config
-get_config.restype=ctypes.c_char_p
-res = get_config()
-print('OpenBLAS get_config returned', str(res))
-assert b'OpenBLAS 0.3.5' in res"
pypy3/bin/pip install .
(cd pypy3; bin/pypy3 -c "$TEST_GET_CONFIG")
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
index 14f11b7ed..5240253b6 100644..100755
--- a/tools/test-installed-numpy.py
+++ b/tools/test-installed-numpy.py
@@ -32,6 +32,9 @@ parser.add_option("-m", "--mode",
action="store", dest="mode", default="fast",
help="'fast', 'full', or something that could be "
"passed to pytest [default: %default]")
+parser.add_option("-n", "--durations",
+ dest="durations", default=-1,
+ help="show time to run slowest N tests [default: -1]")
(options, args) = parser.parse_args()
import numpy
@@ -54,6 +57,7 @@ result = numpy.test(options.mode,
verbose=options.verbose,
extra_argv=args,
doctests=options.doctests,
+ durations=int(options.durations),
coverage=options.coverage)
if result:
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 5860deea6..8df33e943 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -83,9 +83,9 @@ run_test()
export PYTHONWARNINGS=default
if [ -n "$RUN_FULL_TESTS" ]; then
export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
- $PYTHON ../tools/test-installed-numpy.py -v --mode=full $COVERAGE_FLAG
+ $PYTHON ../tools/test-installed-numpy.py -v --durations 10 --mode=full $COVERAGE_FLAG
else
- $PYTHON ../tools/test-installed-numpy.py -v
+ $PYTHON ../tools/test-installed-numpy.py -v --durations 10
fi
if [ -n "$RUN_COVERAGE" ]; then