summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorczgdp1807 <gdp.1807@gmail.com>2021-09-03 12:17:26 +0530
committerczgdp1807 <gdp.1807@gmail.com>2021-09-03 12:17:26 +0530
commit781d0a7ac61ce007e65abcd4e30f2181e729ae61 (patch)
treef45f38a246bcefbca9ca8a08bd8ba55cbc6cdb15
parentb341e4c3249817d2e14ddf71aa850a8a896b9303 (diff)
parent2ae1e068710174dc57b5ba5ad688517608efcf26 (diff)
downloadnumpy-781d0a7ac61ce007e65abcd4e30f2181e729ae61.tar.gz
resolved conflicts
-rw-r--r--.circleci/config.yml4
-rw-r--r--.gitattributes3
-rw-r--r--.github/workflows/build_test.yml26
-rw-r--r--.lgtm.yml1
-rw-r--r--CITATION.bib20
-rw-r--r--MANIFEST.in3
-rw-r--r--README.md26
-rw-r--r--azure-pipelines.yml78
-rw-r--r--doc/changelog/1.21.2-changelog.rst41
-rw-r--r--doc/neps/nep-0047-array-api-standard.rst337
-rw-r--r--doc/neps/nep-0049.rst31
-rw-r--r--doc/release/upcoming_changes/18585.new_feature.rst15
-rw-r--r--doc/release/upcoming_changes/19527.new_feature.rst3
-rw-r--r--doc/release/upcoming_changes/19539.expired.rst2
-rw-r--r--doc/release/upcoming_changes/19615.expired.rst8
-rw-r--r--doc/release/upcoming_changes/19655.change.rst4
-rw-r--r--doc/release/upcoming_changes/19680.improvement.rst5
-rw-r--r--doc/release/upcoming_changes/19803.new_feature.rst14
-rw-r--r--doc/source/_templates/indexcontent.html7
-rw-r--r--doc/source/conf.py1
-rw-r--r--doc/source/dev/howto-docs.rst63
-rw-r--r--doc/source/dev/howto_build_docs.rst (renamed from doc/source/docs/howto_build_docs.rst)0
-rw-r--r--doc/source/dev/index.rst1
-rw-r--r--doc/source/doc_conventions.rst23
-rw-r--r--doc/source/docs/howto_document.rst75
-rw-r--r--doc/source/docs/index.rst11
-rw-r--r--doc/source/reference/arrays.indexing.rst617
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst2
-rw-r--r--doc/source/reference/routines.indexing.rst69
-rw-r--r--doc/source/reference/routines.rst1
-rw-r--r--doc/source/reference/ufuncs.rst567
-rw-r--r--doc/source/release.rst1
-rw-r--r--doc/source/release/1.21.2-notes.rst59
-rw-r--r--doc/source/user/absolute_beginners.rst7
-rw-r--r--doc/source/user/basics.broadcasting.rst28
-rw-r--r--doc/source/user/basics.creation.rst8
-rw-r--r--doc/source/user/basics.indexing.rst1050
-rw-r--r--doc/source/user/basics.rst1
-rw-r--r--doc/source/user/basics.ufuncs.rst313
-rw-r--r--doc/source/user/index.rst2
-rw-r--r--environment.yml3
-rw-r--r--numpy/__init__.py94
-rw-r--r--numpy/__init__.pyi173
-rw-r--r--numpy/_pytesttester.py13
-rw-r--r--numpy/_pytesttester.pyi3
-rw-r--r--numpy/array_api/__init__.py370
-rw-r--r--numpy/array_api/_array_object.py1029
-rw-r--r--numpy/array_api/_constants.py6
-rw-r--r--numpy/array_api/_creation_functions.py316
-rw-r--r--numpy/array_api/_data_type_functions.py127
-rw-r--r--numpy/array_api/_dtypes.py143
-rw-r--r--numpy/array_api/_elementwise_functions.py729
-rw-r--r--numpy/array_api/_linear_algebra_functions.py68
-rw-r--r--numpy/array_api/_manipulation_functions.py86
-rw-r--r--numpy/array_api/_searching_functions.py46
-rw-r--r--numpy/array_api/_set_functions.py31
-rw-r--r--numpy/array_api/_sorting_functions.py37
-rw-r--r--numpy/array_api/_statistical_functions.py81
-rw-r--r--numpy/array_api/_typing.py44
-rw-r--r--numpy/array_api/_utility_functions.py37
-rw-r--r--numpy/array_api/setup.py12
-rw-r--r--numpy/array_api/tests/__init__.py7
-rw-r--r--numpy/array_api/tests/test_array_object.py269
-rw-r--r--numpy/array_api/tests/test_creation_functions.py141
-rw-r--r--numpy/array_api/tests/test_elementwise_functions.py111
-rw-r--r--numpy/core/_add_newdocs.py2
-rw-r--r--numpy/core/_add_newdocs_scalars.py45
-rw-r--r--numpy/core/_asarray.pyi8
-rw-r--r--numpy/core/_type_aliases.py9
-rw-r--r--numpy/core/_type_aliases.pyi8
-rw-r--r--numpy/core/_ufunc_config.pyi10
-rw-r--r--numpy/core/arrayprint.pyi8
-rw-r--r--numpy/core/code_generators/generate_umath.py4
-rw-r--r--numpy/core/einsumfunc.pyi35
-rw-r--r--numpy/core/fromnumeric.py9
-rw-r--r--numpy/core/fromnumeric.pyi8
-rw-r--r--numpy/core/function_base.pyi8
-rw-r--r--numpy/core/include/numpy/npy_cpu.h3
-rw-r--r--numpy/core/include/numpy/npy_endian.h1
-rw-r--r--numpy/core/include/numpy/npy_math.h2
-rw-r--r--numpy/core/multiarray.pyi116
-rw-r--r--numpy/core/numeric.pyi7
-rw-r--r--numpy/core/numerictypes.pyi12
-rw-r--r--numpy/core/overrides.py38
-rw-r--r--numpy/core/records.py14
-rw-r--r--numpy/core/setup.py6
-rw-r--r--numpy/core/shape_base.pyi20
-rw-r--r--numpy/core/src/common/npy_cpu_dispatch.h8
-rw-r--r--numpy/core/src/common/simd/avx2/arithmetic.h2
-rw-r--r--numpy/core/src/multiarray/alloc.c5
-rw-r--r--numpy/core/src/multiarray/convert.c12
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c2
-rw-r--r--numpy/core/src/multiarray/ctors.c2
-rw-r--r--numpy/core/src/multiarray/descriptor.c16
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src4
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src49
-rw-r--r--numpy/core/src/umath/_scaled_float_dtype.c67
-rw-r--r--numpy/core/src/umath/dispatching.c74
-rw-r--r--numpy/core/src/umath/dispatching.h4
-rw-r--r--numpy/core/src/umath/legacy_array_method.c4
-rw-r--r--numpy/core/src/umath/loops.c.src2
-rw-r--r--numpy/core/src/umath/loops_exponent_log.dispatch.c.src2
-rw-r--r--numpy/core/src/umath/ufunc_object.c11
-rw-r--r--numpy/core/tests/test_casting_unittests.py9
-rw-r--r--numpy/core/tests/test_custom_dtypes.py12
-rw-r--r--numpy/core/tests/test_datetime.py14
-rw-r--r--numpy/core/tests/test_deprecations.py48
-rw-r--r--numpy/core/tests/test_dtype.py9
-rw-r--r--numpy/core/tests/test_multiarray.py12
-rw-r--r--numpy/core/tests/test_numeric.py10
-rw-r--r--numpy/core/tests/test_scalar_methods.py26
-rw-r--r--numpy/core/tests/test_simd.py2
-rw-r--r--numpy/core/tests/test_ufunc.py87
-rw-r--r--numpy/core/tests/test_umath_complex.py56
-rw-r--r--numpy/distutils/ccompiler_opt.py11
-rw-r--r--numpy/distutils/cpuinfo.py4
-rw-r--r--numpy/distutils/exec_command.py2
-rw-r--r--numpy/distutils/fcompiler/compaq.py4
-rw-r--r--numpy/distutils/misc_util.py9
-rw-r--r--numpy/distutils/npy_pkg_config.py4
-rw-r--r--numpy/distutils/system_info.py4
-rw-r--r--numpy/distutils/unixccompiler.py2
-rw-r--r--numpy/f2py/__init__.pyi3
-rw-r--r--numpy/f2py/cb_rules.py1
-rwxr-xr-xnumpy/f2py/crackfortran.py4
-rwxr-xr-xnumpy/f2py/f2py2e.py62
-rw-r--r--numpy/f2py/tests/util.py2
-rw-r--r--numpy/fft/tests/test_pocketfft.py2
-rw-r--r--numpy/lib/__init__.pyi1
-rw-r--r--numpy/lib/_datasource.py2
-rw-r--r--numpy/lib/arraypad.pyi17
-rw-r--r--numpy/lib/arrayterator.pyi1
-rw-r--r--numpy/lib/format.py5
-rw-r--r--numpy/lib/format.pyi8
-rw-r--r--numpy/lib/function_base.py4
-rw-r--r--numpy/lib/index_tricks.py18
-rw-r--r--numpy/lib/index_tricks.pyi8
-rw-r--r--numpy/lib/npyio.py321
-rw-r--r--numpy/lib/npyio.pyi335
-rw-r--r--numpy/lib/polynomial.py23
-rw-r--r--numpy/lib/shape_base.pyi13
-rw-r--r--numpy/lib/stride_tricks.pyi83
-rw-r--r--numpy/lib/tests/test__datasource.py6
-rw-r--r--numpy/lib/tests/test_function_base.py20
-rw-r--r--numpy/lib/tests/test_io.py28
-rw-r--r--numpy/lib/tests/test_shape_base.py10
-rw-r--r--numpy/lib/tests/test_utils.py6
-rw-r--r--numpy/lib/type_check.pyi8
-rw-r--r--numpy/lib/utils.py6
-rw-r--r--numpy/lib/utils.pyi12
-rw-r--r--numpy/ma/core.py8
-rw-r--r--numpy/ma/mrecords.py4
-rw-r--r--numpy/ma/tests/test_old_ma.py16
-rw-r--r--numpy/random/_generator.pyi8
-rw-r--r--numpy/random/_generator.pyx2
-rw-r--r--numpy/random/_mt19937.pyi8
-rw-r--r--numpy/random/_pcg64.pyi8
-rw-r--r--numpy/random/_philox.pyi8
-rw-r--r--numpy/random/_sfc64.pyi8
-rw-r--r--numpy/random/bit_generator.pyi7
-rw-r--r--numpy/random/mtrand.pyi8
-rw-r--r--numpy/random/mtrand.pyx2
-rw-r--r--numpy/random/tests/test_generator_mt19937_regressions.py4
-rw-r--r--numpy/random/tests/test_randomstate_regression.py4
-rw-r--r--numpy/random/tests/test_regression.py4
-rw-r--r--numpy/setup.py1
-rw-r--r--numpy/testing/_private/utils.pyi31
-rw-r--r--numpy/tests/test_public_api.py21
-rw-r--r--numpy/typing/__init__.py26
-rw-r--r--numpy/typing/_array_like.py26
-rw-r--r--numpy/typing/_callable.py587
-rw-r--r--numpy/typing/_char_codes.py282
-rw-r--r--numpy/typing/_dtype_like.py51
-rw-r--r--numpy/typing/_shape.py12
-rw-r--r--numpy/typing/_ufunc.pyi28
-rw-r--r--numpy/typing/tests/data/fail/modules.py1
-rw-r--r--numpy/typing/tests/data/fail/npyio.py30
-rw-r--r--numpy/typing/tests/data/fail/scalars.py1
-rw-r--r--numpy/typing/tests/data/fail/stride_tricks.py9
-rw-r--r--numpy/typing/tests/data/reveal/arraypad.py3
-rw-r--r--numpy/typing/tests/data/reveal/npyio.py91
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py2
-rw-r--r--numpy/typing/tests/data/reveal/stride_tricks.py28
-rw-r--r--numpy/typing/tests/test_runtime.py10
-rw-r--r--numpy/typing/tests/test_typing_extensions.py35
-rw-r--r--pyproject.toml2
-rwxr-xr-xruntests.py23
-rwxr-xr-xsetup.py7
-rw-r--r--test_requirements.txt6
-rwxr-xr-xtools/cythonize.py9
-rwxr-xr-xtools/swig/test/testFarray.py22
192 files changed, 7549 insertions, 3370 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8cf18d809..fdb85be98 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -8,7 +8,7 @@ jobs:
docker:
# CircleCI maintains a library of pre-built images
# documented at https://circleci.com/docs/2.0/circleci-images/
- - image: circleci/python:3.8.4
+ - image: cimg/python:3.8
working_directory: ~/repo
@@ -23,7 +23,7 @@ jobs:
name: create virtual environment, install dependencies
command: |
sudo apt-get update
- sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
+ sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra latexmk texlive-xetex
python3.8 -m venv venv
. venv/bin/activate
diff --git a/.gitattributes b/.gitattributes
index 8723dd9dc..a0676bee4 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -15,6 +15,9 @@ numpy/core/include/numpy/libdivide/* linguist-vendored
# Mark some files as generated
numpy/linalg/lapack_lite/f2c_*.c linguist-generated
numpy/linalg/lapack_lite/lapack_lite_names.h linguist-generated
+numpy/_version.py linguist-generated
+
+# versioneer config
numpy/_version.py export-subst
# Configuration files
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index bfd77ddb0..950294fe8 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -56,7 +56,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.7, 3.9, 3.10.0-beta.4]
+ python-version: [3.9, 3.10.0-rc.1]
steps:
- uses: actions/checkout@v2
with:
@@ -202,18 +202,18 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
- uses: ./.github/actions
- pypy37:
- needs: [smoke_test]
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- with:
- submodules: recursive
- fetch-depth: 0
- - uses: actions/setup-python@v2
- with:
- python-version: pypy-3.7-v7.3.4
- - uses: ./.github/actions
+ #pypy37:
+ #needs: [smoke_test]
+ #runs-on: ubuntu-latest
+ #steps:
+ #- uses: actions/checkout@v2
+ #with:
+ #submodules: recursive
+ #fetch-depth: 0
+ #- uses: actions/setup-python@v2
+ #with:
+ #python-version: pypy-3.7-v7.3.4
+ #- uses: ./.github/actions
sdist:
needs: [smoke_test]
diff --git a/.lgtm.yml b/.lgtm.yml
index cc16544a3..242afd597 100644
--- a/.lgtm.yml
+++ b/.lgtm.yml
@@ -1,6 +1,7 @@
path_classifiers:
library:
- tools
+ - numpy/_version.py
generated:
# The exports defined in __init__.py are defined in the Cython module
# np.random.mtrand. By excluding this file we suppress a number of
diff --git a/CITATION.bib b/CITATION.bib
new file mode 100644
index 000000000..66e7dfd37
--- /dev/null
+++ b/CITATION.bib
@@ -0,0 +1,20 @@
+@ARTICLE{2020NumPy-Array,
+ author = {Harris, Charles R. and Millman, K. Jarrod and
+ van der Walt, Stéfan J and Gommers, Ralf and
+ Virtanen, Pauli and Cournapeau, David and
+ Wieser, Eric and Taylor, Julian and Berg, Sebastian and
+ Smith, Nathaniel J. and Kern, Robert and Picus, Matti and
+ Hoyer, Stephan and van Kerkwijk, Marten H. and
+ Brett, Matthew and Haldane, Allan and
+ Fernández del Río, Jaime and Wiebe, Mark and
+ Peterson, Pearu and Gérard-Marchant, Pierre and
+ Sheppard, Kevin and Reddy, Tyler and Weckesser, Warren and
+ Abbasi, Hameer and Gohlke, Christoph and
+ Oliphant, Travis E.},
+ title = {Array programming with {NumPy}},
+ journal = {Nature},
+ year = {2020},
+ volume = {585},
+ pages = {357–362},
+ doi = {10.1038/s41586-020-2649-2}
+}
diff --git a/MANIFEST.in b/MANIFEST.in
index 8ec62123b..ab6ecd518 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,6 +12,9 @@ exclude LICENSES_bundled.txt
exclude .*
exclude azure-*.yml
+# Include coveragerc for runtests.py
+include .coveragerc
+
# Sub-directories. Included are: numpy/, doc/, benchmarks/, tools/
include numpy/_version.py
recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in
diff --git a/README.md b/README.md
index 4195ff917..b211ce466 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,27 @@
# <img alt="NumPy" src="/branding/logo/primary/numpylogo.svg" height="60">
+<!--[![Azure Pipelines](https://dev.azure.com/numpy/numpy/_apis/build/status/numpy.numpy?branchName=main)](-->
+<!--https://dev.azure.com/numpy/numpy/_build/latest?definitionId=1?branchName=main)-->
+<!--[![Actions build_test](https://github.com/numpy/numpy/actions/workflows/build_test.yml/badge.svg)](-->
+<!--https://github.com/numpy/numpy/actions/workflows/build_test.yml)-->
+<!--[![TravisCI](https://app.travis-ci.com/numpy/numpy.svg?branch=main)](-->
+<!--https://app.travis-ci.com/numpy/numpy)-->
+<!--[![CircleCI](https://img.shields.io/circleci/project/github/numpy/numpy/main.svg?label=CircleCI)](-->
+<!--https://circleci.com/gh/numpy/numpy)-->
+<!--[![Codecov](https://codecov.io/gh/numpy/numpy/branch/main/graph/badge.svg)](-->
+<!--https://codecov.io/gh/numpy/numpy)-->
+
+[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](
+https://numfocus.org)
+[![Pypi Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=Pypi%20downloads)](
+https://pypi.org/project/numpy/)
+[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)](
+https://anaconda.org/conda-forge/numpy)
+[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)](
+https://stackoverflow.com/questions/tagged/numpy)
+[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue)](
+https://doi.org/10.1038/s41586-020-2649-2)
+
NumPy is the fundamental package needed for scientific computing with Python.
- **Website:** https://www.numpy.org
@@ -60,7 +82,3 @@ mailing list. You are very welcome to join.
If you are new to contributing to open source, [this
guide](https://opensource.guide/how-to-contribute/) helps explain why, what,
and how to successfully get involved.
-
-
-
-[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 03571aed2..714f62912 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -67,19 +67,6 @@ stages:
displayName: 'Run Lint Checks'
failOnStderr: true
- - job: WindowsFast
- pool:
- vmImage: 'VS2017-Win2016'
- strategy:
- matrix:
- Python37-32bit-fast:
- PYTHON_VERSION: '3.8'
- PYTHON_ARCH: 'x86'
- TEST_MODE: fast
- BITS: 32
- steps:
- - template: azure-steps-windows.yml
-
- job: Linux_Python_38_32bit_full_with_asserts
pool:
vmImage: 'ubuntu-20.04'
@@ -241,17 +228,6 @@ stages:
strategy:
maxParallel: 6
matrix:
- # Python37 32 bit fast tested in InitialTest stage.
- Python37-64bit-full:
- PYTHON_VERSION: '3.7'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- BITS: 64
- #PyPy37-64bit-full:
- # PYTHON_VERSION: 'PyPy3.7'
- # PYTHON_ARCH: 'x64'
- # TEST_MODE: fast
- # BITS: 64
Python38-32bit-fast:
PYTHON_VERSION: '3.8'
PYTHON_ARCH: 'x86'
@@ -278,33 +254,6 @@ stages:
- template: azure-steps-windows.yml
- - job: Linux_gcc48
- pool:
- # ubuntu-20.04 does not provide a gcc-4.8 package
- vmImage: 'ubuntu-18.04'
- steps:
- - script: |
- sudo apt update
- sudo apt install python3.7
- sudo apt install python3.7-dev
- if ! `gcc-4.8 2>/dev/null`; then
- sudo apt install gcc-4.8
- fi
- displayName: 'add gcc 4.8'
- - script: |
- # python3 has no setuptools, so install one to get us going
- python3.7 -m pip install --user --upgrade pip 'setuptools<49.2.0'
- python3.7 -m pip install --user -r test_requirements.txt
- CPPFLAGS='' CC=gcc-4.8 F77=gfortran-5 F90=gfortran-5 \
- python3.7 runtests.py --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml
- displayName: 'Run gcc4.8 Build / Tests'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for gcc 4.8'
-
- job: Linux_conda
pool:
vmImage: 'ubuntu-20.04'
@@ -333,3 +282,30 @@ stages:
failTaskOnFailedTests: true
testRunTitle: 'Publish test results for conda installation'
+
+ #- job: Linux_gcc48
+ #pool:
+ ## ubuntu-20.04 does not provide a gcc-4.8 package
+ #vmImage: 'ubuntu-18.04'
+ #steps:
+ #- script: |
+ #sudo apt update
+ #sudo apt install python3.7
+ #sudo apt install python3.7-dev
+ #if ! `gcc-4.8 2>/dev/null`; then
+ #sudo apt install gcc-4.8
+ #fi
+ #displayName: 'add gcc 4.8'
+ #- script: |
+ ## python3 has no setuptools, so install one to get us going
+ #python3.7 -m pip install --user --upgrade pip 'setuptools<49.2.0'
+ #python3.7 -m pip install --user -r test_requirements.txt
+ #CPPFLAGS='' CC=gcc-4.8 F77=gfortran-5 F90=gfortran-5 \
+ #python3.7 runtests.py --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml
+ #displayName: 'Run gcc4.8 Build / Tests'
+ #- task: PublishTestResults@2
+ #condition: succeededOrFailed()
+ #inputs:
+ #testResultsFiles: '**/test-*.xml'
+ #failTaskOnFailedTests: true
+ #testRunTitle: 'Publish test results for gcc 4.8'
diff --git a/doc/changelog/1.21.2-changelog.rst b/doc/changelog/1.21.2-changelog.rst
new file mode 100644
index 000000000..f23fb0cfd
--- /dev/null
+++ b/doc/changelog/1.21.2-changelog.rst
@@ -0,0 +1,41 @@
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Bas van Beek
+* Carl Johnsen +
+* Charles Harris
+* Gwyn Ciesla +
+* Matthieu Dartiailh
+* Matti Picus
+* Niyas Sait +
+* Ralf Gommers
+* Sayed Adel
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 18 pull requests were merged for this release.
+
+* `#19497 <https://github.com/numpy/numpy/pull/19497>`__: MAINT: set Python version for 1.21.x to ``<3.11``
+* `#19533 <https://github.com/numpy/numpy/pull/19533>`__: BUG: Fix an issue wherein importing ``numpy.typing`` could raise
+* `#19646 <https://github.com/numpy/numpy/pull/19646>`__: MAINT: Update Cython version for Python 3.10.
+* `#19648 <https://github.com/numpy/numpy/pull/19648>`__: TST: Bump the python 3.10 test version from beta4 to rc1
+* `#19651 <https://github.com/numpy/numpy/pull/19651>`__: TST: avoid distutils.sysconfig in runtests.py
+* `#19652 <https://github.com/numpy/numpy/pull/19652>`__: MAINT: add missing dunder method to nditer type hints
+* `#19656 <https://github.com/numpy/numpy/pull/19656>`__: BLD, SIMD: Fix testing extra checks when ``-Werror`` isn't applicable...
+* `#19657 <https://github.com/numpy/numpy/pull/19657>`__: BUG: Remove logical object ufuncs with bool output
+* `#19658 <https://github.com/numpy/numpy/pull/19658>`__: MAINT: Include .coveragerc in source distributions to support...
+* `#19659 <https://github.com/numpy/numpy/pull/19659>`__: BUG: Fix bad write in masked iterator output copy paths
+* `#19660 <https://github.com/numpy/numpy/pull/19660>`__: ENH: Add support for windows on arm targets
+* `#19661 <https://github.com/numpy/numpy/pull/19661>`__: BUG: add base to templated arguments for platlib
+* `#19662 <https://github.com/numpy/numpy/pull/19662>`__: BUG,DEP: Non-default UFunc signature/dtype usage should be deprecated
+* `#19666 <https://github.com/numpy/numpy/pull/19666>`__: MAINT: Add Python 3.10 to supported versions.
+* `#19668 <https://github.com/numpy/numpy/pull/19668>`__: TST,BUG: Sanitize path-separators when running ``runtest.py``
+* `#19671 <https://github.com/numpy/numpy/pull/19671>`__: BLD: load extra flags when checking for libflame
+* `#19676 <https://github.com/numpy/numpy/pull/19676>`__: BLD: update circleCI docker image
+* `#19677 <https://github.com/numpy/numpy/pull/19677>`__: REL: Prepare for 1.21.2 release.
diff --git a/doc/neps/nep-0047-array-api-standard.rst b/doc/neps/nep-0047-array-api-standard.rst
index 19965c20d..3e63602cc 100644
--- a/doc/neps/nep-0047-array-api-standard.rst
+++ b/doc/neps/nep-0047-array-api-standard.rst
@@ -91,7 +91,7 @@ In addition to those use cases, the new namespace contains functionality that
is widely used and supported by many array libraries. As such, it is a good
set of functions to teach to newcomers to NumPy and recommend as "best
practice". That contrasts with NumPy's main namespace, which contains many
-functions and objects that have been superceded or we consider mistakes - but
+functions and objects that have been superseded or we consider mistakes - but
that we can't remove because of backwards compatibility reasons.
The usage of the ``numpy.array_api`` namespace by downstream libraries is
@@ -104,7 +104,7 @@ Adoption in downstream libraries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The prototype implementation of the ``array_api`` namespace will be used with
-SciPy, scikit-learn and other libraries of interest that depend on NumPy, in
+SciPy, scikit-learn, and other libraries of interest that depend on NumPy, in
order to get more experience with the design and find out if any important
parts are missing.
@@ -179,35 +179,41 @@ have a direct NumPy equivalent. This figure shows what is included at a high lev
The most important changes compared to what NumPy currently offers are:
-- A new array object which:
+- A new array object, ``numpy.array_api.Array`` which:
- - conforms to the casting rules and indexing behaviour specified by the
+ - is a thin pure Python (non-subclass) wrapper around ``np.ndarray``,
+ - conforms to the casting rules and indexing behavior specified by the
standard,
- does not have methods other than dunder methods,
- - does not support the full range of NumPy indexing behaviour. Advanced
- indexing with integers is not supported. Only boolean indexing
- with a single (possibly multi-dimensional) boolean array is supported.
- An indexing expression that selects a single element returns a 0-D array
- rather than a scalar.
+ - does not support the full range of NumPy indexing behavior (see
+ :ref:`indexing` below),
+ - does not have distinct scalar objects, only 0-D arrays,
+ - cannot be constructed directly. Instead array construction functions
+ like ``asarray()`` should be used.
- Functions in the ``array_api`` namespace:
- - do not accept ``array_like`` inputs, only NumPy arrays and Python scalars
+ - do not accept ``array_like`` inputs, only ``numpy.array_api`` array
+ objects, with Python scalars only being supported in dunder operators on
+ the array object,
- do not support ``__array_ufunc__`` and ``__array_function__``,
- use positional-only and keyword-only parameters in their signatures,
- have inline type annotations,
- may have minor changes to signatures and semantics of individual
functions compared to their equivalents already present in NumPy,
- only support dtype literals, not format strings or other ways of
- specifying dtypes
+ specifying dtypes,
+ - generally may only support a restricted set of dtypes compared to their
+ NumPy counterparts.
- DLPack_ support will be added to NumPy,
- New syntax for "device support" will be added, through a ``.device``
attribute on the new array object, and ``device=`` keywords in array creation
functions in the ``array_api`` namespace,
-- Casting rules that differ from those NumPy currently has. Output dtypes can
+- Casting rules will differ from those NumPy currently has. Output dtypes can
be derived from input dtypes (i.e. no value-based casting), and 0-D arrays
- are treated like >=1-D arrays.
+ are treated like >=1-D arrays. Cross-kind casting (e.g., int to float) is
+ not allowed.
- Not all dtypes NumPy has are part of the standard. Only boolean, signed and
unsigned integers, and floating-point dtypes up to ``float64`` are supported.
Complex dtypes are expected to be added in the next version of the standard.
@@ -220,6 +226,31 @@ Improvements to existing NumPy functionality that are needed include:
that are currently missing such support.
- Add the ``keepdims`` keyword to ``np.argmin`` and ``np.argmax``.
- Add a "never copy" mode to ``np.asarray``.
+- Add smallest_normal to ``np.finfo()``.
+- DLPack_ support.
+
+Additionally, the ``numpy.array_api`` implementation was chosen to be a
+*minimal* implementation of the array API standard. This means that it not
+only conforms to all the requirements of the array API, but it explicitly does
+not include any APIs or behaviors not explicitly required by it. The standard
+itself does not require implementations to be so restrictive, but doing this
+with the NumPy array API implementation will allow it to become a canonical
+implementation of the array API standard. Anyone who wants to make use of the
+array API standard can use the NumPy implementation and be sure that their
+code is not making use of behaviors that will not be in other conforming
+implementations.
+
+In particular, this means
+
+- ``numpy.array_api`` will only include those functions that are listed in the
+ standard. This also applies to methods on the ``Array`` object,
+- Functions will only accept input dtypes that are required by the standard
+ (e.g., transcendental functions like ``cos`` will not accept integer dtypes
+ because the standard only requires them to accept floating-point dtypes),
+- Type promotion will only occur for combinations of dtypes required by the
+ standard (see the :ref:`dtypes-and-casting-rules` section below),
+- Indexing is limited to a subset of possible index types (see :ref:`indexing`
+ below).
Functions in the ``array_api`` namespace
@@ -228,41 +259,56 @@ Functions in the ``array_api`` namespace
Let's start with an example of a function implementation that shows the most
important differences with the equivalent function in the main namespace::
- def max(x: array, /, *,
- axis: Optional[Union[int, Tuple[int, ...]]] = None,
- keepdims: bool = False
- ) -> array:
+ def matmul(x1: Array, x2: Array, /) -> Array:
"""
- Array API compatible wrapper for :py:func:`np.max <numpy.max>`.
+ Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`.
+ See its docstring for more information.
"""
- return np.max._implementation(x, axis=axis, keepdims=keepdims)
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in matmul")
-This function does not accept ``array_like`` inputs, only ``ndarray``. There
-are multiple reasons for this. Other array libraries all work like this.
-Letting the user do coercion of lists, generators, or other foreign objects
-separately results in a cleaner design with less unexpected behaviour.
-It's higher-performance - less overhead from ``asarray`` calls. Static typing
-is easier. Subclasses will work as expected. And the slight increase in verbosity
-because users have to explicitly coerce to ``ndarray`` on rare occasions
-seems like a small price to pay.
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+
+ return Array._new(np.matmul(x1._array, x2._array))
+
+This function does not accept ``array_like`` inputs, only
+``numpy.array_api.Array``. There are multiple reasons for this. Other array
+libraries all work like this. Requiring the user to do coercion of Python
+scalars, lists, generators, or other foreign objects explicitly results in a
+cleaner design with less unexpected behavior. It is higher-performance---less
+overhead from ``asarray`` calls. Static typing is easier. Subclasses will work
+as expected. And the slight increase in verbosity because users have to
+explicitly coerce to ``ndarray`` on rare occasions seems like a small price to
+pay.
This function does not support ``__array_ufunc__`` nor ``__array_function__``.
These protocols serve a similar purpose as the array API standard module itself,
-but through a different mechanisms. Because only ``ndarray`` instances are accepted,
+but through a different mechanisms. Because only ``Array`` instances are accepted,
dispatching via one of these protocols isn't useful anymore.
-This function uses positional-only parameters in its signature. This makes code
-more portable - writing ``max(x=x, ...)`` is no longer valid, hence if other
-libraries call the first parameter ``input`` rather than ``x``, that is fine.
-The rationale for keyword-only parameters (not shown in the above example) is
-two-fold: clarity of end user code, and it being easier to extend the signature
-in the future with keywords in the desired order.
+This function uses positional-only parameters in its signature. This makes
+code more portable---writing, for instance, ``max(a=a, ...)`` is no longer
+valid, hence if other libraries call the first parameter ``input`` rather than
+``a``, that is fine. Note that NumPy already uses positional-only arguments
+for functions that are ufuncs. The rationale for keyword-only parameters (not
+shown in the above example) is two-fold: clarity of end user code, and it
+being easier to extend the signature in the future without worrying about the
+order of keywords.
This function has inline type annotations. Inline annotations are far easier to
maintain than separate stub files. And because the types are simple, this will
not result in a large amount of clutter with type aliases or unions like in the
current stub files NumPy has.
+This function only accepts numeric dtypes (i.e., not ``bool``). It also does
+not allow the input dtypes to be of different kinds (the internal
+``_result_type()`` function will raise ``TypeError`` on cross-kind type
+combinations like ``_result_type(int32, float64)``). This allows the
+implementation to be minimal. Preventing combinations that work in NumPy but
+are not required by the array API specification lets users of the submodule
+know they are not relying on NumPy specific behavior that may not be present
+in array API conforming implementations from other libraries.
DLPack support for zero-copy data interchange
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -283,16 +329,16 @@ the options already present in NumPy are:
ROCm drivers, or OpenCL devices). NumPy is CPU-only, but other array
libraries are not. Having one protocol per device isn't tenable, hence
device support is a must.
-2. Widespread support. DLPack has the widest adoption of all protocols, only
- NumPy is missing support. And the experiences of other libraries with it
+2. Widespread support. DLPack has the widest adoption of all protocols. Only
+ NumPy is missing support, and the experiences of other libraries with it
are positive. This contrasts with the protocols NumPy does support, which
- are used very little - when other libraries want to interoperate with
+ are used very little---when other libraries want to interoperate with
NumPy, they typically use the (more limited, and NumPy-specific)
``__array__`` protocol.
Adding support for DLPack to NumPy entails:
-- Adding a ``ndarray.__dlpack__`` method
+- Adding a ``ndarray.__dlpack__`` method.
- Adding a ``from_dlpack`` function, which takes as input an object
supporting ``__dlpack__``, and returns an ``ndarray``.
@@ -307,7 +353,7 @@ NumPy itself is CPU-only, so it clearly doesn't have a need for device support.
However, other libraries (e.g. TensorFlow, PyTorch, JAX, MXNet) support
multiple types of devices: CPU, GPU, TPU, and more exotic hardware.
To write portable code on systems with multiple devices, it's often necessary
-to create new arrays on the same device as some other array, or check that
+to create new arrays on the same device as some other array, or to check that
two arrays live on the same device. Hence syntax for that is needed.
The array object will have a ``.device`` attribute which enables comparing
@@ -317,16 +363,19 @@ from the same library and it's the same hardware device). Furthermore,
def empty(shape: Union[int, Tuple[int, ...]], /, *,
dtype: Optional[dtype] = None,
- device: Optional[device] = None) -> array:
+ device: Optional[device] = None) -> Array:
"""
Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
"""
- return np.empty(shape, dtype=dtype, device=device)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.empty(shape, dtype=dtype))
-The implementation for NumPy may be as simple as setting the device attribute to
-the string ``'cpu'`` and raising an exception if array creation functions
+The implementation for NumPy is as simple as setting the device attribute to
+the string ``"cpu"`` and raising an exception if array creation functions
encounter any other value.
+.. _dtypes-and-casting-rules:
Dtypes and casting rules
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -345,79 +394,101 @@ for more details).
Specifying dtypes to functions, e.g. via the ``dtype=`` keyword, is expected
to only use the dtype literals. Format strings, Python builtin dtypes, or
-string representations of the dtype literals are not accepted - this will
-improve readability and portability of code at little cost.
-
-Casting rules are only defined between different dtypes of the same kind. The
-rationale for this is that mixed-kind (e.g., integer to floating-point)
-casting behavior differs between libraries. NumPy's mixed-kind casting
-behavior doesn't need to be changed or restricted, it only needs to be
-documented that if users use mixed-kind casting, their code may not be
-portable.
+string representations of the dtype literals are not accepted. This will
+improve readability and portability of code at little cost. Furthermore, no
+behavior is expected of these dtype literals themselves other than basic
+equality comparison. In particular, since the array API does not have scalar
+objects, syntax like ``float32(0.0)`` is not allowed (a 0-D array can be
+created with ``asarray(0.0, dtype=float32)``).
+
+Casting rules are only defined between different dtypes of the same kind
+(i.e., boolean to boolean, integer to integer, or floating-point to
+floating-point). This also means omitting integer-uint64 combinations that
+would upcast to float64 in NumPy. The rationale for this is that mixed-kind
+(e.g., integer to floating-point) casting behaviors differ between libraries.
.. image:: _static/nep-0047-casting-rules-lattice.png
*Type promotion diagram. Promotion between any two types is given by their
-join on this lattice. Only the types of participating arrays matter, not
-their values. Dashed lines indicate that behaviour for Python scalars is
-undefined on overflow. Boolean, integer and floating-point dtypes are not
-connected, indicating mixed-kind promotion is undefined.*
+join on this lattice. Only the types of participating arrays matter, not their
+values. Dashed lines indicate that behavior for Python scalars is undefined on
+overflow. The Python scalars themselves are only allowed in operators on the
+array object, not inside of functions. Boolean, integer and floating-point
+dtypes are not connected, indicating mixed-kind promotion is undefined (for
+the NumPy implementation, these raise an exception).*
The most important difference between the casting rules in NumPy and in the
-array API standard is how scalars and 0-dimensional arrays are handled. In
-the standard, array scalars do not exist and 0-dimensional arrays follow the
-same casting rules as higher-dimensional arrays.
+array API standard is how scalars and 0-dimensional arrays are handled. In the
+standard, array scalars do not exist and 0-dimensional arrays follow the same
+casting rules as higher-dimensional arrays. Furthermore, there is no
+value-based casting in the standard. The result type of an operation can be
+predicted entirely from its input arrays' dtypes, regardless of their shapes
+or values. Python scalars are only allowed in dunder operations (like
+``__add__``), and only if they are of the same kind as the array dtype. They
+always cast to the dtype of the array, regardless of value. Overflow behavior
+is undefined.
See the `Type Promotion Rules section of the array API standard <https://data-apis.github.io/array-api/latest/API_specification/type_promotion.html>`__
for more details.
-.. note::
-
- It is not clear what the best way is to support the different casting rules
- for 0-dimensional arrays and no value-based casting. One option may be to
- implement this second set of casting rules, keep them private, mark the
- array API functions with a private attribute that says they adhere to
- these different rules, and let the casting machinery check whether for
- that attribute.
+In the implementation, this means
- This needs discussion.
+- Ensuring any operation that would produce a scalar object in NumPy is
+  converted to a 0-D array in the ``Array`` constructor,
+- Checking for combinations that would apply value-based casting and
+ ensuring they promote to the correct type. This can be achieved, e.g., by
+ manually broadcasting 0-D inputs (preventing them from participating in
+ value-based casting), or by explicitly passing the ``signature`` argument
+ to the underlying ufunc,
+- In dunder operator methods, manually converting Python scalar inputs to 0-D
+ arrays of the matching dtype if they are the same kind, and raising otherwise. For scalars out of
+ bounds of the given dtype (for which the behavior is undefined by the spec),
+ the behavior of ``np.array(scalar, dtype=dtype)`` is used (either cast or
+ raise OverflowError).
+.. _indexing:
Indexing
~~~~~~~~
An indexing expression that would return a scalar with ``ndarray``, e.g.
-``arr_2d[0, 0]``, will return a 0-D array with the new array object. There are
-several reasons for that: array scalars are largely considered a design mistake
+``arr_2d[0, 0]``, will return a 0-D array with the new ``Array`` object. There are
+several reasons for this: array scalars are largely considered a design mistake
which no other array library copied; it works better for non-CPU libraries
(typically arrays can live on the device, scalars live on the host); and it's
-simply a consistent design. To get a Python scalar out of a 0-D array, one can
-simply use the builtin for the type, e.g. ``float(arr_0d)``.
+simply a more consistent design. To get a Python scalar out of a 0-D array, one can
+use the builtin for the type, e.g. ``float(arr_0d)``.
The other `indexing modes in the standard <https://data-apis.github.io/array-api/latest/API_specification/indexing.html>`__
do work largely the same as they do for ``numpy.ndarray``. One noteworthy
difference is that clipping in slice indexing (e.g., ``a[:n]`` where ``n`` is
-larger than the size of the first axis) is unspecified behaviour, because
+larger than the size of the first axis) is unspecified behavior, because
that kind of check can be expensive on accelerators.
-The lack of advanced indexing, and boolean indexing being limited to a single
-n-D boolean array, is due to those indexing modes not being suitable for all
-types of arrays or JIT compilation. Their absence does not seem to be
+The standard omits advanced indexing (indexing by an integer array), and boolean indexing is limited to a
+single n-D boolean array. This is due to those indexing modes not being
+suitable for all types of arrays or JIT compilation. Furthermore, some
+advanced NumPy indexing semantics, such as the semantics for mixing advanced
+and non-advanced indices in a single index, are considered design mistakes in
+NumPy. The absence of these more advanced index types does not seem to be
problematic; if a user or library author wants to use them, they can do so
through zero-copy conversion to ``numpy.ndarray``. This will signal correctly
to whomever reads the code that it is then NumPy-specific rather than portable
to all conforming array types.
-
+Being a minimal implementation, ``numpy.array_api`` will explicitly disallow
+slices with clipped bounds, advanced indexing, and boolean indices mixed with
+other indices.
The array object
~~~~~~~~~~~~~~~~
The array object in the standard does not have methods other than dunder
-methods. The rationale for that is that not all array libraries have methods
-on their array object (e.g., TensorFlow does not). It also provides only a
-single way of doing something, rather than have functions and methods that
-are effectively duplicate.
+methods. It also does not allow direct construction, preferring instead array
+construction methods like ``asarray``. The rationale for that is that not all
+array libraries have methods on their array object (e.g., TensorFlow does
+not). It also provides only a single way of doing something, rather than have
+functions and methods that are effectively duplicate.
Mixing operations that may produce views (e.g., indexing, ``nonzero``)
in combination with mutation (e.g., item or slice assignment) is
@@ -425,41 +496,60 @@ in combination with mutation (e.g., item or slice assignment) is
This cannot easily be prohibited in the array object itself; instead this will
be guidance to the user via documentation.
-The standard current does not prescribe a name for the array object itself.
-We propose to simply name it ``ndarray``. This is the most obvious name, and
-because of the separate namespace should not clash with ``numpy.ndarray``.
-
+The standard currently does not prescribe a name for the array object itself. We
+propose to name it ``Array``. This uses proper PEP 8 capitalization for a
+class, and does not conflict with any existing NumPy class names. [3]_ Note
+that the actual name of the array class does not actually matter that much as
+it is not itself included in the top-level namespace, and cannot be directly
+constructed.
Implementation
--------------
-.. note::
-
- This section needs a lot more detail, which will gradually be added when
- the implementation progresses.
-
A prototype of the ``array_api`` namespace can be found in
-https://github.com/data-apis/numpy/tree/array-api/numpy/_array_api.
-The docstring in its ``__init__.py`` has notes on completeness of the
-implementation. The code for the wrapper functions also contains ``# Note:``
-comments everywhere there is a difference with the NumPy API.
-Two important parts that are not implemented yet are the new array object and
-DLPack support. Functions may need changes to ensure the changed casting rules
-are respected.
-
-The array object
-~~~~~~~~~~~~~~~~
-
-Regarding the array object implementation, we plan to start with a regular
-Python class that wraps a ``numpy.ndarray`` instance. Attributes and methods
-can forward to that wrapped instance, applying input validation and
-implementing changed behaviour as needed.
-
-The casting rules are probably the most challenging part. The in-progress
-dtype system refactor (NEPs 40-43) should make implementing the correct casting
-behaviour easier - it is already moving away from value-based casting for
-example.
-
+https://github.com/numpy/numpy/pull/18585. The docstring in its
+``__init__.py`` has several important notes about implementation details. The
+code for the wrapper functions also contains ``# Note:`` comments everywhere
+there is a difference with the NumPy API. The
+implementation is entirely in pure Python, and consists primarily of wrapper
+classes/functions that pass through to the corresponding NumPy functions after
+applying input validation and any changed behavior. One important part that is not
+implemented yet is DLPack_ support, as its implementation in ``np.ndarray`` is
+still in progress (https://github.com/numpy/numpy/pull/19083).
+
+The ``numpy.array_api`` module is considered experimental. This means that
+importing it will issue a ``UserWarning``. The alternative to this was naming
+the module ``numpy._array_api``, but the warning was chosen instead so that it
+does not become necessary to rename the module in the future, potentially
+breaking user code. The module also requires Python 3.8 or greater due to
+extensive use of the positional-only argument syntax.
+
+The experimental nature of the module also means that it is not yet mentioned
+anywhere in the NumPy documentation, outside of its module docstring and this
+NEP. Documentation for the implementation is itself a challenging problem.
+Presently every docstring in the implementation simply references the
+underlying NumPy function it implements. However, this is not ideal, as the
+underlying NumPy function may have different behavior from the corresponding
+function in the array API, for instance, additional keyword arguments that are
+not present in the array API. It has been suggested that documentation may be
+pulled directly from the spec itself, but support for this would require
+making some technical changes to the way the spec is written, and so the
+current implementation does not yet make any attempt to do this.
+
+The array API specification is accompanied by an in-progress `official test
+suite <https://github.com/data-apis/array-api-tests>`_, which is designed to
+test conformance of any library to the array API specification. The tests
+included with the implementation will therefore be minimal, as the majority of
+the behavior will be verified by this test suite. The tests in NumPy itself
+for the ``array_api`` submodule will only include testing for behavior not
+covered by the array API test suite, for instance, tests that the
+implementation is minimal and properly rejects things like disallowed type
+combinations. A CI job will be added to the array API test suite repository to
+regularly test it against the NumPy implementation. The array API test suite
+is designed to be vendored if libraries wish to do that, but this idea was
+rejected for NumPy because the time taken by it is significant relative to the
+existing NumPy test suite, and because the test suite is itself still
+a work in progress.
The dtype objects
~~~~~~~~~~~~~~~~~
@@ -476,16 +566,20 @@ Dtypes should not be assumed to have a class hierarchy by users, however we are
free to implement it with a class hierarchy if that's convenient. We considered
the following options to implement dtype objects:
-1. Alias dtypes to those in the main namespace. E.g., ``np.array_api.float32 =
+1. Alias dtypes to those in the main namespace, e.g., ``np.array_api.float32 =
np.float32``.
-2. Make the dtypes instances of ``np.dtype``. E.g., ``np.array_api.float32 =
+2. Make the dtypes instances of ``np.dtype``, e.g., ``np.array_api.float32 =
np.dtype(np.float32)``.
3. Create new singleton classes with only the required methods/attributes
(currently just ``__eq__``).
It seems like (2) would be easiest from the perspective of interacting with
-functions outside the main namespace. And (3) would adhere best to the
-standard.
+functions outside the main namespace and (3) would adhere best to the
+standard. (2) does not prevent users from accessing NumPy-specific attributes
+of the dtype objects like (3) would, although unlike (1), it does disallow
+creating scalar objects like ``float32(0.0)``. (2) also keeps only one object
+per dtype---with (1), ``arr.dtype`` would still be a dtype instance. The
+implementation currently uses (2).
TBD: the standard does not yet have a good way to inspect properties of a
dtype, to ask questions like "is this an integer dtype?". Perhaps this is easy
@@ -533,8 +627,11 @@ between many array libraries rather than only between NumPy and one other librar
Alternatives
------------
-
-
+It was proposed to have the NumPy array API implementation as a separate
+library from NumPy. This was rejected because keeping it separate will make it
+less likely for people to review it, and including it in NumPy itself as an
+experimental submodule will make it easier for end users and library authors
+who already depend on NumPy to access the implementation.
Appendix - a possible ``get_namespace`` implementation
------------------------------------------------------
@@ -569,6 +666,11 @@ Discussion
- `First discussion on the mailing list about the array API standard <https://mail.python.org/pipermail/numpy-discussion/2020-November/081181.html>`__
+- `Discussion of NEP 47 on the mailing list
+ <https://mail.python.org/pipermail/numpy-discussion/2021-February/081530.html>`_
+
+- `PR #18585 implementing numpy.array_api
+ <https://github.com/numpy/numpy/pull/18585>`_
References and Footnotes
------------------------
@@ -583,6 +685,7 @@ References and Footnotes
.. [2] https://data-apis.org/blog/array_api_standard_release/
+.. [3] https://github.com/numpy/numpy/pull/18585#discussion_r641370294
Copyright
---------
diff --git a/doc/neps/nep-0049.rst b/doc/neps/nep-0049.rst
index 4a5edae81..6a17f33b5 100644
--- a/doc/neps/nep-0049.rst
+++ b/doc/neps/nep-0049.rst
@@ -144,6 +144,12 @@ NumPy C-API functions
The consumer of the `PyDataMemAllocator` interface must keep track of ``size`` and make sure it is
consistent with the parameter passed to the ``(m|c|re)alloc`` functions.
+ NumPy itself may violate this requirement when the shape of the requested
+ array contains a ``0``, so authors of PyDataMemAllocators should relate to
+ the ``size`` parameter as a best-guess. Work to fix this is ongoing in PRs
+  15780_ and 15788_ but has not yet been resolved. When it is, this NEP should
+ be revisited.
+
.. c:function:: const PyDataMem_Handler * PyDataMem_SetHandler(PyDataMem_Handler *handler)
Sets a new allocation policy. If the input value is ``NULL``, will reset
@@ -153,12 +159,26 @@ NumPy C-API functions
hooks. All the function pointers must be filled in, ``NULL`` is not
accepted.
-.. c:function:: const char * PyDataMem_GetHandlerName(PyArrayObject *obj)
+.. c:function:: const PyDataMem_Handler * PyDataMem_GetHandler(PyArrayObject *obj)
- Return the const char name of the ``PyDataMem_Handler`` used by the
- ``PyArrayObject``. If ``NULL``, return the name of the current global policy
+ Return the ``PyDataMem_Handler`` used by the
+ ``PyArrayObject``. If ``NULL``, return the handler
that will be used to allocate data for the next ``PyArrayObject``.
+``PyDataMem_Handler`` thread safety and lifetime
+================================================
+The active handler is stored in the current :py:class:`~contextvars.Context`
+via a :py:class:`~contextvars.ContextVar`. This ensures it can be configured both
+per-thread and per-async-coroutine.
+
+There is currently no lifetime management of ``PyDataMem_Handler``.
+The user of `PyDataMem_SetHandler` must ensure that the argument remains alive
+for as long as any objects allocated with it, and while it is the active handler.
+In practice, this means the handler must be immortal.
+
+As an implementation detail, currently this ``ContextVar`` contains a ``PyCapsule``
+object storing a pointer to a ``PyDataMem_Handler`` with no destructor,
+but this should not be relied upon.
Sample code
===========
@@ -219,7 +239,7 @@ the ``sz`` argument is correct.
if (i != sz) {
fprintf(stderr, "uh-oh, unmatched shift_free"
"(ptr, %ld) but allocated %ld\\n", sz, i);
- /* This happens in some places, only print */
+ /* This happens when the shape has a 0, only print */
ctx->free(real);
}
else {
@@ -266,7 +286,6 @@ the ``sz`` argument is correct.
shift_free /* free */
}
};
- '''
Related Work
------------
@@ -310,6 +329,8 @@ References and Footnotes
.. _`PR 17582`: https://github.com/numpy/numpy/pull/17582
.. _`PR 5457`: https://github.com/numpy/numpy/pull/5457
.. _`PR 5470`: https://github.com/numpy/numpy/pull/5470
+.. _`15780`: https://github.com/numpy/numpy/pull/15780
+.. _`15788`: https://github.com/numpy/numpy/pull/15788
.. _`PR 390`: https://github.com/numpy/numpy/pull/390
.. _`issue 17467`: https://github.com/numpy/numpy/issues/17467
.. _`comment in the PR`: https://github.com/numpy/numpy/pull/17582#issuecomment-809145547
diff --git a/doc/release/upcoming_changes/18585.new_feature.rst b/doc/release/upcoming_changes/18585.new_feature.rst
new file mode 100644
index 000000000..bb83d755c
--- /dev/null
+++ b/doc/release/upcoming_changes/18585.new_feature.rst
@@ -0,0 +1,15 @@
+Implementation of the NEP 47 (adopting the array API standard)
+--------------------------------------------------------------
+
+An initial implementation of `NEP 47`_ (adopting the array API standard) has
+been added as ``numpy.array_api``. The implementation is experimental and will
+issue a UserWarning on import, as the `array API standard
+<https://data-apis.org/array-api/latest/index.html>`_ is still in draft state.
+``numpy.array_api`` is a conforming implementation of the array API standard,
+which is also minimal, meaning that only those functions and behaviors that
+are required by the standard are implemented (see the NEP for more info).
+Libraries wishing to make use of the array API standard are encouraged to use
+``numpy.array_api`` to check that they are only using functionality that is
+guaranteed to be present in standard conforming implementations.
+
+.. _`NEP 47`: https://numpy.org/neps/nep-0047-array-api-standard.html
diff --git a/doc/release/upcoming_changes/19527.new_feature.rst b/doc/release/upcoming_changes/19527.new_feature.rst
new file mode 100644
index 000000000..3967f1841
--- /dev/null
+++ b/doc/release/upcoming_changes/19527.new_feature.rst
@@ -0,0 +1,3 @@
+Added support for LoongArch
+------------------------------------------------
+LoongArch is a new instruction set; NumPy previously failed to compile on the LoongArch architecture, so support for it has been added.
diff --git a/doc/release/upcoming_changes/19539.expired.rst b/doc/release/upcoming_changes/19539.expired.rst
new file mode 100644
index 000000000..6e94f175d
--- /dev/null
+++ b/doc/release/upcoming_changes/19539.expired.rst
@@ -0,0 +1,2 @@
+* Using the strings ``"Bytes0"``, ``"Datetime64"``, ``"Str0"``, ``"Uint32"``,
+ and ``"Uint64"`` as a dtype will now raise a ``TypeError``. \ No newline at end of file
diff --git a/doc/release/upcoming_changes/19615.expired.rst b/doc/release/upcoming_changes/19615.expired.rst
new file mode 100644
index 000000000..4e02771e3
--- /dev/null
+++ b/doc/release/upcoming_changes/19615.expired.rst
@@ -0,0 +1,8 @@
+Expired deprecations for ``loads``, ``ndfromtxt``, and ``mafromtxt`` in npyio
+-----------------------------------------------------------------------------
+
+``numpy.loads`` was deprecated in v1.15, with the recommendation that users
+use `pickle.loads` instead.
+``ndfromtxt`` and ``mafromtxt`` were both deprecated in v1.17 - users should
+use `numpy.genfromtxt` instead with the appropriate value for the
+``usemask`` parameter.
diff --git a/doc/release/upcoming_changes/19655.change.rst b/doc/release/upcoming_changes/19655.change.rst
new file mode 100644
index 000000000..2c2315dd2
--- /dev/null
+++ b/doc/release/upcoming_changes/19655.change.rst
@@ -0,0 +1,4 @@
+Python 3.7 is no longer supported
+---------------------------------
+Python 3.7 support has been dropped. This is rather strict; there are
+changes that require Python >=3.8.
diff --git a/doc/release/upcoming_changes/19680.improvement.rst b/doc/release/upcoming_changes/19680.improvement.rst
new file mode 100644
index 000000000..1a2a3496b
--- /dev/null
+++ b/doc/release/upcoming_changes/19680.improvement.rst
@@ -0,0 +1,5 @@
+`numpy.fromregex` now accepts ``os.PathLike`` implementations
+-------------------------------------------------------------
+
+`numpy.fromregex` now accepts objects implementing the `__fspath__ <os.PathLike>`
+protocol, *e.g.* `pathlib.Path`.
diff --git a/doc/release/upcoming_changes/19803.new_feature.rst b/doc/release/upcoming_changes/19803.new_feature.rst
new file mode 100644
index 000000000..942325822
--- /dev/null
+++ b/doc/release/upcoming_changes/19803.new_feature.rst
@@ -0,0 +1,14 @@
+``is_integer`` is now available to `numpy.floating` and `numpy.integer`
+-----------------------------------------------------------------------
+Based on its counterpart in `float` and `int`, the numpy floating point and
+integer types now support `~float.is_integer`. Returns ``True`` if the
+number is finite with integral value, and ``False`` otherwise.
+
+.. code-block:: python
+
+ >>> np.float32(-2.0).is_integer()
+ True
+ >>> np.float64(3.2).is_integer()
+ False
+ >>> np.int32(-2).is_integer()
+ True
diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index 4cbe7d4d5..450cb1e2e 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -41,8 +41,8 @@
<span class="linkdescr">Contributing to NumPy</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("dev/underthehood") }}">Under-the-hood docs</a><br/>
<span class="linkdescr">Specialized, in-depth documentation</span></p>
- <p class="biglink"><a class="biglink" href="{{ pathto("docs/howto_document") }}">A guide to NumPy documentation</a><br/>
- <p class="biglink"><a class="biglink" href="{{ pathto("docs/howto_build_docs") }}">Building the NumPy API and reference docs</a><br/>
+ <p class="biglink"><a class="biglink" href="{{ pathto("dev/howto-docs") }}">How to contribute to the NumPy documentation</a><br/>
+ <p class="biglink"><a class="biglink" href="{{ pathto("dev/howto_build_docs") }}">Building the NumPy API and reference docs</a><br/>
<p class="biglink"><a class="biglink" href="{{ pathto("benchmarking") }}">Benchmarking</a><br/>
<span class="linkdescr">benchmarking NumPy</span></p>
<p class="biglink"><a class="biglink" href="https://www.numpy.org/neps/index.html">NumPy Enhancement Proposals</a><br/>
@@ -55,7 +55,6 @@
<p class="biglink"><a class="biglink" href="{{ pathto("bugs") }}">Reporting bugs</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("release") }}">Release notes</a></p>
</td><td width="50%">
- <p class="biglink"><a class="biglink" href="{{ pathto("doc_conventions") }}">Document conventions</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("license") }}">License of NumPy</a></p>
</td></tr>
</table>
@@ -70,7 +69,7 @@
</p>
<p>
The preferred way to update the documentation is by submitting a pull
- request on GitHub (see the <a href="{{ pathto("docs/index") }}">Documentation index</a>).
+ request on GitHub (see the <a href="{{ pathto("dev/howto-docs") }}">Documentation guide</a>).
Please help us to further improve the NumPy documentation!
</p>
{% endblock %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 587307cf9..41b5cee25 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -300,6 +300,7 @@ intersphinx_mapping = {
'scipy-lecture-notes': ('https://scipy-lectures.org', None),
'pytest': ('https://docs.pytest.org/en/stable', None),
'numpy-tutorials': ('https://numpy.org/numpy-tutorials', None),
+ 'numpydoc': ('https://numpydoc.readthedocs.io/en/latest', None),
}
diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst
index 9354357e8..3156d3452 100644
--- a/doc/source/dev/howto-docs.rst
+++ b/doc/source/dev/howto-docs.rst
@@ -153,6 +153,69 @@ if you write a tutorial on your blog, create a YouTube video, or answer question
on Stack Overflow and other sites.
+.. _howto-document:
+
+*******************
+Documentation style
+*******************
+
+.. _userdoc_guide:
+
+User documentation
+==================
+
+- In general, we follow the
+ `Google developer documentation style guide <https://developers.google.com/style>`_
+ for the User Guide.
+
+- NumPy style governs cases where:
+
+ - Google has no guidance, or
+ - We prefer not to use the Google style
+
+ Our current rules:
+
+ - We pluralize *index* as *indices* rather than
+ `indexes <https://developers.google.com/style/word-list#letter-i>`_,
+ following the precedent of :func:`numpy.indices`.
+
+ - For consistency we also pluralize *matrix* as *matrices*.
+
+- Grammatical issues inadequately addressed by the NumPy or Google rules are
+ decided by the section on "Grammar and Usage" in the most recent edition of
+ the `Chicago Manual of Style
+ <https://en.wikipedia.org/wiki/The_Chicago_Manual_of_Style>`_.
+
+- We welcome being
+ `alerted <https://github.com/numpy/numpy/issues>`_ to cases
+ we should add to the NumPy style rules.
+
+.. _docstring_intro:
+
+Docstrings
+==========
+
+When using `Sphinx <http://www.sphinx-doc.org/>`_ in combination with the
+NumPy conventions, you should use the ``numpydoc`` extension so that your
+docstrings will be handled correctly. For example, Sphinx will extract the
+``Parameters`` section from your docstring and convert it into a field
+list. Using ``numpydoc`` will also avoid the reStructuredText errors produced
+by plain Sphinx when it encounters NumPy docstring conventions like
+section headers (e.g. ``-------------``) that sphinx does not expect to
+find in docstrings.
+
+It is available from:
+
+* `numpydoc on PyPI <https://pypi.python.org/pypi/numpydoc>`_
+* `numpydoc on GitHub <https://github.com/numpy/numpydoc/>`_
+
+Note that for documentation within NumPy, it is not necessary to do
+``import numpy as np`` at the beginning of an example.
+
+Please use the ``numpydoc`` :ref:`formatting standard <numpydoc:format>` as
+shown in their :ref:`example <numpydoc:example>`.
+
+
*********************
Documentation reading
*********************
diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/dev/howto_build_docs.rst
index 884cf7935..884cf7935 100644
--- a/doc/source/docs/howto_build_docs.rst
+++ b/doc/source/dev/howto_build_docs.rst
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index b9347a58c..a8c969267 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -257,6 +257,7 @@ The rest of the story
Git Basics <gitwash/index>
development_environment
development_gitpod
+ howto_build_docs
development_workflow
development_advanced_debugging
reviewer_guidelines
diff --git a/doc/source/doc_conventions.rst b/doc/source/doc_conventions.rst
deleted file mode 100644
index e2bc419d1..000000000
--- a/doc/source/doc_conventions.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-.. _documentation_conventions:
-
-##############################################################################
-Documentation conventions
-##############################################################################
-
-- Names that look like :func:`numpy.array` are links to detailed
- documentation.
-
-- Examples often include the Python prompt ``>>>``. This is not part of the
- code and will cause an error if typed or pasted into the Python
- shell. It can be safely typed or pasted into the IPython shell; the ``>>>``
- is ignored.
-
-- Examples often use ``np`` as an alias for ``numpy``; that is, they assume
- you've run::
-
- >>> import numpy as np
-
-- If you're a code contributor writing a docstring, see :ref:`docstring_intro`.
-
-- If you're a writer contributing ordinary (non-docstring) documentation, see
- :ref:`userdoc_guide`.
diff --git a/doc/source/docs/howto_document.rst b/doc/source/docs/howto_document.rst
deleted file mode 100644
index ff726c67c..000000000
--- a/doc/source/docs/howto_document.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-.. _howto-document:
-
-
-A Guide to NumPy Documentation
-==============================
-
-.. _userdoc_guide:
-
-User documentation
-******************
-- In general, we follow the
- `Google developer documentation style guide <https://developers.google.com/style>`_.
-
-- NumPy style governs cases where:
-
- - Google has no guidance, or
- - We prefer not to use the Google style
-
- Our current rules:
-
- - We pluralize *index* as *indices* rather than
- `indexes <https://developers.google.com/style/word-list#letter-i>`_,
- following the precedent of :func:`numpy.indices`.
-
- - For consistency we also pluralize *matrix* as *matrices*.
-
-- Grammatical issues inadequately addressed by the NumPy or Google rules are
- decided by the section on "Grammar and Usage" in the most recent edition of
- the `Chicago Manual of Style
- <https://en.wikipedia.org/wiki/The_Chicago_Manual_of_Style>`_.
-
-- We welcome being
- `alerted <https://github.com/numpy/numpy/issues>`_ to cases
- we should add to the NumPy style rules.
-
-
-
-.. _docstring_intro:
-
-Docstrings
-**********
-
-When using `Sphinx <http://www.sphinx-doc.org/>`__ in combination with the
-numpy conventions, you should use the ``numpydoc`` extension so that your
-docstrings will be handled correctly. For example, Sphinx will extract the
-``Parameters`` section from your docstring and convert it into a field
-list. Using ``numpydoc`` will also avoid the reStructuredText errors produced
-by plain Sphinx when it encounters numpy docstring conventions like
-section headers (e.g. ``-------------``) that sphinx does not expect to
-find in docstrings.
-
-Some features described in this document require a recent version of
-``numpydoc``. For example, the **Yields** section was added in
-``numpydoc`` 0.6.
-
-It is available from:
-
-* `numpydoc on PyPI <https://pypi.python.org/pypi/numpydoc>`_
-* `numpydoc on GitHub <https://github.com/numpy/numpydoc/>`_
-
-Note that for documentation within numpy, it is not necessary to do
-``import numpy as np`` at the beginning of an example. However, some
-sub-modules, such as ``fft``, are not imported by default, and you have to
-include them explicitly::
-
- import numpy.fft
-
-after which you may use it::
-
- np.fft.fft2(...)
-
-Please use the numpydoc `formatting standard`_ as shown in their example_
-
-.. _`formatting standard`: https://numpydoc.readthedocs.io/en/latest/format.html
-.. _example: https://numpydoc.readthedocs.io/en/latest/example.html
diff --git a/doc/source/docs/index.rst b/doc/source/docs/index.rst
deleted file mode 100644
index 7d8b1bcb4..000000000
--- a/doc/source/docs/index.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. _documentation:
-
-NumPy's Documentation
-=====================
-
-.. toctree::
- :maxdepth: 2
-
- howto_document
- howto_build_docs
-
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index 9f82875ea..100d22e02 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -1,561 +1,70 @@
-.. for doctests
- >>> import numpy as np
-
+.. _routines.indexing:
.. _arrays.indexing:
-Indexing
-========
-
-.. seealso::
+Indexing routines
+=================
- :ref:`Indexing basics <basics.indexing>`
-
-.. sectionauthor:: adapted from "Guide to NumPy" by Travis E. Oliphant
+.. seealso:: :ref:`basics.indexing`
.. currentmodule:: numpy
-.. index:: indexing, slicing
-
-:class:`ndarrays <ndarray>` can be indexed using the standard Python
-``x[obj]`` syntax, where *x* is the array and *obj* the selection.
-There are three kinds of indexing available: field access, basic
-slicing, advanced indexing. Which one occurs depends on *obj*.
-
-.. note::
-
- In Python, ``x[(exp1, exp2, ..., expN)]`` is equivalent to
- ``x[exp1, exp2, ..., expN]``; the latter is just syntactic sugar
- for the former.
-
-
-Basic Slicing and Indexing
+Generating index arrays
+-----------------------
+.. autosummary::
+ :toctree: generated/
+
+ c_
+ r_
+ s_
+ nonzero
+ where
+ indices
+ ix_
+ ogrid
+ ravel_multi_index
+ unravel_index
+ diag_indices
+ diag_indices_from
+ mask_indices
+ tril_indices
+ tril_indices_from
+ triu_indices
+ triu_indices_from
+
+Indexing-like operations
+------------------------
+.. autosummary::
+ :toctree: generated/
+
+ take
+ take_along_axis
+ choose
+ compress
+ diag
+ diagonal
+ select
+ lib.stride_tricks.sliding_window_view
+ lib.stride_tricks.as_strided
+
+Inserting data into arrays
--------------------------
-
-Basic slicing extends Python's basic concept of slicing to N
-dimensions. Basic slicing occurs when *obj* is a :class:`slice` object
-(constructed by ``start:stop:step`` notation inside of brackets), an
-integer, or a tuple of slice objects and integers. :py:data:`Ellipsis`
-and :const:`newaxis` objects can be interspersed with these as
-well.
-
-.. deprecated:: 1.15.0
-
- In order to remain backward compatible with a common usage in
- Numeric, basic slicing is also initiated if the selection object is
- any non-ndarray and non-tuple sequence (such as a :class:`list`) containing
- :class:`slice` objects, the :py:data:`Ellipsis` object, or the :const:`newaxis`
- object, but not for integer arrays or other embedded sequences.
-
-.. index::
- triple: ndarray; special methods; getitem
- triple: ndarray; special methods; setitem
- single: ellipsis
- single: newaxis
-
-The simplest case of indexing with *N* integers returns an :ref:`array
-scalar <arrays.scalars>` representing the corresponding item. As in
-Python, all indices are zero-based: for the *i*-th index :math:`n_i`,
-the valid range is :math:`0 \le n_i < d_i` where :math:`d_i` is the
-*i*-th element of the shape of the array. Negative indices are
-interpreted as counting from the end of the array (*i.e.*, if
-:math:`n_i < 0`, it means :math:`n_i + d_i`).
-
-
-All arrays generated by basic slicing are always :term:`views <view>`
-of the original array.
-
-.. note::
-
- NumPy slicing creates a :term:`view` instead of a copy as in the case of
- builtin Python sequences such as string, tuple and list.
- Care must be taken when extracting
- a small portion from a large array which becomes useless after the
- extraction, because the small portion extracted contains a reference
- to the large original array whose memory will not be released until
- all arrays derived from it are garbage-collected. In such cases an
- explicit ``copy()`` is recommended.
-
-The standard rules of sequence slicing apply to basic slicing on a
-per-dimension basis (including using a step index). Some useful
-concepts to remember include:
-
-- The basic slice syntax is ``i:j:k`` where *i* is the starting index,
- *j* is the stopping index, and *k* is the step (:math:`k\neq0`).
- This selects the *m* elements (in the corresponding dimension) with
- index values *i*, *i + k*, ..., *i + (m - 1) k* where
- :math:`m = q + (r\neq0)` and *q* and *r* are the quotient and remainder
- obtained by dividing *j - i* by *k*: *j - i = q k + r*, so that
- *i + (m - 1) k < j*.
-
- .. admonition:: Example
-
- >>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> x[1:7:2]
- array([1, 3, 5])
-
-- Negative *i* and *j* are interpreted as *n + i* and *n + j* where
- *n* is the number of elements in the corresponding dimension.
- Negative *k* makes stepping go towards smaller indices.
-
- .. admonition:: Example
-
- >>> x[-2:10]
- array([8, 9])
- >>> x[-3:3:-1]
- array([7, 6, 5, 4])
-
-- Assume *n* is the number of elements in the dimension being
- sliced. Then, if *i* is not given it defaults to 0 for *k > 0* and
- *n - 1* for *k < 0* . If *j* is not given it defaults to *n* for *k > 0*
- and *-n-1* for *k < 0* . If *k* is not given it defaults to 1. Note that
- ``::`` is the same as ``:`` and means select all indices along this
- axis.
-
- .. admonition:: Example
-
- >>> x[5:]
- array([5, 6, 7, 8, 9])
-
-- If the number of objects in the selection tuple is less than
- *N*, then ``:`` is assumed for any subsequent dimensions.
-
- .. admonition:: Example
-
- >>> x = np.array([[[1],[2],[3]], [[4],[5],[6]]])
- >>> x.shape
- (2, 3, 1)
- >>> x[1:2]
- array([[[4],
- [5],
- [6]]])
-
-- :py:data:`Ellipsis` expands to the number of ``:`` objects needed for the
- selection tuple to index all dimensions. In most cases, this means that
- length of the expanded selection tuple is ``x.ndim``. There may only be a
- single ellipsis present.
-
- .. admonition:: Example
-
- >>> x[...,0]
- array([[1, 2, 3],
- [4, 5, 6]])
-
-- Each :const:`newaxis` object in the selection tuple serves to expand
- the dimensions of the resulting selection by one unit-length
- dimension. The added dimension is the position of the :const:`newaxis`
- object in the selection tuple.
-
- .. admonition:: Example
-
- >>> x[:,np.newaxis,:,:].shape
- (2, 1, 3, 1)
-
-- An integer, *i*, returns the same values as ``i:i+1``
- **except** the dimensionality of the returned object is reduced by
- 1. In particular, a selection tuple with the *p*-th
- element an integer (and all other entries ``:``) returns the
- corresponding sub-array with dimension *N - 1*. If *N = 1*
- then the returned object is an array scalar. These objects are
- explained in :ref:`arrays.scalars`.
-
-- If the selection tuple has all entries ``:`` except the
- *p*-th entry which is a slice object ``i:j:k``,
- then the returned array has dimension *N* formed by
- concatenating the sub-arrays returned by integer indexing of
- elements *i*, *i+k*, ..., *i + (m - 1) k < j*,
-
-- Basic slicing with more than one non-``:`` entry in the slicing
- tuple, acts like repeated application of slicing using a single
- non-``:`` entry, where the non-``:`` entries are successively taken
- (with all other non-``:`` entries replaced by ``:``). Thus,
- ``x[ind1,...,ind2,:]`` acts like ``x[ind1][...,ind2,:]`` under basic
- slicing.
-
- .. warning:: The above is **not** true for advanced indexing.
-
-- You may use slicing to set values in the array, but (unlike lists) you
- can never grow the array. The size of the value to be set in
- ``x[obj] = value`` must be (broadcastable) to the same shape as
- ``x[obj]``.
-
-.. index::
- pair: ndarray; view
-
-.. note::
-
- Remember that a slicing tuple can always be constructed as *obj*
- and used in the ``x[obj]`` notation. Slice objects can be used in
- the construction in place of the ``[start:stop:step]``
- notation. For example, ``x[1:10:5,::-1]`` can also be implemented
- as ``obj = (slice(1,10,5), slice(None,None,-1)); x[obj]`` . This
- can be useful for constructing generic code that works on arrays
- of arbitrary dimension.
-
-.. data:: newaxis
- :noindex:
-
- The :const:`newaxis` object can be used in all slicing operations to
- create an axis of length one. :const:`newaxis` is an alias for
- 'None', and 'None' can be used in place of this with the same result.
-
-.. _advanced-indexing:
-
-Advanced Indexing
------------------
-
-Advanced indexing is triggered when the selection object, *obj*, is a
-non-tuple sequence object, an :class:`ndarray` (of data type integer or bool),
-or a tuple with at least one sequence object or ndarray (of data type
-integer or bool). There are two types of advanced indexing: integer
-and Boolean.
-
-Advanced indexing always returns a *copy* of the data (contrast with
-basic slicing that returns a :term:`view`).
-
-.. warning::
-
- The definition of advanced indexing means that ``x[(1,2,3),]`` is
- fundamentally different than ``x[(1,2,3)]``. The latter is
- equivalent to ``x[1,2,3]`` which will trigger basic selection while
- the former will trigger advanced indexing. Be sure to understand
- why this occurs.
-
- Also recognize that ``x[[1,2,3]]`` will trigger advanced indexing,
- whereas due to the deprecated Numeric compatibility mentioned above,
- ``x[[1,2,slice(None)]]`` will trigger basic slicing.
-
-Integer array indexing
-^^^^^^^^^^^^^^^^^^^^^^
-
-Integer array indexing allows selection of arbitrary items in the array
-based on their *N*-dimensional index. Each integer array represents a number
-of indexes into that dimension.
-
-Purely integer array indexing
-"""""""""""""""""""""""""""""
-
-When the index consists of as many integer arrays as the array being indexed
-has dimensions, the indexing is straight forward, but different from slicing.
-
-Advanced indexes always are :ref:`broadcast<ufuncs.broadcasting>` and
-iterated as *one*::
-
- result[i_1, ..., i_M] == x[ind_1[i_1, ..., i_M], ind_2[i_1, ..., i_M],
- ..., ind_N[i_1, ..., i_M]]
-
-Note that the result shape is identical to the (broadcast) indexing array
-shapes ``ind_1, ..., ind_N``.
-
-.. admonition:: Example
-
- From each row, a specific element should be selected. The row index is just
- ``[0, 1, 2]`` and the column index specifies the element to choose for the
- corresponding row, here ``[0, 1, 0]``. Using both together the task
- can be solved using advanced indexing:
-
- >>> x = np.array([[1, 2], [3, 4], [5, 6]])
- >>> x[[0, 1, 2], [0, 1, 0]]
- array([1, 4, 5])
-
-To achieve a behaviour similar to the basic slicing above, broadcasting can be
-used. The function :func:`ix_` can help with this broadcasting. This is best
-understood with an example.
-
-.. admonition:: Example
-
- From a 4x3 array the corner elements should be selected using advanced
- indexing. Thus all elements for which the column is one of ``[0, 2]`` and
- the row is one of ``[0, 3]`` need to be selected. To use advanced indexing
- one needs to select all elements *explicitly*. Using the method explained
- previously one could write:
-
- >>> x = np.array([[ 0, 1, 2],
- ... [ 3, 4, 5],
- ... [ 6, 7, 8],
- ... [ 9, 10, 11]])
- >>> rows = np.array([[0, 0],
- ... [3, 3]], dtype=np.intp)
- >>> columns = np.array([[0, 2],
- ... [0, 2]], dtype=np.intp)
- >>> x[rows, columns]
- array([[ 0, 2],
- [ 9, 11]])
-
- However, since the indexing arrays above just repeat themselves,
- broadcasting can be used (compare operations such as
- ``rows[:, np.newaxis] + columns``) to simplify this:
-
- >>> rows = np.array([0, 3], dtype=np.intp)
- >>> columns = np.array([0, 2], dtype=np.intp)
- >>> rows[:, np.newaxis]
- array([[0],
- [3]])
- >>> x[rows[:, np.newaxis], columns]
- array([[ 0, 2],
- [ 9, 11]])
-
- This broadcasting can also be achieved using the function :func:`ix_`:
-
- >>> x[np.ix_(rows, columns)]
- array([[ 0, 2],
- [ 9, 11]])
-
- Note that without the ``np.ix_`` call, only the diagonal elements would
- be selected, as was used in the previous example. This difference is the
- most important thing to remember about indexing with multiple advanced
- indexes.
-
-.. _combining-advanced-and-basic-indexing:
-
-Combining advanced and basic indexing
-"""""""""""""""""""""""""""""""""""""
-
-When there is at least one slice (``:``), ellipsis (``...``) or :const:`newaxis`
-in the index (or the array has more dimensions than there are advanced indexes),
-then the behaviour can be more complicated. It is like concatenating the
-indexing result for each advanced index element
-
-In the simplest case, there is only a *single* advanced index. A single
-advanced index can for example replace a slice and the result array will be
-the same, however, it is a copy and may have a different memory layout.
-A slice is preferable when it is possible.
-
-.. admonition:: Example
-
- >>> x[1:2, 1:3]
- array([[4, 5]])
- >>> x[1:2, [1, 2]]
- array([[4, 5]])
-
-The easiest way to understand the situation may be to think in
-terms of the result shape. There are two parts to the indexing operation,
-the subspace defined by the basic indexing (excluding integers) and the
-subspace from the advanced indexing part. Two cases of index combination
-need to be distinguished:
-
-* The advanced indexes are separated by a slice, :py:data:`Ellipsis` or :const:`newaxis`.
- For example ``x[arr1, :, arr2]``.
-* The advanced indexes are all next to each other.
- For example ``x[..., arr1, arr2, :]`` but *not* ``x[arr1, :, 1]``
- since ``1`` is an advanced index in this regard.
-
-In the first case, the dimensions resulting from the advanced indexing
-operation come first in the result array, and the subspace dimensions after
-that.
-In the second case, the dimensions from the advanced indexing operations
-are inserted into the result array at the same spot as they were in the
-initial array (the latter logic is what makes simple advanced indexing
-behave just like slicing).
-
-.. admonition:: Example
-
- Suppose ``x.shape`` is (10,20,30) and ``ind`` is a (2,3,4)-shaped
- indexing :class:`intp` array, then ``result = x[...,ind,:]`` has
- shape (10,2,3,4,30) because the (20,)-shaped subspace has been
- replaced with a (2,3,4)-shaped broadcasted indexing subspace. If
- we let *i, j, k* loop over the (2,3,4)-shaped subspace then
- ``result[...,i,j,k,:] = x[...,ind[i,j,k],:]``. This example
- produces the same result as :meth:`x.take(ind, axis=-2) <ndarray.take>`.
-
-.. admonition:: Example
-
- Let ``x.shape`` be (10,20,30,40,50) and suppose ``ind_1``
- and ``ind_2`` can be broadcast to the shape (2,3,4). Then
- ``x[:,ind_1,ind_2]`` has shape (10,2,3,4,40,50) because the
- (20,30)-shaped subspace from X has been replaced with the
- (2,3,4) subspace from the indices. However,
- ``x[:,ind_1,:,ind_2]`` has shape (2,3,4,10,30,50) because there
- is no unambiguous place to drop in the indexing subspace, thus
- it is tacked-on to the beginning. It is always possible to use
- :meth:`.transpose() <ndarray.transpose>` to move the subspace
- anywhere desired. Note that this example cannot be replicated
- using :func:`take`.
-
-
-Boolean array indexing
-^^^^^^^^^^^^^^^^^^^^^^
-
-This advanced indexing occurs when obj is an array object of Boolean
-type, such as may be returned from comparison operators. A single
-boolean index array is practically identical to ``x[obj.nonzero()]`` where,
-as described above, :meth:`obj.nonzero() <ndarray.nonzero>` returns a
-tuple (of length :attr:`obj.ndim <ndarray.ndim>`) of integer index
-arrays showing the :py:data:`True` elements of *obj*. However, it is
-faster when ``obj.shape == x.shape``.
-
-If ``obj.ndim == x.ndim``, ``x[obj]`` returns a 1-dimensional array
-filled with the elements of *x* corresponding to the :py:data:`True`
-values of *obj*. The search order will be :term:`row-major`,
-C-style. If *obj* has :py:data:`True` values at entries that are outside
-of the bounds of *x*, then an index error will be raised. If *obj* is
-smaller than *x* it is identical to filling it with :py:data:`False`.
-
-.. admonition:: Example
-
- A common use case for this is filtering for desired element values.
- For example one may wish to select all entries from an array which
- are not NaN:
-
- >>> x = np.array([[1., 2.], [np.nan, 3.], [np.nan, np.nan]])
- >>> x[~np.isnan(x)]
- array([1., 2., 3.])
-
- Or wish to add a constant to all negative elements:
-
- >>> x = np.array([1., -1., -2., 3])
- >>> x[x < 0] += 20
- >>> x
- array([ 1., 19., 18., 3.])
-
-In general if an index includes a Boolean array, the result will be
-identical to inserting ``obj.nonzero()`` into the same position
-and using the integer array indexing mechanism described above.
-``x[ind_1, boolean_array, ind_2]`` is equivalent to
-``x[(ind_1,) + boolean_array.nonzero() + (ind_2,)]``.
-
-If there is only one Boolean array and no integer indexing array present,
-this is straight forward. Care must only be taken to make sure that the
-boolean index has *exactly* as many dimensions as it is supposed to work
-with.
-
-.. admonition:: Example
-
- From an array, select all rows which sum up to less or equal two:
-
- >>> x = np.array([[0, 1], [1, 1], [2, 2]])
- >>> rowsum = x.sum(-1)
- >>> x[rowsum <= 2, :]
- array([[0, 1],
- [1, 1]])
-
-
-Combining multiple Boolean indexing arrays or a Boolean with an integer
-indexing array can best be understood with the
-:meth:`obj.nonzero() <ndarray.nonzero>` analogy. The function :func:`ix_`
-also supports boolean arrays and will work without any surprises.
-
-.. admonition:: Example
-
- Use boolean indexing to select all rows adding up to an even
- number. At the same time columns 0 and 2 should be selected with an
- advanced integer index. Using the :func:`ix_` function this can be done
- with:
-
- >>> x = np.array([[ 0, 1, 2],
- ... [ 3, 4, 5],
- ... [ 6, 7, 8],
- ... [ 9, 10, 11]])
- >>> rows = (x.sum(-1) % 2) == 0
- >>> rows
- array([False, True, False, True])
- >>> columns = [0, 2]
- >>> x[np.ix_(rows, columns)]
- array([[ 3, 5],
- [ 9, 11]])
-
- Without the ``np.ix_`` call, only the diagonal elements would be
- selected.
-
- Or without ``np.ix_`` (compare the integer array examples):
-
- >>> rows = rows.nonzero()[0]
- >>> x[rows[:, np.newaxis], columns]
- array([[ 3, 5],
- [ 9, 11]])
-
-Detailed notes
---------------
-
-These are some detailed notes, which are not of importance for day to day
-indexing (in no particular order):
-
-* The native NumPy indexing type is ``intp`` and may differ from the
- default integer array type. ``intp`` is the smallest data type
- sufficient to safely index any array; for advanced indexing it may be
- faster than other types.
-* For advanced assignments, there is in general no guarantee for the
- iteration order. This means that if an element is set more than once,
- it is not possible to predict the final result.
-* An empty (tuple) index is a full scalar index into a zero dimensional array.
- ``x[()]`` returns a *scalar* if ``x`` is zero dimensional and a view
- otherwise. On the other hand ``x[...]`` always returns a view.
-* If a zero dimensional array is present in the index *and* it is a full
- integer index the result will be a *scalar* and not a zero dimensional array.
- (Advanced indexing is not triggered.)
-* When an ellipsis (``...``) is present but has no size (i.e. replaces zero
- ``:``) the result will still always be an array. A view if no advanced index
- is present, otherwise a copy.
-* the ``nonzero`` equivalence for Boolean arrays does not hold for zero
- dimensional boolean arrays.
-* When the result of an advanced indexing operation has no elements but an
- individual index is out of bounds, whether or not an ``IndexError`` is
- raised is undefined (e.g. ``x[[], [123]]`` with ``123`` being out of bounds).
-* When a *casting* error occurs during assignment (for example updating a
- numerical array using a sequence of strings), the array being assigned
- to may end up in an unpredictable partially updated state.
- However, if any other error (such as an out of bounds index) occurs, the
- array will remain unchanged.
-* The memory layout of an advanced indexing result is optimized for each
- indexing operation and no particular memory order can be assumed.
-* When using a subclass (especially one which manipulates its shape), the
- default ``ndarray.__setitem__`` behaviour will call ``__getitem__`` for
- *basic* indexing but not for *advanced* indexing. For such a subclass it may
- be preferable to call ``ndarray.__setitem__`` with a *base class* ndarray
- view on the data. This *must* be done if the subclasses ``__getitem__`` does
- not return views.
-
-.. _arrays.indexing.fields:
-
-
-Field Access
--------------
-
-.. seealso:: :ref:`arrays.dtypes`, :ref:`arrays.scalars`
-
-If the :class:`ndarray` object is a structured array the :term:`fields <field>`
-of the array can be accessed by indexing the array with strings,
-dictionary-like.
-
-Indexing ``x['field-name']`` returns a new :term:`view` to the array,
-which is of the same shape as *x* (except when the field is a
-sub-array) but of data type ``x.dtype['field-name']`` and contains
-only the part of the data in the specified field. Also
-:ref:`record array <arrays.classes.rec>` scalars can be "indexed" this way.
-
-Indexing into a structured array can also be done with a list of field names,
-*e.g.* ``x[['field-name1','field-name2']]``. As of NumPy 1.16 this returns a
-view containing only those fields. In older versions of numpy it returned a
-copy. See the user guide section on :ref:`structured_arrays` for more
-information on multifield indexing.
-
-If the accessed field is a sub-array, the dimensions of the sub-array
-are appended to the shape of the result.
-
-.. admonition:: Example
-
- >>> x = np.zeros((2,2), dtype=[('a', np.int32), ('b', np.float64, (3,3))])
- >>> x['a'].shape
- (2, 2)
- >>> x['a'].dtype
- dtype('int32')
- >>> x['b'].shape
- (2, 2, 3, 3)
- >>> x['b'].dtype
- dtype('float64')
-
-
-Flat Iterator indexing
-----------------------
-
-:attr:`x.flat <ndarray.flat>` returns an iterator that will iterate
-over the entire array (in C-contiguous style with the last index
-varying the fastest). This iterator object can also be indexed using
-basic slicing or advanced indexing as long as the selection object is
-not a tuple. This should be clear from the fact that :attr:`x.flat
-<ndarray.flat>` is a 1-dimensional view. It can be used for integer
-indexing with 1-dimensional C-style-flat indices. The shape of any
-returned array is therefore the shape of the integer indexing object.
-
-.. index::
- single: indexing
- single: ndarray
+.. autosummary::
+ :toctree: generated/
+
+ place
+ put
+ put_along_axis
+ putmask
+ fill_diagonal
+
+Iterating over arrays
+---------------------
+.. autosummary::
+ :toctree: generated/
+
+ nditer
+ ndenumerate
+ ndindex
+ nested_iters
+ flatiter
+ lib.Arrayterator
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 54a1e09e1..39a17cc72 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -225,7 +225,7 @@ PyArrayDescr_Type and PyArray_Descr
compatibility:
- Never declare a non-pointer instance of the struct
- - Never perform pointer arithmatic
+ - Never perform pointer arithmetic
   - Never use ``sizeof(PyArray_Descr)``
It has the following structure:
diff --git a/doc/source/reference/routines.indexing.rst b/doc/source/reference/routines.indexing.rst
deleted file mode 100644
index eebbf4989..000000000
--- a/doc/source/reference/routines.indexing.rst
+++ /dev/null
@@ -1,69 +0,0 @@
-.. _routines.indexing:
-
-Indexing routines
-=================
-
-.. seealso:: :ref:`Indexing <arrays.indexing>`
-
-.. currentmodule:: numpy
-
-Generating index arrays
------------------------
-.. autosummary::
- :toctree: generated/
-
- c_
- r_
- s_
- nonzero
- where
- indices
- ix_
- ogrid
- ravel_multi_index
- unravel_index
- diag_indices
- diag_indices_from
- mask_indices
- tril_indices
- tril_indices_from
- triu_indices
- triu_indices_from
-
-Indexing-like operations
-------------------------
-.. autosummary::
- :toctree: generated/
-
- take
- take_along_axis
- choose
- compress
- diag
- diagonal
- select
- lib.stride_tricks.sliding_window_view
- lib.stride_tricks.as_strided
-
-Inserting data into arrays
---------------------------
-.. autosummary::
- :toctree: generated/
-
- place
- put
- put_along_axis
- putmask
- fill_diagonal
-
-Iterating over arrays
----------------------
-.. autosummary::
- :toctree: generated/
-
- nditer
- ndenumerate
- ndindex
- nested_iters
- flatiter
- lib.Arrayterator
diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst
index 5d6a823b7..593d017cc 100644
--- a/doc/source/reference/routines.rst
+++ b/doc/source/reference/routines.rst
@@ -30,7 +30,6 @@ indentation.
routines.fft
routines.functional
routines.help
- routines.indexing
routines.io
routines.linalg
routines.logic
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index 3eae4e159..b832dad04 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -8,292 +8,16 @@
Universal functions (:class:`ufunc`)
************************************
-.. note: XXX: section might need to be made more reference-guideish...
-
-.. index: ufunc, universal function, arithmetic, operation
+.. seealso:: :ref:`ufuncs-basics`
A universal function (or :term:`ufunc` for short) is a function that
-operates on :class:`ndarrays <ndarray>` in an element-by-element fashion,
+operates on :class:`ndarrays <numpy.ndarray>` in an element-by-element fashion,
supporting :ref:`array broadcasting <ufuncs.broadcasting>`, :ref:`type
casting <ufuncs.casting>`, and several other standard features. That
-is, a ufunc is a ":term:`vectorized <vectorization>`" wrapper for a function that
-takes a fixed number of specific inputs and produces a fixed number of
-specific outputs.
-
-In NumPy, universal functions are instances of the
-:class:`numpy.ufunc` class. Many of the built-in functions are
-implemented in compiled C code. The basic ufuncs operate on scalars, but
-there is also a generalized kind for which the basic elements are sub-arrays
-(vectors, matrices, etc.), and broadcasting is done over other dimensions.
-One can also produce custom :class:`ufunc` instances using the
-:func:`frompyfunc` factory function.
-
-
-.. _ufuncs.broadcasting:
-
-Broadcasting
-============
-
-.. index:: broadcasting
-
-Each universal function takes array inputs and produces array outputs
-by performing the core function element-wise on the inputs (where an
-element is generally a scalar, but can be a vector or higher-order
-sub-array for generalized ufuncs). Standard
-broadcasting rules are applied so that inputs not sharing exactly the
-same shapes can still be usefully operated on. Broadcasting can be
-understood by four rules:
-
-1. All input arrays with :attr:`ndim <ndarray.ndim>` smaller than the
- input array of largest :attr:`ndim <ndarray.ndim>`, have 1's
- prepended to their shapes.
-
-2. The size in each dimension of the output shape is the maximum of all
- the input sizes in that dimension.
-
-3. An input can be used in the calculation if its size in a particular
- dimension either matches the output size in that dimension, or has
- value exactly 1.
-
-4. If an input has a dimension size of 1 in its shape, the first data
- entry in that dimension will be used for all calculations along
- that dimension. In other words, the stepping machinery of the
- :term:`ufunc` will simply not step along that dimension (the
- :ref:`stride <memory-layout>` will be 0 for that dimension).
-
-Broadcasting is used throughout NumPy to decide how to handle
-disparately shaped arrays; for example, all arithmetic operations (``+``,
-``-``, ``*``, ...) between :class:`ndarrays <ndarray>` broadcast the
-arrays before operation.
-
-.. _arrays.broadcasting.broadcastable:
-
-.. index:: broadcastable
-
-A set of arrays is called "broadcastable" to the same shape if
-the above rules produce a valid result, *i.e.*, one of the following
-is true:
-
-1. The arrays all have exactly the same shape.
-
-2. The arrays all have the same number of dimensions and the length of
- each dimensions is either a common length or 1.
-
-3. The arrays that have too few dimensions can have their shapes prepended
- with a dimension of length 1 to satisfy property 2.
-
-.. admonition:: Example
-
- If ``a.shape`` is (5,1), ``b.shape`` is (1,6), ``c.shape`` is (6,)
- and ``d.shape`` is () so that *d* is a scalar, then *a*, *b*, *c*,
- and *d* are all broadcastable to dimension (5,6); and
-
- - *a* acts like a (5,6) array where ``a[:,0]`` is broadcast to the other
- columns,
-
- - *b* acts like a (5,6) array where ``b[0,:]`` is broadcast
- to the other rows,
-
- - *c* acts like a (1,6) array and therefore like a (5,6) array
- where ``c[:]`` is broadcast to every row, and finally,
-
- - *d* acts like a (5,6) array where the single value is repeated.
-
-
-.. _ufuncs-output-type:
-
-Output type determination
-=========================
-
-The output of the ufunc (and its methods) is not necessarily an
-:class:`ndarray`, if all input arguments are not :class:`ndarrays <ndarray>`.
-Indeed, if any input defines an :obj:`~class.__array_ufunc__` method,
-control will be passed completely to that function, i.e., the ufunc is
-:ref:`overridden <ufuncs.overrides>`.
-
-If none of the inputs overrides the ufunc, then
-all output arrays will be passed to the :obj:`~class.__array_prepare__` and
-:obj:`~class.__array_wrap__` methods of the input (besides
-:class:`ndarrays <ndarray>`, and scalars) that defines it **and** has
-the highest :obj:`~class.__array_priority__` of any other input to the
-universal function. The default :obj:`~class.__array_priority__` of the
-ndarray is 0.0, and the default :obj:`~class.__array_priority__` of a subtype
-is 0.0. Matrices have :obj:`~class.__array_priority__` equal to 10.0.
-
-All ufuncs can also take output arguments. If necessary, output will
-be cast to the data-type(s) of the provided output array(s). If a class
-with an :obj:`~class.__array__` method is used for the output, results will be
-written to the object returned by :obj:`~class.__array__`. Then, if the class
-also has an :obj:`~class.__array_prepare__` method, it is called so metadata
-may be determined based on the context of the ufunc (the context
-consisting of the ufunc itself, the arguments passed to the ufunc, and
-the ufunc domain.) The array object returned by
-:obj:`~class.__array_prepare__` is passed to the ufunc for computation.
-Finally, if the class also has an :obj:`~class.__array_wrap__` method, the returned
-:class:`ndarray` result will be passed to that method just before
-passing control back to the caller.
-
-Use of internal buffers
-=======================
-
-.. index:: buffers
-
-Internally, buffers are used for misaligned data, swapped data, and
-data that has to be converted from one data type to another. The size
-of internal buffers is settable on a per-thread basis. There can
-be up to :math:`2 (n_{\mathrm{inputs}} + n_{\mathrm{outputs}})`
-buffers of the specified size created to handle the data from all the
-inputs and outputs of a ufunc. The default size of a buffer is
-10,000 elements. Whenever buffer-based calculation would be needed,
-but all input arrays are smaller than the buffer size, those
-misbehaved or incorrectly-typed arrays will be copied before the
-calculation proceeds. Adjusting the size of the buffer may therefore
-alter the speed at which ufunc calculations of various sorts are
-completed. A simple interface for setting this variable is accessible
-using the function
-
-.. autosummary::
- :toctree: generated/
-
- setbufsize
-
-
-Error handling
-==============
-
-.. index:: error handling
-
-Universal functions can trip special floating-point status registers
-in your hardware (such as divide-by-zero). If available on your
-platform, these registers will be regularly checked during
-calculation. Error handling is controlled on a per-thread basis,
-and can be configured using the functions
-
-.. autosummary::
- :toctree: generated/
-
- seterr
- seterrcall
-
-.. _ufuncs.casting:
-
-Casting Rules
-=============
-
-.. index::
- pair: ufunc; casting rules
-
-.. note::
-
- In NumPy 1.6.0, a type promotion API was created to encapsulate the
- mechanism for determining output types. See the functions
- :func:`result_type`, :func:`promote_types`, and
- :func:`min_scalar_type` for more details.
-
-At the core of every ufunc is a one-dimensional strided loop that
-implements the actual function for a specific type combination. When a
-ufunc is created, it is given a static list of inner loops and a
-corresponding list of type signatures over which the ufunc operates.
-The ufunc machinery uses this list to determine which inner loop to
-use for a particular case. You can inspect the :attr:`.types
-<ufunc.types>` attribute for a particular ufunc to see which type
-combinations have a defined inner loop and which output type they
-produce (:ref:`character codes <arrays.scalars.character-codes>` are used
-in said output for brevity).
-
-Casting must be done on one or more of the inputs whenever the ufunc
-does not have a core loop implementation for the input types provided.
-If an implementation for the input types cannot be found, then the
-algorithm searches for an implementation with a type signature to
-which all of the inputs can be cast "safely." The first one it finds
-in its internal list of loops is selected and performed, after all
-necessary type casting. Recall that internal copies during ufuncs (even
-for casting) are limited to the size of an internal buffer (which is user
-settable).
-
-.. note::
-
- Universal functions in NumPy are flexible enough to have mixed type
- signatures. Thus, for example, a universal function could be defined
- that works with floating-point and integer values. See :func:`ldexp`
- for an example.
-
-By the above description, the casting rules are essentially
-implemented by the question of when a data type can be cast "safely"
-to another data type. The answer to this question can be determined in
-Python with a function call: :func:`can_cast(fromtype, totype)
-<can_cast>`. The Figure below shows the results of this call for
-the 24 internally supported types on the author's 64-bit system. You
-can generate this table for your system with the code given in the Figure.
-
-.. admonition:: Figure
-
- Code segment showing the "can cast safely" table for a 64-bit system.
- Generally the output depends on the system; your system might result in
- a different table.
-
- >>> mark = {False: ' -', True: ' Y'}
- >>> def print_table(ntypes):
- ... print('X ' + ' '.join(ntypes))
- ... for row in ntypes:
- ... print(row, end='')
- ... for col in ntypes:
- ... print(mark[np.can_cast(row, col)], end='')
- ... print()
- ...
- >>> print_table(np.typecodes['All'])
- X ? b h i l q p B H I L Q P e f d g F D G S U V O M m
- ? Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
- b - Y Y Y Y Y Y - - - - - - Y Y Y Y Y Y Y Y Y Y Y - Y
- h - - Y Y Y Y Y - - - - - - - Y Y Y Y Y Y Y Y Y Y - Y
- i - - - Y Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
- l - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
- q - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
- p - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
- B - - Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
- H - - - Y Y Y Y - Y Y Y Y Y - Y Y Y Y Y Y Y Y Y Y - Y
- I - - - - Y Y Y - - Y Y Y Y - - Y Y - Y Y Y Y Y Y - Y
- L - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
- Q - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
- P - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
- e - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y Y - -
- f - - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y - -
- d - - - - - - - - - - - - - - - Y Y - Y Y Y Y Y Y - -
- g - - - - - - - - - - - - - - - - Y - - Y Y Y Y Y - -
- F - - - - - - - - - - - - - - - - - Y Y Y Y Y Y Y - -
- D - - - - - - - - - - - - - - - - - - Y Y Y Y Y Y - -
- G - - - - - - - - - - - - - - - - - - - Y Y Y Y Y - -
- S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - -
- U - - - - - - - - - - - - - - - - - - - - - Y Y Y - -
- V - - - - - - - - - - - - - - - - - - - - - - Y Y - -
- O - - - - - - - - - - - - - - - - - - - - - - - Y - -
- M - - - - - - - - - - - - - - - - - - - - - - Y Y Y -
- m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y
-
-You should note that, while included in the table for completeness,
-the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also,
-note that on a 32-bit system the integer types may have different
-sizes, resulting in a slightly altered table.
-
-Mixed scalar-array operations use a different set of casting rules
-that ensure that a scalar cannot "upcast" an array unless the scalar is
-of a fundamentally different kind of data (*i.e.*, under a different
-hierarchy in the data-type hierarchy) than the array. This rule
-enables you to use scalar constants in your code (which, as Python
-types, are interpreted accordingly in ufuncs) without worrying about
-whether the precision of the scalar constant will cause upcasting on
-your large (small precision) array.
-
-
-.. _ufuncs.overrides:
-
-Overriding Ufunc behavior
-=========================
-
-Classes (including ndarray subclasses) can override how ufuncs act on
-them by defining certain special methods. For details, see
-:ref:`arrays.classes`.
-
+is, a ufunc is a ":term:`vectorized <vectorization>`" wrapper for a function
+that takes a fixed number of specific inputs and produces a fixed number of
+specific outputs. For detailed information on universal functions, see
+:ref:`ufuncs-basics`.
:class:`ufunc`
==============
@@ -314,171 +38,171 @@ advanced usage and will not typically be used.
.. index::
pair: ufunc; keyword arguments
-*out*
+.. rubric:: *out*
- .. versionadded:: 1.6
+.. versionadded:: 1.6
- The first output can be provided as either a positional or a keyword
- parameter. Keyword 'out' arguments are incompatible with positional
- ones.
+The first output can be provided as either a positional or a keyword
+parameter. Keyword 'out' arguments are incompatible with positional
+ones.
- .. versionadded:: 1.10
+.. versionadded:: 1.10
- The 'out' keyword argument is expected to be a tuple with one entry per
- output (which can be None for arrays to be allocated by the ufunc).
- For ufuncs with a single output, passing a single array (instead of a
- tuple holding a single array) is also valid.
+The 'out' keyword argument is expected to be a tuple with one entry per
+output (which can be None for arrays to be allocated by the ufunc).
+For ufuncs with a single output, passing a single array (instead of a
+tuple holding a single array) is also valid.
- Passing a single array in the 'out' keyword argument to a ufunc with
- multiple outputs is deprecated, and will raise a warning in numpy 1.10,
- and an error in a future release.
+Passing a single array in the 'out' keyword argument to a ufunc with
+multiple outputs is deprecated, and will raise a warning in numpy 1.10,
+and an error in a future release.
- If 'out' is None (the default), a uninitialized return array is created.
- The output array is then filled with the results of the ufunc in the places
- that the broadcast 'where' is True. If 'where' is the scalar True (the
- default), then this corresponds to the entire output being filled.
- Note that outputs not explicitly filled are left with their
- uninitialized values.
+If 'out' is None (the default), an uninitialized return array is created.
+The output array is then filled with the results of the ufunc in the places
+that the broadcast 'where' is True. If 'where' is the scalar True (the
+default), then this corresponds to the entire output being filled.
+Note that outputs not explicitly filled are left with their
+uninitialized values.
- .. versionadded:: 1.13
+.. versionadded:: 1.13
- Operations where ufunc input and output operands have memory overlap are
- defined to be the same as for equivalent operations where there
- is no memory overlap. Operations affected make temporary copies
- as needed to eliminate data dependency. As detecting these cases
- is computationally expensive, a heuristic is used, which may in rare
- cases result in needless temporary copies. For operations where the
- data dependency is simple enough for the heuristic to analyze,
- temporary copies will not be made even if the arrays overlap, if it
- can be deduced copies are not necessary. As an example,
- ``np.add(a, b, out=a)`` will not involve copies.
+Operations where ufunc input and output operands have memory overlap are
+defined to be the same as for equivalent operations where there
+is no memory overlap. Operations affected make temporary copies
+as needed to eliminate data dependency. As detecting these cases
+is computationally expensive, a heuristic is used, which may in rare
+cases result in needless temporary copies. For operations where the
+data dependency is simple enough for the heuristic to analyze,
+temporary copies will not be made even if the arrays overlap, if it
+can be deduced copies are not necessary. As an example,
+``np.add(a, b, out=a)`` will not involve copies.
-*where*
+.. rubric:: *where*
- .. versionadded:: 1.7
+.. versionadded:: 1.7
- Accepts a boolean array which is broadcast together with the operands.
- Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone. This argument
- cannot be used for generalized ufuncs as those take non-scalar input.
+Accepts a boolean array which is broadcast together with the operands.
+Values of True indicate to calculate the ufunc at that position, values
+of False indicate to leave the value in the output alone. This argument
+cannot be used for generalized ufuncs as those take non-scalar input.
- Note that if an uninitialized return array is created, values of False
- will leave those values **uninitialized**.
+Note that if an uninitialized return array is created, values of False
+will leave those values **uninitialized**.
-*axes*
+.. rubric:: *axes*
- .. versionadded:: 1.15
+.. versionadded:: 1.15
- A list of tuples with indices of axes a generalized ufunc should operate
- on. For instance, for a signature of ``(i,j),(j,k)->(i,k)`` appropriate
- for matrix multiplication, the base elements are two-dimensional matrices
- and these are taken to be stored in the two last axes of each argument.
- The corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.
- For simplicity, for generalized ufuncs that operate on 1-dimensional arrays
- (vectors), a single integer is accepted instead of a single-element tuple,
- and for generalized ufuncs for which all outputs are scalars, the output
- tuples can be omitted.
+A list of tuples with indices of axes a generalized ufunc should operate
+on. For instance, for a signature of ``(i,j),(j,k)->(i,k)`` appropriate
+for matrix multiplication, the base elements are two-dimensional matrices
+and these are taken to be stored in the two last axes of each argument.
+The corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.
+For simplicity, for generalized ufuncs that operate on 1-dimensional arrays
+(vectors), a single integer is accepted instead of a single-element tuple,
+and for generalized ufuncs for which all outputs are scalars, the output
+tuples can be omitted.
-*axis*
+.. rubric:: *axis*
- .. versionadded:: 1.15
+.. versionadded:: 1.15
- A single axis over which a generalized ufunc should operate. This is a
- short-cut for ufuncs that operate over a single, shared core dimension,
- equivalent to passing in ``axes`` with entries of ``(axis,)`` for each
- single-core-dimension argument and ``()`` for all others. For instance,
- for a signature ``(i),(i)->()``, it is equivalent to passing in
- ``axes=[(axis,), (axis,), ()]``.
+A single axis over which a generalized ufunc should operate. This is a
+short-cut for ufuncs that operate over a single, shared core dimension,
+equivalent to passing in ``axes`` with entries of ``(axis,)`` for each
+single-core-dimension argument and ``()`` for all others. For instance,
+for a signature ``(i),(i)->()``, it is equivalent to passing in
+``axes=[(axis,), (axis,), ()]``.
-*keepdims*
+.. rubric:: *keepdims*
- .. versionadded:: 1.15
+.. versionadded:: 1.15
- If this is set to `True`, axes which are reduced over will be left in the
- result as a dimension with size one, so that the result will broadcast
- correctly against the inputs. This option can only be used for generalized
- ufuncs that operate on inputs that all have the same number of core
- dimensions and with outputs that have no core dimensions, i.e., with
- signatures like ``(i),(i)->()`` or ``(m,m)->()``. If used, the location of
- the dimensions in the output can be controlled with ``axes`` and ``axis``.
+If this is set to `True`, axes which are reduced over will be left in the
+result as a dimension with size one, so that the result will broadcast
+correctly against the inputs. This option can only be used for generalized
+ufuncs that operate on inputs that all have the same number of core
+dimensions and with outputs that have no core dimensions, i.e., with
+signatures like ``(i),(i)->()`` or ``(m,m)->()``. If used, the location of
+the dimensions in the output can be controlled with ``axes`` and ``axis``.
-*casting*
+.. rubric:: *casting*
- .. versionadded:: 1.6
+.. versionadded:: 1.6
- May be 'no', 'equiv', 'safe', 'same_kind', or 'unsafe'.
- See :func:`can_cast` for explanations of the parameter values.
+May be 'no', 'equiv', 'safe', 'same_kind', or 'unsafe'.
+See :func:`can_cast` for explanations of the parameter values.
- Provides a policy for what kind of casting is permitted. For compatibility
- with previous versions of NumPy, this defaults to 'unsafe' for numpy < 1.7.
- In numpy 1.7 a transition to 'same_kind' was begun where ufuncs produce a
- DeprecationWarning for calls which are allowed under the 'unsafe'
- rules, but not under the 'same_kind' rules. From numpy 1.10 and
- onwards, the default is 'same_kind'.
+Provides a policy for what kind of casting is permitted. For compatibility
+with previous versions of NumPy, this defaults to 'unsafe' for numpy < 1.7.
+In numpy 1.7 a transition to 'same_kind' was begun where ufuncs produce a
+DeprecationWarning for calls which are allowed under the 'unsafe'
+rules, but not under the 'same_kind' rules. From numpy 1.10 and
+onwards, the default is 'same_kind'.
-*order*
+.. rubric:: *order*
- .. versionadded:: 1.6
+.. versionadded:: 1.6
- Specifies the calculation iteration order/memory layout of the output array.
- Defaults to 'K'. 'C' means the output should be C-contiguous, 'F' means
- F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous and
- not also not C-contiguous, C-contiguous otherwise, and 'K' means to match
- the element ordering of the inputs as closely as possible.
+Specifies the calculation iteration order/memory layout of the output array.
+Defaults to 'K'. 'C' means the output should be C-contiguous, 'F' means
+F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous and
+not also not C-contiguous, C-contiguous otherwise, and 'K' means to match
+the element ordering of the inputs as closely as possible.
-*dtype*
+.. rubric:: *dtype*
- .. versionadded:: 1.6
+.. versionadded:: 1.6
- Overrides the DType of the output arrays the same way as the *signature*.
- This should ensure a matching precision of the calculation. The exact
- calculation DTypes chosen may depend on the ufunc and the inputs may be
- cast to this DType to perform the calculation.
+Overrides the DType of the output arrays the same way as the *signature*.
+This should ensure a matching precision of the calculation. The exact
+calculation DTypes chosen may depend on the ufunc and the inputs may be
+cast to this DType to perform the calculation.
-*subok*
+.. rubric:: *subok*
- .. versionadded:: 1.6
+.. versionadded:: 1.6
- Defaults to true. If set to false, the output will always be a strict
- array, not a subtype.
+Defaults to true. If set to false, the output will always be a strict
+array, not a subtype.
-*signature*
+.. rubric:: *signature*
- Either a Dtype, a tuple of DTypes, or a special signature string
- indicating the input and output types of a ufunc.
+Either a DType, a tuple of DTypes, or a special signature string
+indicating the input and output types of a ufunc.
- This argument allows the user to specify exact DTypes to be used for the
- calculation. Casting will be used as necessary. The actual DType of the
- input arrays is not considered unless ``signature`` is ``None`` for
- that array.
+This argument allows the user to specify exact DTypes to be used for the
+calculation. Casting will be used as necessary. The actual DType of the
+input arrays is not considered unless ``signature`` is ``None`` for
+that array.
- When all DTypes are fixed, a specific loop is chosen or an error raised
- if no matching loop exists.
- If some DTypes are not specified and left ``None``, the behaviour may
- depend on the ufunc.
- At this time, a list of available signatures is provided by the **types**
- attribute of the ufunc. (This list may be missing DTypes not defined
- by NumPy.)
+When all DTypes are fixed, a specific loop is chosen or an error raised
+if no matching loop exists.
+If some DTypes are not specified and left ``None``, the behaviour may
+depend on the ufunc.
+At this time, a list of available signatures is provided by the **types**
+attribute of the ufunc. (This list may be missing DTypes not defined
+by NumPy.)
- The ``signature`` only specifies the DType class/type. For example, it
- can specifiy that the operation should be ``datetime64`` or ``float64``
- operation. It does not specify the ``datetime64`` time-unit or the
- ``float64`` byte-order.
+The ``signature`` only specifies the DType class/type. For example, it
+can specify that the operation should be ``datetime64`` or ``float64``
+operation. It does not specify the ``datetime64`` time-unit or the
+``float64`` byte-order.
- For backwards compatibility this argument can also be provided as *sig*,
- although the long form is preferred. Note that this should not be
- confused with the generalized ufunc :ref:`signature <details-of-signature>`
- that is stored in the **signature** attribute of the of the ufunc object.
+For backwards compatibility this argument can also be provided as *sig*,
+although the long form is preferred. Note that this should not be
+confused with the generalized ufunc :ref:`signature <details-of-signature>`
+that is stored in the **signature** attribute of the ufunc object.
-*extobj*
+.. rubric:: *extobj*
- a list of length 3 specifying the ufunc buffer-size, the error
- mode integer, and the error call-back function. Normally, these
- values are looked up in a thread-specific dictionary. Passing them
- here circumvents that look up and uses the low-level specification
- provided for the error mode. This may be useful, for example, as
- an optimization for calculations requiring many ufunc calls on
- small arrays in a loop.
+A list of length 3 specifying the ufunc buffer-size, the error
+mode integer, and the error call-back function. Normally, these
+values are looked up in a thread-specific dictionary. Passing them
+here circumvents that look up and uses the low-level specification
+provided for the error mode. This may be useful, for example, as
+an optimization for calculations requiring many ufunc calls on
+small arrays in a loop.
@@ -517,39 +241,6 @@ possess. None of the attributes can be set.
Methods
-------
-All ufuncs have four methods. However, these methods only make sense on scalar
-ufuncs that take two input arguments and return one output argument.
-Attempting to call these methods on other ufuncs will cause a
-:exc:`ValueError`. The reduce-like methods all take an *axis* keyword, a *dtype*
-keyword, and an *out* keyword, and the arrays must all have dimension >= 1.
-The *axis* keyword specifies the axis of the array over which the reduction
-will take place (with negative values counting backwards). Generally, it is an
-integer, though for :meth:`ufunc.reduce`, it can also be a tuple of `int` to
-reduce over several axes at once, or None, to reduce over all axes.
-The *dtype* keyword allows you to manage a very common problem that arises
-when naively using :meth:`ufunc.reduce`. Sometimes you may
-have an array of a certain data type and wish to add up all of its
-elements, but the result does not fit into the data type of the
-array. This commonly happens if you have an array of single-byte
-integers. The *dtype* keyword allows you to alter the data type over which
-the reduction takes place (and therefore the type of the output). Thus,
-you can ensure that the output is a data type with precision large enough
-to handle your output. The responsibility of altering the reduce type is
-mostly up to you. There is one exception: if no *dtype* is given for a
-reduction on the "add" or "multiply" operations, then if the input type is
-an integer (or Boolean) data-type and smaller than the size of the
-:class:`int_` data type, it will be internally upcast to the :class:`int_`
-(or :class:`uint`) data-type. Finally, the *out* keyword allows you to provide
-an output array (for single-output ufuncs, which are currently the only ones
-supported; for future extension, however, a tuple with a single argument
-can be passed in). If *out* is given, the *dtype* argument is ignored.
-
-Ufuncs also have a fifth method that allows in place operations to be
-performed using fancy indexing. No buffering is used on the dimensions where
-fancy indexing is used, so the fancy index can list an item more than once and
-the operation will be performed on the result of the previous operation for
-that item.
-
.. index::
pair: ufunc; methods
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 484181a4f..e9057a531 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -6,6 +6,7 @@ Release Notes
:maxdepth: 3
1.22.0 <release/1.22.0-notes>
+ 1.21.2 <release/1.21.2-notes>
1.21.1 <release/1.21.1-notes>
1.21.0 <release/1.21.0-notes>
1.20.3 <release/1.20.3-notes>
diff --git a/doc/source/release/1.21.2-notes.rst b/doc/source/release/1.21.2-notes.rst
new file mode 100644
index 000000000..bc17c069e
--- /dev/null
+++ b/doc/source/release/1.21.2-notes.rst
@@ -0,0 +1,59 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.21.2 Release Notes
+==========================
+
+NumPy 1.21.2 is a maintenance release that fixes bugs discovered after
+1.21.1. It also provides 64 bit manylinux Python 3.10.0rc1 wheels for
+downstream testing. Note that Python 3.10 is not yet final. It also has
+preliminary support for Windows on ARM64, but there is no OpenBLAS for that
+platform and no wheels are available.
+
+The Python versions supported for this release are 3.7-3.9. The 1.21.x series
+is compatible with Python 3.10.0rc1 and Python 3.10 will be officially
+supported after it is released. The previous problems with gcc-11.1 have been
+fixed by gcc-11.2, check your version if you are using gcc-11.
+
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Bas van Beek
+* Carl Johnsen +
+* Charles Harris
+* Gwyn Ciesla +
+* Matthieu Dartiailh
+* Matti Picus
+* Niyas Sait +
+* Ralf Gommers
+* Sayed Adel
+* Sebastian Berg
+
+
+Pull requests merged
+====================
+
+A total of 18 pull requests were merged for this release.
+
+* `#19497 <https://github.com/numpy/numpy/pull/19497>`__: MAINT: set Python version for 1.21.x to ``<3.11``
+* `#19533 <https://github.com/numpy/numpy/pull/19533>`__: BUG: Fix an issue wherein importing ``numpy.typing`` could raise
+* `#19646 <https://github.com/numpy/numpy/pull/19646>`__: MAINT: Update Cython version for Python 3.10.
+* `#19648 <https://github.com/numpy/numpy/pull/19648>`__: TST: Bump the python 3.10 test version from beta4 to rc1
+* `#19651 <https://github.com/numpy/numpy/pull/19651>`__: TST: avoid distutils.sysconfig in runtests.py
+* `#19652 <https://github.com/numpy/numpy/pull/19652>`__: MAINT: add missing dunder method to nditer type hints
+* `#19656 <https://github.com/numpy/numpy/pull/19656>`__: BLD, SIMD: Fix testing extra checks when ``-Werror`` isn't applicable...
+* `#19657 <https://github.com/numpy/numpy/pull/19657>`__: BUG: Remove logical object ufuncs with bool output
+* `#19658 <https://github.com/numpy/numpy/pull/19658>`__: MAINT: Include .coveragerc in source distributions to support...
+* `#19659 <https://github.com/numpy/numpy/pull/19659>`__: BUG: Fix bad write in masked iterator output copy paths
+* `#19660 <https://github.com/numpy/numpy/pull/19660>`__: ENH: Add support for windows on arm targets
+* `#19661 <https://github.com/numpy/numpy/pull/19661>`__: BUG: add base to templated arguments for platlib
+* `#19662 <https://github.com/numpy/numpy/pull/19662>`__: BUG,DEP: Non-default UFunc signature/dtype usage should be deprecated
+* `#19666 <https://github.com/numpy/numpy/pull/19666>`__: MAINT: Add Python 3.10 to supported versions.
+* `#19668 <https://github.com/numpy/numpy/pull/19668>`__: TST,BUG: Sanitize path-separators when running ``runtest.py``
+* `#19671 <https://github.com/numpy/numpy/pull/19671>`__: BLD: load extra flags when checking for libflame
+* `#19676 <https://github.com/numpy/numpy/pull/19676>`__: BLD: update circleCI docker image
+* `#19677 <https://github.com/numpy/numpy/pull/19677>`__: REL: Prepare for 1.21.2 release.
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index 499e9ec5c..bb570f622 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -83,9 +83,12 @@ If you aren't familiar with this style, it's very easy to understand.
If you see ``>>>``, you're looking at **input**, or the code that
you would enter. Everything that doesn't have ``>>>`` in front of it
is **output**, or the results of running your code. This is the style
-you see when you run ``python`` on the command line, but if you're using IPython, you might see a different style.
+you see when you run ``python`` on the command line, but if you're using
+IPython, you might see a different style. Note that the ``>>>`` is not part
+of the code and will cause an error if typed or pasted into the Python
+shell. It can be safely typed or pasted into the IPython shell; the ``>>>``
+is ignored.
-For more information, see :ref:`documentation_conventions`.
What’s the difference between a Python list and a NumPy array?
--------------------------------------------------------------
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index c31fc98bd..5a252122f 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -61,6 +61,8 @@ The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
+.. _general-broadcasting-rules:
+
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
@@ -98,6 +100,32 @@ operation::
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
+
+.. _arrays.broadcasting.broadcastable:
+
+Broadcastable arrays
+====================
+
+.. index:: broadcastable
+
+A set of arrays is called "broadcastable" to the same shape if
+the above rules produce a valid result.
+
+For example, if ``a.shape`` is (5,1), ``b.shape`` is (1,6), ``c.shape`` is (6,)
+and ``d.shape`` is () so that *d* is a scalar, then *a*, *b*, *c*,
+and *d* are all broadcastable to dimension (5,6); and
+
+- *a* acts like a (5,6) array where ``a[:,0]`` is broadcast to the other
+ columns,
+
+- *b* acts like a (5,6) array where ``b[0,:]`` is broadcast
+ to the other rows,
+
+- *c* acts like a (1,6) array and therefore like a (5,6) array
+ where ``c[:]`` is broadcast to every row, and finally,
+
+- *d* acts like a (5,6) array where the single value is repeated.
+
Here are some more examples::
A (2d array): 5 x 4
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index ccd6de184..a68def887 100644
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -59,8 +59,8 @@ in overflow. This feature can often be misunderstood. If you
perform calculations with mismatching ``dtypes``, you can get unwanted
results, for example::
- >>> a = array([2, 3, 4], dtype = np.uint32)
- >>> b = array([5, 6, 7], dtype = np.uint32)
+ >>> a = np.array([2, 3, 4], dtype=np.uint32)
+ >>> b = np.array([5, 6, 7], dtype=np.uint32)
>>> c_unsigned32 = a - b
>>> print('unsigned c:', c_unsigned32, c_unsigned32.dtype)
unsigned c: [4294967293 4294967293 4294967293] uint32
@@ -235,7 +235,7 @@ library. Below, two arrays are created with shapes (2,3) and (2,3,2),
respectively. The seed is set to 42 so you can reproduce these
pseudorandom numbers::
- >>> import numpy.random.default_rng
+ >>> from numpy.random import default_rng
>>> default_rng(42).random((2,3))
array([[0.77395605, 0.43887844, 0.85859792],
[0.69736803, 0.09417735, 0.97562235]])
@@ -296,7 +296,7 @@ There are a number of routines to join existing arrays e.g. :func:`numpy.vstack`
arrays into a 4-by-4 array using ``block``::
>>> A = np.ones((2, 2))
- >>> B = np.eye((2, 2))
+ >>> B = np.eye(2, 2)
>>> C = np.zeros((2, 2))
>>> D = np.diag((-3, -4))
>>> np.block([[A, B],
diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst
index 7ee61b130..264c3d721 100644
--- a/doc/source/user/basics.indexing.rst
+++ b/doc/source/user/basics.indexing.rst
@@ -1,35 +1,43 @@
.. _basics.indexing:
-********
-Indexing
-********
+****************************************
+Indexing on :class:`ndarrays <.ndarray>`
+****************************************
.. seealso::
- :ref:`Indexing <arrays.indexing>`
-
:ref:`Indexing routines <routines.indexing>`
-Array indexing refers to any use of the square brackets ([]) to index
-array values. There are many options to indexing, which give NumPy
-indexing great power, but with power comes some complexity and the
-potential for confusion. This section is just an overview of the
-various options and issues related to indexing. Aside from single
-element indexing, the details on most of these options are to be
-found in related sections.
+.. sectionauthor:: adapted from "Guide to NumPy" by Travis E. Oliphant
+
+.. currentmodule:: numpy
+
+.. index:: indexing, slicing
-Assignment vs referencing
-=========================
+:class:`ndarrays <ndarray>` can be indexed using the standard Python
+``x[obj]`` syntax, where *x* is the array and *obj* the selection.
+There are different kinds of indexing available depending on *obj*:
+basic indexing, advanced indexing and field access.
Most of the following examples show the use of indexing when
referencing data in an array. The examples work just as well
-when assigning to an array. See the section at the end for
+when assigning to an array. See :ref:`assigning-values-to-indexed-arrays` for
specific examples and explanations on how assignments work.
+Note that in Python, ``x[(exp1, exp2, ..., expN)]`` is equivalent to
+``x[exp1, exp2, ..., expN]``; the latter is just syntactic sugar
+for the former.
+
+
+Basic indexing
+--------------
+
+.. _single-element-indexing:
+
Single element indexing
-=======================
+^^^^^^^^^^^^^^^^^^^^^^^
-Single element indexing for a 1-D array is what one expects. It work
+Single element indexing works
exactly like that for other standard Python sequences. It is 0-based,
and accepts negative indices for indexing from the end of the array. ::
@@ -39,14 +47,13 @@ and accepts negative indices for indexing from the end of the array. ::
>>> x[-2]
8
-Unlike lists and tuples, NumPy arrays support multidimensional indexing
-for multidimensional arrays. That means that it is not necessary to
+It is not necessary to
separate each dimension's index into its own set of square brackets. ::
- >>> x.shape = (2,5) # now x is 2-dimensional
- >>> x[1,3]
+ >>> x.shape = (2, 5) # now x is 2-dimensional
+ >>> x[1, 3]
8
- >>> x[1,-1]
+ >>> x[1, -1]
9
Note that if one indexes a multidimensional array with fewer indices
@@ -59,8 +66,9 @@ That is, each index specified selects the array corresponding to the
rest of the dimensions selected. In the above example, choosing 0
means that the remaining dimension of length 5 is being left unspecified,
and that what is returned is an array of that dimensionality and size.
-It must be noted that the returned array is not a copy of the original,
-but points to the same values in memory as does the original array.
+It must be noted that the returned array is a :term:`view`, i.e., it is not a
+copy of the original, but points to the same values in memory as does the
+original array.
In this case, the 1-D array at the first position (0) is returned.
So using a single index on the returned array, results in a single
element being returned. That is: ::
@@ -68,304 +76,694 @@ element being returned. That is: ::
>>> x[0][2]
2
-So note that ``x[0,2] = x[0][2]`` though the second case is more
+So note that ``x[0, 2] == x[0][2]`` though the second case is more
inefficient as a new temporary array is created after the first index
that is subsequently indexed by 2.
-Note to those used to IDL or Fortran memory order as it relates to
-indexing. NumPy uses C-order indexing. That means that the last
-index usually represents the most rapidly changing memory location,
-unlike Fortran or IDL, where the first index represents the most
-rapidly changing location in memory. This difference represents a
-great potential for confusion.
-
-Other indexing options
-======================
-
-It is possible to slice and stride arrays to extract arrays of the
-same number of dimensions, but of different sizes than the original.
-The slicing and striding works exactly the same way it does for lists
-and tuples except that they can be applied to multiple dimensions as
-well. A few examples illustrates best: ::
-
- >>> x = np.arange(10)
- >>> x[2:5]
- array([2, 3, 4])
- >>> x[:-7]
- array([0, 1, 2])
- >>> x[1:7:2]
- array([1, 3, 5])
- >>> y = np.arange(35).reshape(5,7)
- >>> y[1:5:2,::3]
- array([[ 7, 10, 13],
- [21, 24, 27]])
-
-Note that slices of arrays do not copy the internal array data but
-only produce new views of the original data. This is different from
-list or tuple slicing and an explicit ``copy()`` is recommended if
-the original data is not required anymore.
-
-It is possible to index arrays with other arrays for the purposes of
-selecting lists of values out of arrays into new arrays. There are
-two different ways of accomplishing this. One uses one or more arrays
-of index values. The other involves giving a boolean array of the proper
-shape to indicate the values to be selected. Index arrays are a very
-powerful tool that allow one to avoid looping over individual elements in
-arrays and thus greatly improve performance.
-
-It is possible to use special features to effectively increase the
-number of dimensions in an array through indexing so the resulting
-array acquires the shape needed for use in an expression or with a
-specific function.
-
-Index arrays
-============
-
-NumPy arrays may be indexed with other arrays (or any other sequence-
-like object that can be converted to an array, such as lists, with the
-exception of tuples; see the end of this document for why this is). The
-use of index arrays ranges from simple, straightforward cases to
-complex, hard-to-understand cases. For all cases of index arrays, what
-is returned is a copy of the original data, not a view as one gets for
-slices.
-
-Index arrays must be of integer type. Each value in the array indicates
-which value in the array to use in place of the index. To illustrate: ::
-
- >>> x = np.arange(10,1,-1)
- >>> x
- array([10, 9, 8, 7, 6, 5, 4, 3, 2])
- >>> x[np.array([3, 3, 1, 8])]
- array([7, 7, 9, 2])
-
-
-The index array consisting of the values 3, 3, 1 and 8 correspondingly
-create an array of length 4 (same as the index array) where each index
-is replaced by the value the index array has in the array being indexed.
-
-Negative values are permitted and work as they do with single indices
-or slices: ::
-
- >>> x[np.array([3,3,-3,8])]
- array([7, 7, 4, 2])
-
-It is an error to have index values out of bounds: ::
-
- >>> x[np.array([3, 3, 20, 8])]
- <type 'exceptions.IndexError'>: index 20 out of bounds 0<=index<9
-
-Generally speaking, what is returned when index arrays are used is
-an array with the same shape as the index array, but with the type
-and values of the array being indexed. As an example, we can use a
-multidimensional index array instead: ::
-
- >>> x[np.array([[1,1],[2,3]])]
- array([[9, 9],
- [8, 7]])
+.. note::
+
+ NumPy uses C-order indexing. That means that the last
+ index usually represents the most rapidly changing memory location,
+ unlike Fortran or IDL, where the first index represents the most
+ rapidly changing location in memory. This difference represents a
+ great potential for confusion.
+
+
+Slicing and striding
+^^^^^^^^^^^^^^^^^^^^
+
+Basic slicing extends Python's basic concept of slicing to N
+dimensions. Basic slicing occurs when *obj* is a :class:`slice` object
+(constructed by ``start:stop:step`` notation inside of brackets), an
+integer, or a tuple of slice objects and integers. :py:data:`Ellipsis`
+and :const:`newaxis` objects can be interspersed with these as
+well.
+
+.. deprecated:: 1.15.0
+
+ In order to remain backward compatible with a common usage in
+ Numeric, basic slicing is also initiated if the selection object is
+ any non-ndarray and non-tuple sequence (such as a :class:`list`) containing
+ :class:`slice` objects, the :py:data:`Ellipsis` object, or the :const:`newaxis`
+ object, but not for integer arrays or other embedded sequences.
+
+.. index::
+ triple: ndarray; special methods; getitem
+ triple: ndarray; special methods; setitem
+ single: ellipsis
+ single: newaxis
+
+The simplest case of indexing with *N* integers returns an :ref:`array
+scalar <arrays.scalars>` representing the corresponding item. As in
+Python, all indices are zero-based: for the *i*-th index :math:`n_i`,
+the valid range is :math:`0 \le n_i < d_i` where :math:`d_i` is the
+*i*-th element of the shape of the array. Negative indices are
+interpreted as counting from the end of the array (*i.e.*, if
+:math:`n_i < 0`, it means :math:`n_i + d_i`).
+
+
+All arrays generated by basic slicing are always :term:`views <view>`
+of the original array.
+
+.. note::
+
+ NumPy slicing creates a :term:`view` instead of a copy as in the case of
+ built-in Python sequences such as string, tuple and list.
+ Care must be taken when extracting
+ a small portion from a large array which becomes useless after the
+ extraction, because the small portion extracted contains a reference
+ to the large original array whose memory will not be released until
+ all arrays derived from it are garbage-collected. In such cases an
+ explicit ``copy()`` is recommended.
+
+The standard rules of sequence slicing apply to basic slicing on a
+per-dimension basis (including using a step index). Some useful
+concepts to remember include:
+
+- The basic slice syntax is ``i:j:k`` where *i* is the starting index,
+ *j* is the stopping index, and *k* is the step (:math:`k\neq0`).
+ This selects the *m* elements (in the corresponding dimension) with
+ index values *i*, *i + k*, ..., *i + (m - 1) k* where
+ :math:`m = q + (r\neq0)` and *q* and *r* are the quotient and remainder
+ obtained by dividing *j - i* by *k*: *j - i = q k + r*, so that
+ *i + (m - 1) k < j*.
+ For example::
+
+ >>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> x[1:7:2]
+ array([1, 3, 5])
+
+- Negative *i* and *j* are interpreted as *n + i* and *n + j* where
+ *n* is the number of elements in the corresponding dimension.
+ Negative *k* makes stepping go towards smaller indices.
+ From the above example::
+
+ >>> x[-2:10]
+ array([8, 9])
+ >>> x[-3:3:-1]
+ array([7, 6, 5, 4])
+
+- Assume *n* is the number of elements in the dimension being
+ sliced. Then, if *i* is not given it defaults to 0 for *k > 0* and
+ *n - 1* for *k < 0* . If *j* is not given it defaults to *n* for *k > 0*
+ and *-n-1* for *k < 0* . If *k* is not given it defaults to 1. Note that
+ ``::`` is the same as ``:`` and means select all indices along this
+ axis.
+ From the above example::
+
+ >>> x[5:]
+ array([5, 6, 7, 8, 9])
+
+- If the number of objects in the selection tuple is less than
+ *N*, then ``:`` is assumed for any subsequent dimensions.
+ For example::
+
+ >>> x = np.array([[[1],[2],[3]], [[4],[5],[6]]])
+ >>> x.shape
+ (2, 3, 1)
+ >>> x[1:2]
+ array([[[4],
+ [5],
+ [6]]])
+
+- An integer, *i*, returns the same values as ``i:i+1``
+ **except** the dimensionality of the returned object is reduced by
+ 1. In particular, a selection tuple with the *p*-th
+ element an integer (and all other entries ``:``) returns the
+ corresponding sub-array with dimension *N - 1*. If *N = 1*
+ then the returned object is an array scalar. These objects are
+ explained in :ref:`arrays.scalars`.
+
+- If the selection tuple has all entries ``:`` except the
+ *p*-th entry which is a slice object ``i:j:k``,
+ then the returned array has dimension *N* formed by
+ concatenating the sub-arrays returned by integer indexing of
+ elements *i*, *i+k*, ..., *i + (m - 1) k < j*,
+
+- Basic slicing with more than one non-``:`` entry in the slicing
+ tuple, acts like repeated application of slicing using a single
+ non-``:`` entry, where the non-``:`` entries are successively taken
+ (with all other non-``:`` entries replaced by ``:``). Thus,
+ ``x[ind1, ..., ind2,:]`` acts like ``x[ind1][..., ind2, :]`` under basic
+ slicing.
+
+ .. warning:: The above is **not** true for advanced indexing.
+
+- You may use slicing to set values in the array, but (unlike lists) you
+ can never grow the array. The size of the value to be set in
+  ``x[obj] = value`` must be broadcastable to the same shape as
+ ``x[obj]``.
+
+- A slicing tuple can always be constructed as *obj*
+ and used in the ``x[obj]`` notation. Slice objects can be used in
+ the construction in place of the ``[start:stop:step]``
+ notation. For example, ``x[1:10:5, ::-1]`` can also be implemented
+ as ``obj = (slice(1, 10, 5), slice(None, None, -1)); x[obj]`` . This
+ can be useful for constructing generic code that works on arrays
+ of arbitrary dimensions. See :ref:`dealing-with-variable-indices`
+ for more information.
+
+.. index::
+ pair: ndarray; view
+
+
+Dimensional indexing tools
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are some tools to facilitate the easy matching of array shapes with
+expressions and in assignments.
+
+:py:data:`Ellipsis` expands to the number of ``:`` objects needed for the
+selection tuple to index all dimensions. In most cases, this means that the
+length of the expanded selection tuple is ``x.ndim``. There may only be a
+single ellipsis present.
+From the above example::
+
+ >>> x[..., 0]
+ array([[1, 2, 3],
+ [4, 5, 6]])
-Indexing Multi-dimensional arrays
-=================================
-
-Things become more complex when multidimensional arrays are indexed,
-particularly with multidimensional index arrays. These tend to be
-more unusual uses, but they are permitted, and they are useful for some
-problems. We'll start with the simplest multidimensional case (using
-the array y from the previous examples): ::
-
- >>> y[np.array([0,2,4]), np.array([0,1,2])]
- array([ 0, 15, 30])
+This is equivalent to::
-In this case, if the index arrays have a matching shape, and there is
-an index array for each dimension of the array being indexed, the
-resultant array has the same shape as the index arrays, and the values
-correspond to the index set for each position in the index arrays. In
-this example, the first index value is 0 for both index arrays, and
-thus the first value of the resultant array is y[0,0]. The next value
-is y[2,1], and the last is y[4,2].
+ >>> x[:, :, 0]
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+Each :const:`newaxis` object in the selection tuple serves to expand
+the dimensions of the resulting selection by one unit-length
+dimension. The added dimension is the position of the :const:`newaxis`
+object in the selection tuple. :const:`newaxis` is an alias for
+``None``, and ``None`` can be used in place of this with the same result.
+From the above example::
+
+ >>> x[:, np.newaxis, :, :].shape
+ (2, 1, 3, 1)
+ >>> x[:, None, :, :].shape
+ (2, 1, 3, 1)
+
+This can be handy to combine two
+arrays in a way that otherwise would require explicit reshaping
+operations. For example::
+
+ >>> x = np.arange(5)
+ >>> x[:, np.newaxis] + x[np.newaxis, :]
+ array([[0, 1, 2, 3, 4],
+ [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 6],
+ [3, 4, 5, 6, 7],
+ [4, 5, 6, 7, 8]])
+
+
+.. _advanced-indexing:
+
+Advanced indexing
+-----------------
+
+Advanced indexing is triggered when the selection object, *obj*, is a
+non-tuple sequence object, an :class:`ndarray` (of data type integer or bool),
+or a tuple with at least one sequence object or ndarray (of data type
+integer or bool). There are two types of advanced indexing: integer
+and Boolean.
+
+Advanced indexing always returns a *copy* of the data (contrast with
+basic slicing that returns a :term:`view`).
+
+.. warning::
+
+ The definition of advanced indexing means that ``x[(1, 2, 3),]`` is
+ fundamentally different than ``x[(1, 2, 3)]``. The latter is
+ equivalent to ``x[1, 2, 3]`` which will trigger basic selection while
+ the former will trigger advanced indexing. Be sure to understand
+ why this occurs.
+
+ Also recognize that ``x[[1, 2, 3]]`` will trigger advanced indexing,
+ whereas due to the deprecated Numeric compatibility mentioned above,
+ ``x[[1, 2, slice(None)]]`` will trigger basic slicing.
+
+Integer array indexing
+^^^^^^^^^^^^^^^^^^^^^^
+
+Integer array indexing allows selection of arbitrary items in the array
+based on their *N*-dimensional index. Each integer array represents a number
+of indices into that dimension.
+
+Negative values are permitted in the index arrays and work as they do with
+single indices or slices::
+
+ >>> x = np.arange(10, 1, -1)
+ >>> x
+ array([10, 9, 8, 7, 6, 5, 4, 3, 2])
+ >>> x[np.array([3, 3, 1, 8])]
+ array([7, 7, 9, 2])
+ >>> x[np.array([3, 3, -3, 8])]
+ array([7, 7, 4, 2])
+
+If the index values are out of bounds then an ``IndexError`` is raised::
+
+ >>> x = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> x[np.array([1, -1])]
+ array([[3, 4],
+ [5, 6]])
+ >>> x[np.array([3, 4])]
+ IndexError: index 3 is out of bounds for axis 0 with size 3
+
+When the index consists of as many integer arrays as dimensions of the array
+being indexed, the indexing is straightforward, but different from slicing.
+
+Advanced indices always are :ref:`broadcast<basics.broadcasting>` and
+iterated as *one*::
+
+ result[i_1, ..., i_M] == x[ind_1[i_1, ..., i_M], ind_2[i_1, ..., i_M],
+ ..., ind_N[i_1, ..., i_M]]
+
+Note that the resulting shape is identical to the (broadcast) indexing array
+shapes ``ind_1, ..., ind_N``. If the indices cannot be broadcast to the
+same shape, an exception ``IndexError: shape mismatch: indexing arrays could
+not be broadcast together with shapes...`` is raised.
+
+Indexing with multidimensional index arrays tends
+to be a more unusual use, but it is permitted, and it is useful for some
+problems. We’ll start with the simplest multidimensional case::
+
+ >>> y = np.arange(35).reshape(5, 7)
+ >>> y
+ array([[ 0, 1, 2, 3, 4, 5, 6],
+ [ 7, 8, 9, 10, 11, 12, 13],
+ [14, 15, 16, 17, 18, 19, 20],
+ [21, 22, 23, 24, 25, 26, 27],
+ [28, 29, 30, 31, 32, 33, 34]])
+ >>> y[np.array([0, 2, 4]), np.array([0, 1, 2])]
+ array([ 0, 15, 30])
+
+In this case, if the index arrays have a matching shape, and there is an
+index array for each dimension of the array being indexed, the resultant
+array has the same shape as the index arrays, and the values correspond
+to the index set for each position in the index arrays. In this example,
+the first index value is 0 for both index arrays, and thus the first value
+of the resultant array is ``y[0, 0]``. The next value is ``y[2, 1]``, and
+the last is ``y[4, 2]``.
If the index arrays do not have the same shape, there is an attempt to
-broadcast them to the same shape. If they cannot be broadcast to the
-same shape, an exception is raised: ::
+broadcast them to the same shape. If they cannot be broadcast to the same
+shape, an exception is raised::
- >>> y[np.array([0,2,4]), np.array([0,1])]
- <type 'exceptions.ValueError'>: shape mismatch: objects cannot be
- broadcast to a single shape
+ >>> y[np.array([0, 2, 4]), np.array([0, 1])]
+ IndexError: shape mismatch: indexing arrays could not be broadcast
+ together with shapes (3,) (2,)
The broadcasting mechanism permits index arrays to be combined with
scalars for other indices. The effect is that the scalar value is used
-for all the corresponding values of the index arrays: ::
-
- >>> y[np.array([0,2,4]), 1]
- array([ 1, 15, 29])
-
-Jumping to the next level of complexity, it is possible to only
-partially index an array with index arrays. It takes a bit of thought
-to understand what happens in such cases. For example if we just use
-one index array with y: ::
-
- >>> y[np.array([0,2,4])]
- array([[ 0, 1, 2, 3, 4, 5, 6],
- [14, 15, 16, 17, 18, 19, 20],
- [28, 29, 30, 31, 32, 33, 34]])
-
-What results is the construction of a new array where each value of
-the index array selects one row from the array being indexed and the
-resultant array has the resulting shape (number of index elements,
-size of row).
-
-An example of where this may be useful is for a color lookup table
-where we want to map the values of an image into RGB triples for
+for all the corresponding values of the index arrays::
+
+ >>> y[np.array([0, 2, 4]), 1]
+ array([ 1, 15, 29])
+
+Jumping to the next level of complexity, it is possible to only partially
+index an array with index arrays. It takes a bit of thought to understand
+what happens in such cases. For example if we just use one index array
+with y::
+
+ >>> y[np.array([0, 2, 4])]
+ array([[ 0, 1, 2, 3, 4, 5, 6],
+ [14, 15, 16, 17, 18, 19, 20],
+ [28, 29, 30, 31, 32, 33, 34]])
+
+It results in the construction of a new array where each value of the
+index array selects one row from the array being indexed and the resultant
+array has the resulting shape (number of index elements, size of row).
+
+In general, the shape of the resultant array will be the concatenation of
+the shape of the index array (or the shape that all the index arrays were
+broadcast to) with the shape of any unused dimensions (those not indexed)
+in the array being indexed.
+
+.. rubric:: Example
+
+From each row, a specific element should be selected. The row index is just
+``[0, 1, 2]`` and the column index specifies the element to choose for the
+corresponding row, here ``[0, 1, 0]``. Using both together the task
+can be solved using advanced indexing::
+
+ >>> x = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> x[[0, 1, 2], [0, 1, 0]]
+ array([1, 4, 5])
+
+To achieve a behaviour similar to the basic slicing above, broadcasting can be
+used. The function :func:`ix_` can help with this broadcasting. This is best
+understood with an example.
+
+.. rubric:: Example
+
+From a 4x3 array the corner elements should be selected using advanced
+indexing. Thus all elements for which the column is one of ``[0, 2]`` and
+the row is one of ``[0, 3]`` need to be selected. To use advanced indexing
+one needs to select all elements *explicitly*. Using the method explained
+previously one could write::
+
+ >>> x = np.array([[ 0, 1, 2],
+ ... [ 3, 4, 5],
+ ... [ 6, 7, 8],
+ ... [ 9, 10, 11]])
+ >>> rows = np.array([[0, 0],
+ ... [3, 3]], dtype=np.intp)
+ >>> columns = np.array([[0, 2],
+ ... [0, 2]], dtype=np.intp)
+ >>> x[rows, columns]
+ array([[ 0, 2],
+ [ 9, 11]])
+
+However, since the indexing arrays above just repeat themselves,
+broadcasting can be used (compare operations such as
+``rows[:, np.newaxis] + columns``) to simplify this::
+
+ >>> rows = np.array([0, 3], dtype=np.intp)
+ >>> columns = np.array([0, 2], dtype=np.intp)
+ >>> rows[:, np.newaxis]
+ array([[0],
+ [3]])
+ >>> x[rows[:, np.newaxis], columns]
+ array([[ 0, 2],
+ [ 9, 11]])
+
+This broadcasting can also be achieved using the function :func:`ix_`::
+
+ >>> x[np.ix_(rows, columns)]
+ array([[ 0, 2],
+ [ 9, 11]])
+
+Note that without the ``np.ix_`` call, only the diagonal elements would
+be selected::
+
+ >>> x[rows, columns]
+ array([ 0, 11])
+
+This difference is the most important thing to remember about
+indexing with multiple advanced indices.
+
+.. rubric:: Example
+
+A real-life example of where advanced indexing may be useful is for a color
+lookup table where we want to map the values of an image into RGB triples for
display. The lookup table could have a shape (nlookup, 3). Indexing
such an array with an image with shape (ny, nx) with dtype=np.uint8
(or any integer type so long as values are with the bounds of the
lookup table) will result in an array of shape (ny, nx, 3) where a
triple of RGB values is associated with each pixel location.
-In general, the shape of the resultant array will be the concatenation
-of the shape of the index array (or the shape that all the index arrays
-were broadcast to) with the shape of any unused dimensions (those not
-indexed) in the array being indexed.
-Boolean or "mask" index arrays
-==============================
+Boolean array indexing
+^^^^^^^^^^^^^^^^^^^^^^
+
+This advanced indexing occurs when *obj* is an array object of Boolean
+type, such as may be returned from comparison operators. A single
+boolean index array is practically identical to ``x[obj.nonzero()]`` where,
+as described above, :meth:`obj.nonzero() <ndarray.nonzero>` returns a
+tuple (of length :attr:`obj.ndim <ndarray.ndim>`) of integer index
+arrays showing the :py:data:`True` elements of *obj*. However, it is
+faster when ``obj.shape == x.shape``.
+
+If ``obj.ndim == x.ndim``, ``x[obj]`` returns a 1-dimensional array
+filled with the elements of *x* corresponding to the :py:data:`True`
+values of *obj*. The search order will be :term:`row-major`,
+C-style. If *obj* has :py:data:`True` values at entries that are outside
+of the bounds of *x*, then an index error will be raised. If *obj* is
+smaller than *x* it is identical to filling it with :py:data:`False`.
+
+A common use case for this is filtering for desired element values.
+For example, one may wish to select all entries from an array which
+are not :const:`NaN`::
+
+ >>> x = np.array([[1., 2.], [np.nan, 3.], [np.nan, np.nan]])
+ >>> x[~np.isnan(x)]
+ array([1., 2., 3.])
+
+Or wish to add a constant to all negative elements::
+
+ >>> x = np.array([1., -1., -2., 3])
+ >>> x[x < 0] += 20
+ >>> x
+ array([1., 19., 18., 3.])
+
+In general if an index includes a Boolean array, the result will be
+identical to inserting ``obj.nonzero()`` into the same position
+and using the integer array indexing mechanism described above.
+``x[ind_1, boolean_array, ind_2]`` is equivalent to
+``x[(ind_1,) + boolean_array.nonzero() + (ind_2,)]``.
+
+If there is only one Boolean array and no integer indexing array present,
+this is straightforward. Care must only be taken to make sure that the
+boolean index has *exactly* as many dimensions as it is supposed to work
+with.
+
+In general, when the boolean array has fewer dimensions than the array being
+indexed, this is equivalent to ``x[b, ...]``, which means x is indexed by b
+followed by as many ``:`` as are needed to fill out the rank of x. Thus the
+shape of the result is one dimension containing the number of True elements of
+the boolean array, followed by the remaining dimensions of the array being
+indexed::
+
+ >>> x = np.arange(35).reshape(5, 7)
+ >>> b = x > 20
+ >>> b[:, 5]
+ array([False, False, False, True, True])
+ >>> x[b[:, 5]]
+ array([[21, 22, 23, 24, 25, 26, 27],
+ [28, 29, 30, 31, 32, 33, 34]])
+
+Here the 4th and 5th rows are selected from the indexed array and
+combined to make a 2-D array.
+
+.. rubric:: Example
-Boolean arrays used as indices are treated in a different manner
-entirely than index arrays. Boolean arrays must be of the same shape
-as the initial dimensions of the array being indexed. In the
-most straightforward case, the boolean array has the same shape: ::
+From an array, select all rows which sum up to less or equal two::
- >>> b = y>20
- >>> y[b]
- array([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])
+ >>> x = np.array([[0, 1], [1, 1], [2, 2]])
+ >>> rowsum = x.sum(-1)
+ >>> x[rowsum <= 2, :]
+ array([[0, 1],
+ [1, 1]])
-Unlike in the case of integer index arrays, in the boolean case, the
-result is a 1-D array containing all the elements in the indexed array
-corresponding to all the true elements in the boolean array. The
-elements in the indexed array are always iterated and returned in
-:term:`row-major` (C-style) order. The result is also identical to
-``y[np.nonzero(b)]``. As with index arrays, what is returned is a copy
-of the data, not a view as one gets with slices.
-The result will be multidimensional if y has more dimensions than b.
-For example: ::
+Combining multiple Boolean indexing arrays or a Boolean with an integer
+indexing array can best be understood with the
+:meth:`obj.nonzero() <ndarray.nonzero>` analogy. The function :func:`ix_`
+also supports boolean arrays and will work without any surprises.
- >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y
- array([False, False, False, True, True])
- >>> y[b[:,5]]
- array([[21, 22, 23, 24, 25, 26, 27],
- [28, 29, 30, 31, 32, 33, 34]])
+.. rubric:: Example
-Here the 4th and 5th rows are selected from the indexed array and
-combined to make a 2-D array.
+Use boolean indexing to select all rows adding up to an even
+number. At the same time columns 0 and 2 should be selected with an
+advanced integer index. Using the :func:`ix_` function this can be done
+with::
-In general, when the boolean array has fewer dimensions than the array
-being indexed, this is equivalent to y[b, ...], which means
-y is indexed by b followed by as many : as are needed to fill
-out the rank of y.
-Thus the shape of the result is one dimension containing the number
-of True elements of the boolean array, followed by the remaining
-dimensions of the array being indexed.
+ >>> x = np.array([[ 0, 1, 2],
+ ... [ 3, 4, 5],
+ ... [ 6, 7, 8],
+ ... [ 9, 10, 11]])
+ >>> rows = (x.sum(-1) % 2) == 0
+ >>> rows
+ array([False, True, False, True])
+ >>> columns = [0, 2]
+ >>> x[np.ix_(rows, columns)]
+ array([[ 3, 5],
+ [ 9, 11]])
-For example, using a 2-D boolean array of shape (2,3)
-with four True elements to select rows from a 3-D array of shape
-(2,3,5) results in a 2-D result of shape (4,5): ::
+Without the ``np.ix_`` call, only the diagonal elements would be
+selected.
- >>> x = np.arange(30).reshape(2,3,5)
- >>> x
- array([[[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [10, 11, 12, 13, 14]],
- [[15, 16, 17, 18, 19],
- [20, 21, 22, 23, 24],
- [25, 26, 27, 28, 29]]])
- >>> b = np.array([[True, True, False], [False, True, True]])
- >>> x[b]
- array([[ 0, 1, 2, 3, 4],
- [ 5, 6, 7, 8, 9],
- [20, 21, 22, 23, 24],
- [25, 26, 27, 28, 29]])
-
-For further details, consult the NumPy reference documentation on array indexing.
-
-Combining index arrays with slices
-==================================
-
-Index arrays may be combined with slices. For example: ::
-
- >>> y[np.array([0, 2, 4]), 1:3]
- array([[ 1, 2],
- [15, 16],
- [29, 30]])
-
-In effect, the slice and index array operation are independent.
-The slice operation extracts columns with index 1 and 2,
-(i.e. the 2nd and 3rd columns),
-followed by the index array operation which extracts rows with
-index 0, 2 and 4 (i.e the first, third and fifth rows).
+Or without ``np.ix_`` (compare the integer array examples)::
-This is equivalent to::
+ >>> rows = rows.nonzero()[0]
+ >>> x[rows[:, np.newaxis], columns]
+ array([[ 3, 5],
+ [ 9, 11]])
- >>> y[:, 1:3][np.array([0, 2, 4]), :]
- array([[ 1, 2],
- [15, 16],
- [29, 30]])
-
-Likewise, slicing can be combined with broadcasted boolean indices: ::
-
- >>> b = y > 20
- >>> b
- array([[False, False, False, False, False, False, False],
- [False, False, False, False, False, False, False],
- [False, False, False, False, False, False, False],
- [ True, True, True, True, True, True, True],
- [ True, True, True, True, True, True, True]])
- >>> y[b[:,5],1:3]
- array([[22, 23],
- [29, 30]])
-
-Structural indexing tools
-=========================
-
-To facilitate easy matching of array shapes with expressions and in
-assignments, the np.newaxis object can be used within array indices
-to add new dimensions with a size of 1. For example: ::
-
- >>> y.shape
- (5, 7)
- >>> y[:,np.newaxis,:].shape
- (5, 1, 7)
-
-Note that there are no new elements in the array, just that the
-dimensionality is increased. This can be handy to combine two
-arrays in a way that otherwise would require explicitly reshaping
-operations. For example: ::
-
- >>> x = np.arange(5)
- >>> x[:,np.newaxis] + x[np.newaxis,:]
- array([[0, 1, 2, 3, 4],
- [1, 2, 3, 4, 5],
- [2, 3, 4, 5, 6],
- [3, 4, 5, 6, 7],
- [4, 5, 6, 7, 8]])
-
-The ellipsis syntax maybe used to indicate selecting in full any
-remaining unspecified dimensions. For example: ::
-
- >>> z = np.arange(81).reshape(3,3,3,3)
- >>> z[1,...,2]
- array([[29, 32, 35],
- [38, 41, 44],
- [47, 50, 53]])
-
-This is equivalent to: ::
-
- >>> z[1,:,:,2]
- array([[29, 32, 35],
- [38, 41, 44],
- [47, 50, 53]])
+.. rubric:: Example
+
+Using a 2-D boolean array of shape (2, 3)
+with four True elements to select rows from a 3-D array of shape
+(2, 3, 5) results in a 2-D result of shape (4, 5)::
+
+ >>> x = np.arange(30).reshape(2, 3, 5)
+ >>> x
+ array([[[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14]],
+ [[15, 16, 17, 18, 19],
+ [20, 21, 22, 23, 24],
+ [25, 26, 27, 28, 29]]])
+ >>> b = np.array([[True, True, False], [False, True, True]])
+ >>> x[b]
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [20, 21, 22, 23, 24],
+ [25, 26, 27, 28, 29]])
+
+
+.. _combining-advanced-and-basic-indexing:
+
+Combining advanced and basic indexing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When there is at least one slice (``:``), ellipsis (``...``) or :const:`newaxis`
+in the index (or the array has more dimensions than there are advanced indices),
+then the behaviour can be more complicated. It is like concatenating the
+indexing result for each advanced index element.
+
+In the simplest case, there is only a *single* advanced index combined with
+a slice. For example::
+
+ >>> y = np.arange(35).reshape(5,7)
+ >>> y[np.array([0, 2, 4]), 1:3]
+ array([[ 1, 2],
+ [15, 16],
+ [29, 30]])
+
+In effect, the slice and index array operation are independent. The slice
+operation extracts columns with index 1 and 2, (i.e. the 2nd and 3rd columns),
+followed by the index array operation which extracts rows with index 0, 2 and 4
+(i.e. the first, third and fifth rows). This is equivalent to::
+
+ >>> y[:, 1:3][np.array([0, 2, 4]), :]
+ array([[ 1, 2],
+ [15, 16],
+ [29, 30]])
+
+A single advanced index can, for example, replace a slice and the result array
+will be the same. However, it is a copy and may have a different memory layout.
+A slice is preferable when it is possible.
+For example::
+
+ >>> x = np.array([[ 0, 1, 2],
+ ... [ 3, 4, 5],
+ ... [ 6, 7, 8],
+ ... [ 9, 10, 11]])
+ >>> x[1:2, 1:3]
+ array([[4, 5]])
+ >>> x[1:2, [1, 2]]
+ array([[4, 5]])
+
+The easiest way to understand a combination of *multiple* advanced indices may
+be to think in terms of the resulting shape. There are two parts to the indexing
+operation, the subspace defined by the basic indexing (excluding integers) and
+the subspace from the advanced indexing part. Two cases of index combination
+need to be distinguished:
+
+* The advanced indices are separated by a slice, :py:data:`Ellipsis` or
+ :const:`newaxis`. For example ``x[arr1, :, arr2]``.
+* The advanced indices are all next to each other.
+ For example ``x[..., arr1, arr2, :]`` but *not* ``x[arr1, :, 1]``
+ since ``1`` is an advanced index in this regard.
+
+In the first case, the dimensions resulting from the advanced indexing
+operation come first in the result array, and the subspace dimensions after
+that.
+In the second case, the dimensions from the advanced indexing operations
+are inserted into the result array at the same spot as they were in the
+initial array (the latter logic is what makes simple advanced indexing
+behave just like slicing).
+
+.. rubric:: Example
+
+Suppose ``x.shape`` is (10, 20, 30) and ``ind`` is a (2, 3, 4)-shaped
+indexing :class:`intp` array, then ``result = x[..., ind, :]`` has
+shape (10, 2, 3, 4, 30) because the (20,)-shaped subspace has been
+replaced with a (2, 3, 4)-shaped broadcasted indexing subspace. If
+we let *i, j, k* loop over the (2, 3, 4)-shaped subspace then
+``result[..., i, j, k, :] = x[..., ind[i, j, k], :]``. This example
+produces the same result as :meth:`x.take(ind, axis=-2) <ndarray.take>`.
+
+.. rubric:: Example
+
+Let ``x.shape`` be (10, 20, 30, 40, 50) and suppose ``ind_1``
+and ``ind_2`` can be broadcast to the shape (2, 3, 4). Then
+``x[:, ind_1, ind_2]`` has shape (10, 2, 3, 4, 40, 50) because the
+(20, 30)-shaped subspace from ``x`` has been replaced with the
+(2, 3, 4) subspace from the indices. However,
+``x[:, ind_1, :, ind_2]`` has shape (2, 3, 4, 10, 30, 50) because there
+is no unambiguous place to drop in the indexing subspace, thus
+it is tacked on at the beginning. It is always possible to use
+:meth:`.transpose() <ndarray.transpose>` to move the subspace
+anywhere desired. Note that this example cannot be replicated
+using :func:`take`.
+
+.. rubric:: Example
+
+Slicing can be combined with broadcasted boolean indices::
+
+ >>> x = np.arange(35).reshape(5, 7)
+ >>> b = x > 20
+ >>> b
+ array([[False, False, False, False, False, False, False],
+ [False, False, False, False, False, False, False],
+ [False, False, False, False, False, False, False],
+ [ True, True, True, True, True, True, True],
+ [ True, True, True, True, True, True, True]])
+ >>> x[b[:, 5], 1:3]
+ array([[22, 23],
+ [29, 30]])
+
+
+.. _arrays.indexing.fields:
+
+Field access
+-------------
+
+.. seealso:: :ref:`structured_arrays`
+
+If the :class:`ndarray` object is a structured array the :term:`fields <field>`
+of the array can be accessed by indexing the array with strings,
+dictionary-like.
+
+Indexing ``x['field-name']`` returns a new :term:`view` to the array,
+which is of the same shape as *x* (except when the field is a
+sub-array) but of data type ``x.dtype['field-name']`` and contains
+only the part of the data in the specified field. Also,
+:ref:`record array <arrays.classes.rec>` scalars can be "indexed" this way.
+
+Indexing into a structured array can also be done with a list of field names,
+e.g. ``x[['field-name1', 'field-name2']]``. As of NumPy 1.16, this returns a
+view containing only those fields. In older versions of NumPy, it returned a
+copy. See the user guide section on :ref:`structured_arrays` for more
+information on multifield indexing.
+
+If the accessed field is a sub-array, the dimensions of the sub-array
+are appended to the shape of the result.
+For example::
+
+ >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
+ >>> x['a'].shape
+ (2, 2)
+ >>> x['a'].dtype
+ dtype('int32')
+ >>> x['b'].shape
+ (2, 2, 3, 3)
+ >>> x['b'].dtype
+ dtype('float64')
+
+.. _flat-iterator-indexing:
+
+Flat Iterator indexing
+----------------------
+
+:attr:`x.flat <ndarray.flat>` returns an iterator that will iterate
+over the entire array (in C-contiguous style with the last index
+varying the fastest). This iterator object can also be indexed using
+basic slicing or advanced indexing as long as the selection object is
+not a tuple. This should be clear from the fact that :attr:`x.flat
+<ndarray.flat>` is a 1-dimensional view. It can be used for integer
+indexing with 1-dimensional C-style-flat indices. The shape of any
+returned array is therefore the shape of the integer indexing object.
+
+.. index::
+ single: indexing
+ single: ndarray
+
+
+.. _assigning-values-to-indexed-arrays:
Assigning values to indexed arrays
-==================================
+----------------------------------
As mentioned, one can select a subset of an array to assign to using
a single index, slices, and index and mask arrays. The value being
@@ -405,26 +803,28 @@ example is often surprising to people: ::
array([ 0, 11, 20, 31, 40])
Where people expect that the 1st location will be incremented by 3.
-In fact, it will only be incremented by 1. The reason is because
+In fact, it will only be incremented by 1. The reason is that
a new array is extracted from the original (as a temporary) containing
the values at 1, 1, 3, 1, then the value 1 is added to the temporary,
and then the temporary is assigned back to the original array. Thus
-the value of the array at x[1]+1 is assigned to x[1] three times,
+the value of the array at ``x[1] + 1`` is assigned to ``x[1]`` three times,
rather than being incremented 3 times.
+.. _dealing-with-variable-indices:
+
Dealing with variable numbers of indices within programs
-========================================================
+--------------------------------------------------------
-The index syntax is very powerful but limiting when dealing with
+The indexing syntax is very powerful but limiting when dealing with
a variable number of indices. For example, if you want to write
a function that can handle arguments with various numbers of
dimensions without having to write special case code for each
number of possible dimensions, how can that be done? If one
supplies to the index a tuple, the tuple will be interpreted
-as a list of indices. For example (using the previous definition
-for the array z): ::
+as a list of indices. For example::
- >>> indices = (1,1,1,1)
+ >>> z = np.arange(81).reshape(3, 3, 3, 3)
+ >>> indices = (1, 1, 1, 1)
>>> z[indices]
40
@@ -434,30 +834,70 @@ and then use these within an index.
Slices can be specified within programs by using the slice() function
in Python. For example: ::
- >>> indices = (1,1,1,slice(0,2)) # same as [1,1,1,0:2]
+ >>> indices = (1, 1, 1, slice(0, 2)) # same as [1, 1, 1, 0:2]
>>> z[indices]
array([39, 40])
Likewise, ellipsis can be specified in code by using the Ellipsis
object: ::
- >>> indices = (1, Ellipsis, 1) # same as [1,...,1]
+ >>> indices = (1, Ellipsis, 1) # same as [1, ..., 1]
>>> z[indices]
array([[28, 31, 34],
[37, 40, 43],
[46, 49, 52]])
-For this reason it is possible to use the output from the np.nonzero()
-function directly as an index since it always returns a tuple of index
-arrays.
+For this reason, it is possible to use the output from the
+:meth:`np.nonzero() <ndarray.nonzero>` function directly as an index since
+it always returns a tuple of index arrays.
Because of the special treatment of tuples, they are not automatically
converted to an array as a list would be. As an example: ::
- >>> z[[1,1,1,1]] # produces a large array
+ >>> z[[1, 1, 1, 1]] # produces a large array
array([[[[27, 28, 29],
[30, 31, 32], ...
- >>> z[(1,1,1,1)] # returns a single value
+ >>> z[(1, 1, 1, 1)] # returns a single value
40
+Detailed notes
+--------------
+
+These are some detailed notes, which are not of importance for day to day
+indexing (in no particular order):
+
+* The native NumPy indexing type is ``intp`` and may differ from the
+ default integer array type. ``intp`` is the smallest data type
+ sufficient to safely index any array; for advanced indexing it may be
+ faster than other types.
+* For advanced assignments, there is in general no guarantee for the
+ iteration order. This means that if an element is set more than once,
+ it is not possible to predict the final result.
+* An empty (tuple) index is a full scalar index into a zero-dimensional array.
+ ``x[()]`` returns a *scalar* if ``x`` is zero-dimensional and a view
+ otherwise. On the other hand, ``x[...]`` always returns a view.
+* If a zero-dimensional array is present in the index *and* it is a full
+ integer index the result will be a *scalar* and not a zero-dimensional array.
+ (Advanced indexing is not triggered.)
+* When an ellipsis (``...``) is present but has no size (i.e. replaces zero
+ ``:``) the result will still always be an array. A view if no advanced index
+ is present, otherwise a copy.
+* The ``nonzero`` equivalence for Boolean arrays does not hold for zero
+ dimensional boolean arrays.
+* When the result of an advanced indexing operation has no elements but an
+ individual index is out of bounds, whether or not an ``IndexError`` is
+ raised is undefined (e.g. ``x[[], [123]]`` with ``123`` being out of bounds).
+* When a *casting* error occurs during assignment (for example updating a
+ numerical array using a sequence of strings), the array being assigned
+ to may end up in an unpredictable partially updated state.
+ However, if any other error (such as an out of bounds index) occurs, the
+ array will remain unchanged.
+* The memory layout of an advanced indexing result is optimized for each
+ indexing operation and no particular memory order can be assumed.
+* When using a subclass (especially one which manipulates its shape), the
+ default ``ndarray.__setitem__`` behaviour will call ``__getitem__`` for
+ *basic* indexing but not for *advanced* indexing. For such a subclass it may
+ be preferable to call ``ndarray.__setitem__`` with a *base class* ndarray
+ view on the data. This *must* be done if the subclass's ``__getitem__`` does
+ not return views.
diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst
index 66f3f9ee9..bcd51d983 100644
--- a/doc/source/user/basics.rst
+++ b/doc/source/user/basics.rst
@@ -18,3 +18,4 @@ fundamental NumPy ideas and philosophy.
basics.rec
basics.dispatch
basics.subclassing
+ basics.ufuncs
diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst
new file mode 100644
index 000000000..083e31f70
--- /dev/null
+++ b/doc/source/user/basics.ufuncs.rst
@@ -0,0 +1,313 @@
+.. sectionauthor:: adapted from "Guide to NumPy" by Travis E. Oliphant
+
+.. _ufuncs-basics:
+
+********************************************
+Universal functions (:class:`.ufunc`) basics
+********************************************
+
+.. seealso:: :ref:`ufuncs`
+
+.. index: ufunc, universal function, arithmetic, operation
+
+A universal function (or :term:`ufunc` for short) is a function that
+operates on :class:`ndarrays <numpy.ndarray>` in an element-by-element fashion,
+supporting :ref:`array broadcasting <ufuncs.broadcasting>`, :ref:`type
+casting <ufuncs.casting>`, and several other standard features. That
+is, a ufunc is a ":term:`vectorized <vectorization>`" wrapper for a function
+that takes a fixed number of specific inputs and produces a fixed number of
+specific outputs.
+
+In NumPy, universal functions are instances of the
+:class:`numpy.ufunc` class. Many of the built-in functions are
+implemented in compiled C code. The basic ufuncs operate on scalars, but
+there is also a generalized kind for which the basic elements are sub-arrays
+(vectors, matrices, etc.), and broadcasting is done over other dimensions.
+The simplest example is the addition operator::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+One can also produce custom :class:`numpy.ufunc` instances using the
+:func:`numpy.frompyfunc` factory function.
+
+
+Ufunc methods
+=============
+
+All ufuncs have four methods. They can be found at
+:ref:`ufuncs.methods`. However, these methods only make sense on scalar
+ufuncs that take two input arguments and return one output argument.
+Attempting to call these methods on other ufuncs will cause a
+:exc:`ValueError`.
+
+The reduce-like methods all take an *axis* keyword, a *dtype*
+keyword, and an *out* keyword, and the arrays must all have dimension >= 1.
+The *axis* keyword specifies the axis of the array over which the reduction
+will take place (with negative values counting backwards). Generally, it is an
+integer, though for :meth:`numpy.ufunc.reduce`, it can also be a tuple of
+``int`` to reduce over several axes at once, or ``None``, to reduce over all
+axes. For example::
+
+ >>> x = np.arange(9).reshape(3,3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> np.add.reduce(x, 1)
+ array([ 3, 12, 21])
+ >>> np.add.reduce(x, (0, 1))
+ 36
+
+The *dtype* keyword allows you to manage a very common problem that arises
+when naively using :meth:`.ufunc.reduce`. Sometimes you may
+have an array of a certain data type and wish to add up all of its
+elements, but the result does not fit into the data type of the
+array. This commonly happens if you have an array of single-byte
+integers. The *dtype* keyword allows you to alter the data type over which
+the reduction takes place (and therefore the type of the output). Thus,
+you can ensure that the output is a data type with precision large enough
+to handle your output. The responsibility of altering the reduce type is
+mostly up to you. There is one exception: if no *dtype* is given for a
+reduction on the "add" or "multiply" operations, then if the input type is
+an integer (or Boolean) data-type and smaller than the size of the
+:class:`numpy.int_` data type, it will be internally upcast to the :class:`.int_`
+(or :class:`numpy.uint`) data-type. In the previous example::
+
+ >>> x.dtype
+ dtype('int64')
+ >>> np.multiply.reduce(x, dtype=float)
+ array([ 0., 28., 80.])
+
+Finally, the *out* keyword allows you to
+provide an output array (for single-output ufuncs, which are currently the only
+ones supported; for future extension, however, a tuple with a single argument
+can be passed in). If *out* is given, the *dtype* argument is ignored.
+Considering ``x`` from the previous example::
+
+ >>> y = np.zeros(3, dtype=int)
+ >>> y
+ array([0, 0, 0])
+ >>> np.multiply.reduce(x, dtype=float, out=y)
+ array([ 0, 28, 80]) # dtype argument is ignored
+
+Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place
+operations to be performed using advanced indexing. No
+:ref:`buffering <use-of-internal-buffers>` is used on the dimensions where
+advanced indexing is used, so the advanced index can
+list an item more than once and the operation will be performed on the result
+of the previous operation for that item.
+
+
+.. _ufuncs-output-type:
+
+Output type determination
+=========================
+
+The output of the ufunc (and its methods) is not necessarily an
+:class:`ndarray <numpy.ndarray>`, if all input arguments are not
+:class:`ndarrays <numpy.ndarray>`. Indeed, if any input defines an
+:obj:`~.class.__array_ufunc__` method,
+control will be passed completely to that function, i.e., the ufunc is
+:ref:`overridden <ufuncs.overrides>`.
+
+If none of the inputs overrides the ufunc, then
+all output arrays will be passed to the
+:obj:`~.class.__array_prepare__` and
+:obj:`~.class.__array_wrap__` methods of the input (besides
+:class:`ndarrays <.ndarray>`, and scalars) that defines it **and** has
+the highest :obj:`~.class.__array_priority__`
+of any other input to the universal function. The default
+:obj:`~.class.__array_priority__` of the
+ndarray is 0.0, and the default :obj:`~.class.__array_priority__` of a subtype
+is 0.0. Matrices have :obj:`~.class.__array_priority__` equal to 10.0.
+
+All ufuncs can also take output arguments. If necessary, output will
+be cast to the data-type(s) of the provided output array(s). If a class
+with an :obj:`~.class.__array__` method is used for the output,
+results will be written to the object returned by :obj:`~.class.__array__`.
+Then, if the class also has an :obj:`~.class.__array_prepare__` method, it is
+called so metadata may be determined based on the context of the ufunc (the
+context consisting of the ufunc itself, the arguments passed to the ufunc, and
+the ufunc domain.) The array object returned by
+:obj:`~.class.__array_prepare__` is passed to the ufunc for computation.
+Finally, if the class also has an :obj:`~.class.__array_wrap__` method, the
+returned :class:`.ndarray` result will be passed to that method just before
+passing control back to the caller.
+
+.. _ufuncs.broadcasting:
+
+Broadcasting
+============
+
+.. seealso:: :doc:`Broadcasting basics <basics.broadcasting>`
+
+.. index:: broadcasting
+
+Each universal function takes array inputs and produces array outputs
+by performing the core function element-wise on the inputs (where an
+element is generally a scalar, but can be a vector or higher-order
+sub-array for generalized ufuncs). Standard
+:ref:`broadcasting rules <general-broadcasting-rules>` are applied
+so that inputs not sharing exactly the
+same shapes can still be usefully operated on.
+
+By these rules, if an input has a dimension size of 1 in its shape, the
+first data entry in that dimension will be used for all calculations along
+that dimension. In other words, the stepping machinery of the
+:term:`ufunc` will simply not step along that dimension (the
+:ref:`stride <memory-layout>` will be 0 for that dimension).
+
+
+.. _ufuncs.casting:
+
+Type casting rules
+==================
+
+.. index::
+ pair: ufunc; casting rules
+
+.. note::
+
+ In NumPy 1.6.0, a type promotion API was created to encapsulate the
+ mechanism for determining output types. See the functions
+ :func:`numpy.result_type`, :func:`numpy.promote_types`, and
+ :func:`numpy.min_scalar_type` for more details.
+
+At the core of every ufunc is a one-dimensional strided loop that
+implements the actual function for a specific type combination. When a
+ufunc is created, it is given a static list of inner loops and a
+corresponding list of type signatures over which the ufunc operates.
+The ufunc machinery uses this list to determine which inner loop to
+use for a particular case. You can inspect the :attr:`.types
+<.ufunc.types>` attribute for a particular ufunc to see which type
+combinations have a defined inner loop and which output type they
+produce (:ref:`character codes <arrays.scalars.character-codes>` are used
+in said output for brevity).
+
+Casting must be done on one or more of the inputs whenever the ufunc
+does not have a core loop implementation for the input types provided.
+If an implementation for the input types cannot be found, then the
+algorithm searches for an implementation with a type signature to
+which all of the inputs can be cast "safely." The first one it finds
+in its internal list of loops is selected and performed, after all
+necessary type casting. Recall that internal copies during ufuncs (even
+for casting) are limited to the size of an internal buffer (which is user
+settable).
+
+.. note::
+
+ Universal functions in NumPy are flexible enough to have mixed type
+ signatures. Thus, for example, a universal function could be defined
+ that works with floating-point and integer values. See
+ :func:`numpy.ldexp` for an example.
+
+By the above description, the casting rules are essentially
+implemented by the question of when a data type can be cast "safely"
+to another data type. The answer to this question can be determined in
+Python with a function call: :func:`can_cast(fromtype, totype)
+<numpy.can_cast>`. The example below shows the results of this call for
+the 24 internally supported types on the author's 64-bit system. You
+can generate this table for your system with the code given in the example.
+
+.. rubric:: Example
+
+Code segment showing the "can cast safely" table for a 64-bit system.
+Generally the output depends on the system; your system might result in
+a different table.
+
+>>> mark = {False: ' -', True: ' Y'}
+>>> def print_table(ntypes):
+... print('X ' + ' '.join(ntypes))
+... for row in ntypes:
+... print(row, end='')
+... for col in ntypes:
+... print(mark[np.can_cast(row, col)], end='')
+... print()
+...
+>>> print_table(np.typecodes['All'])
+X ? b h i l q p B H I L Q P e f d g F D G S U V O M m
+? Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+b - Y Y Y Y Y Y - - - - - - Y Y Y Y Y Y Y Y Y Y Y - Y
+h - - Y Y Y Y Y - - - - - - - Y Y Y Y Y Y Y Y Y Y - Y
+i - - - Y Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+l - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+q - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+p - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+B - - Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+H - - - Y Y Y Y - Y Y Y Y Y - Y Y Y Y Y Y Y Y Y Y - Y
+I - - - - Y Y Y - - Y Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+L - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
+Q - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
+P - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
+e - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y Y - -
+f - - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y - -
+d - - - - - - - - - - - - - - - Y Y - Y Y Y Y Y Y - -
+g - - - - - - - - - - - - - - - - Y - - Y Y Y Y Y - -
+F - - - - - - - - - - - - - - - - - Y Y Y Y Y Y Y - -
+D - - - - - - - - - - - - - - - - - - Y Y Y Y Y Y - -
+G - - - - - - - - - - - - - - - - - - - Y Y Y Y Y - -
+S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - -
+U - - - - - - - - - - - - - - - - - - - - - Y Y Y - -
+V - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+O - - - - - - - - - - - - - - - - - - - - - - - Y - -
+M - - - - - - - - - - - - - - - - - - - - - - Y Y Y -
+m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y
+
+You should note that, while included in the table for completeness,
+the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also,
+note that on a 32-bit system the integer types may have different
+sizes, resulting in a slightly altered table.
+
+Mixed scalar-array operations use a different set of casting rules
+that ensure that a scalar cannot "upcast" an array unless the scalar is
+of a fundamentally different kind of data (i.e., under a different
+hierarchy in the data-type hierarchy) than the array. This rule
+enables you to use scalar constants in your code (which, as Python
+types, are interpreted accordingly in ufuncs) without worrying about
+whether the precision of the scalar constant will cause upcasting on
+your large (small precision) array.
+
+.. _use-of-internal-buffers:
+
+Use of internal buffers
+=======================
+
+.. index:: buffers
+
+Internally, buffers are used for misaligned data, swapped data, and
+data that has to be converted from one data type to another. The size
+of internal buffers is settable on a per-thread basis. There can
+be up to :math:`2 (n_{\mathrm{inputs}} + n_{\mathrm{outputs}})`
+buffers of the specified size created to handle the data from all the
+inputs and outputs of a ufunc. The default size of a buffer is
+10,000 elements. Whenever buffer-based calculation would be needed,
+but all input arrays are smaller than the buffer size, those
+misbehaved or incorrectly-typed arrays will be copied before the
+calculation proceeds. Adjusting the size of the buffer may therefore
+alter the speed at which ufunc calculations of various sorts are
+completed. A simple interface for setting this variable is accessible
+using the function :func:`numpy.setbufsize`.
+
+
+Error handling
+==============
+
+.. index:: error handling
+
+Universal functions can trip special floating-point status registers
+in your hardware (such as divide-by-zero). If available on your
+platform, these registers will be regularly checked during
+calculation. Error handling is controlled on a per-thread basis,
+and can be configured using the functions :func:`numpy.seterr` and
+:func:`numpy.seterrcall`.
+
+
+.. _ufuncs.overrides:
+
+Overriding ufunc behavior
+=========================
+
+Classes (including ndarray subclasses) can override how ufuncs act on
+them by defining certain special methods. For details, see
+:ref:`arrays.classes`.
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index b3ecc3a95..e5c51351e 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -38,8 +38,6 @@ details are found in :ref:`reference`.
../f2py/index
../glossary
../dev/underthehood
- ../docs/index
../bugs
../release
- ../doc_conventions
../license
diff --git a/environment.yml b/environment.yml
index 5fb7d292d..188e29a4f 100644
--- a/environment.yml
+++ b/environment.yml
@@ -17,9 +17,8 @@ dependencies:
- pytest-cov
- pytest-xdist
- hypothesis
- # For type annotations
+ # For type annotations
- mypy=0.902
- - typing_extensions
# For building docs
- sphinx=4.1.1
- numpydoc=1.1.0
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 5e7f74059..ffef369e3 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -271,70 +271,54 @@ else:
oldnumeric = 'removed'
numarray = 'removed'
- if sys.version_info[:2] >= (3, 7):
- # module level getattr is only supported in 3.7 onwards
- # https://www.python.org/dev/peps/pep-0562/
- def __getattr__(attr):
- # Warn for expired attributes, and return a dummy function
- # that always raises an exception.
- try:
- msg = __expired_functions__[attr]
- except KeyError:
- pass
- else:
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
-
- def _expired(*args, **kwds):
- raise RuntimeError(msg)
-
- return _expired
-
- # Emit warnings for deprecated attributes
- try:
- val, msg = __deprecated_attrs__[attr]
- except KeyError:
- pass
- else:
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
- return val
-
- # Importing Tester requires importing all of UnitTest which is not a
- # cheap import Since it is mainly used in test suits, we lazy import it
- # here to save on the order of 10 ms of import time for most users
- #
- # The previous way Tester was imported also had a side effect of adding
- # the full `numpy.testing` namespace
- if attr == 'testing':
- import numpy.testing as testing
- return testing
- elif attr == 'Tester':
- from .testing import Tester
- return Tester
-
- raise AttributeError("module {!r} has no attribute "
- "{!r}".format(__name__, attr))
-
- def __dir__():
- return list(globals().keys() | {'Tester', 'testing'})
+ def __getattr__(attr):
+ # Warn for expired attributes, and return a dummy function
+ # that always raises an exception.
+ try:
+ msg = __expired_functions__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
- else:
- # We don't actually use this ourselves anymore, but I'm not 100% sure that
- # no-one else in the world is using it (though I hope not)
- from .testing import Tester
+ def _expired(*args, **kwds):
+ raise RuntimeError(msg)
- # We weren't able to emit a warning about these, so keep them around
- globals().update({
- k: v
- for k, (v, msg) in __deprecated_attrs__.items()
- })
+ return _expired
+ # Emit warnings for deprecated attributes
+ try:
+ val, msg = __deprecated_attrs__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ return val
+
+ # Importing Tester requires importing all of UnitTest which is not a
+    # cheap import. Since it is mainly used in test suites, we lazy import it
+ # here to save on the order of 10 ms of import time for most users
+ #
+ # The previous way Tester was imported also had a side effect of adding
+ # the full `numpy.testing` namespace
+ if attr == 'testing':
+ import numpy.testing as testing
+ return testing
+ elif attr == 'Tester':
+ from .testing import Tester
+ return Tester
+
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
+ def __dir__():
+ return list(globals().keys() | {'Tester', 'testing'})
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
-
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 49c17a015..dafedeb56 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -165,6 +165,7 @@ from numpy.typing._extended_precision import (
)
from typing import (
+ Literal as L,
Any,
ByteString,
Callable,
@@ -190,13 +191,11 @@ from typing import (
Type,
TypeVar,
Union,
+ Protocol,
+ SupportsIndex,
+ Final,
)
-if sys.version_info >= (3, 8):
- from typing import Literal as L, Protocol, SupportsIndex, Final
-else:
- from typing_extensions import Literal as L, Protocol, SupportsIndex, Final
-
# Ensures that the stubs are picked up
from numpy import (
char as char,
@@ -517,7 +516,6 @@ from numpy.lib.npyio import (
recfromtxt as recfromtxt,
recfromcsv as recfromcsv,
load as load,
- loads as loads,
save as save,
savez as savez,
savez_compressed as savez_compressed,
@@ -624,6 +622,14 @@ from numpy.matrixlib import (
bmat as bmat,
)
+# Protocol for representing file-like-objects accepted
+# by `ndarray.tofile` and `fromfile`
+class _IOProtocol(Protocol):
+ def flush(self) -> object: ...
+ def fileno(self) -> int: ...
+ def tell(self) -> SupportsIndex: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
__all__: List[str]
__path__: List[str]
__version__: str
@@ -1187,9 +1193,9 @@ class flatiter(Generic[_NdArraySubClass]):
self, key: Union[_ArrayLikeInt, slice, ellipsis],
) -> _NdArraySubClass: ...
@overload
- def __array__(self: flatiter[ndarray[Any, _DType]], __dtype: None = ...) -> ndarray[Any, _DType]: ...
+ def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ...
@overload
- def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ...
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
_OrderKACF = Optional[L["K", "A", "C", "F"]]
_OrderACF = Optional[L["A", "C", "F"]]
@@ -1218,7 +1224,7 @@ class _ArrayOrScalarCommon:
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __copy__(self: _ArraySelf) -> _ArraySelf: ...
- def __deepcopy__(self: _ArraySelf, __memo: Optional[dict] = ...) -> _ArraySelf: ...
+ def __deepcopy__(self: _ArraySelf, memo: None | dict = ..., /) -> _ArraySelf: ...
def __eq__(self, other): ...
def __ne__(self, other): ...
def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
@@ -1228,7 +1234,10 @@ class _ArrayOrScalarCommon:
# NOTE: `tostring()` is deprecated and therefore excluded
# def tostring(self, order=...): ...
def tofile(
- self, fid: Union[IO[bytes], str, bytes, os.PathLike[Any]], sep: str = ..., format: str = ...
+ self,
+ fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol,
+ sep: str = ...,
+ format: str = ...,
) -> None: ...
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
@@ -1241,7 +1250,7 @@ class _ArrayOrScalarCommon:
def __array_priority__(self) -> float: ...
@property
def __array_struct__(self): ...
- def __setstate__(self, __state): ...
+ def __setstate__(self, state, /): ...
# a `bool_` is returned when `keepdims=True` and `self` is a 0d array
@overload
@@ -1651,7 +1660,7 @@ _ArrayNumber_co = NDArray[Union[bool_, number[Any]]]
_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
class _SupportsItem(Protocol[_T_co]):
- def item(self, __args: Any) -> _T_co: ...
+ def item(self, args: Any, /) -> _T_co: ...
class _SupportsReal(Protocol[_T_co]):
@property
@@ -1690,20 +1699,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
order: _OrderKACF = ...,
) -> _ArraySelf: ...
@overload
- def __array__(self, __dtype: None = ...) -> ndarray[Any, _DType_co]: ...
+ def __array__(self, dtype: None = ..., /) -> ndarray[Any, _DType_co]: ...
@overload
- def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ...
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
def __array_wrap__(
self,
- __array: ndarray[_ShapeType2, _DType],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: ndarray[_ShapeType2, _DType],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> ndarray[_ShapeType2, _DType]: ...
def __array_prepare__(
self,
- __array: ndarray[_ShapeType2, _DType],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: ndarray[_ShapeType2, _DType],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> ndarray[_ShapeType2, _DType]: ...
@property
@@ -1730,16 +1741,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def item(
self: ndarray[Any, dtype[_SupportsItem[_T]]], # type: ignore[type-var]
- __args: Tuple[SupportsIndex, ...],
+ args: Tuple[SupportsIndex, ...],
+ /,
) -> _T: ...
@overload
- def itemset(self, __value: Any) -> None: ...
+ def itemset(self, value: Any, /) -> None: ...
@overload
- def itemset(self, __item: _ShapeLike, __value: Any) -> None: ...
+ def itemset(self, item: _ShapeLike, value: Any, /) -> None: ...
@overload
- def resize(self, __new_shape: _ShapeLike, *, refcheck: bool = ...) -> None: ...
+ def resize(self, new_shape: _ShapeLike, /, *, refcheck: bool = ...) -> None: ...
@overload
def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ...
@@ -1759,7 +1771,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
) -> ndarray[Any, _DType_co]: ...
@overload
- def transpose(self: _ArraySelf, __axes: _ShapeLike) -> _ArraySelf: ...
+ def transpose(self: _ArraySelf, axes: _ShapeLike, /) -> _ArraySelf: ...
@overload
def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ...
@@ -1898,7 +1910,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def reshape(
- self, __shape: _ShapeLike, *, order: _OrderACF = ...
+ self, shape: _ShapeLike, /, *, order: _OrderACF = ...
) -> ndarray[Any, _DType_co]: ...
@overload
def reshape(
@@ -2904,9 +2916,9 @@ class generic(_ArrayOrScalarCommon):
@abstractmethod
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
@overload
- def __array__(self: _ScalarType, __dtype: None = ...) -> ndarray[Any, dtype[_ScalarType]]: ...
+ def __array__(self: _ScalarType, dtype: None = ..., /) -> ndarray[Any, dtype[_ScalarType]]: ...
@overload
- def __array__(self, __dtype: _DType) -> ndarray[Any, _DType]: ...
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
@property
def base(self) -> None: ...
@property
@@ -2974,8 +2986,7 @@ class generic(_ArrayOrScalarCommon):
) -> Any: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> Any: ...
@overload
@@ -3021,7 +3032,7 @@ class generic(_ArrayOrScalarCommon):
@overload
def reshape(
- self: _ScalarType, __shape: _ShapeLike, *, order: _OrderACF = ...
+ self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ...
) -> ndarray[Any, dtype[_ScalarType]]: ...
@overload
def reshape(
@@ -3031,7 +3042,7 @@ class generic(_ArrayOrScalarCommon):
def squeeze(
self: _ScalarType, axis: Union[L[0], Tuple[()]] = ...
) -> _ScalarType: ...
- def transpose(self: _ScalarType, __axes: Tuple[()] = ...) -> _ScalarType: ...
+ def transpose(self: _ScalarType, axes: Tuple[()] = ..., /) -> _ScalarType: ...
# Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
@property
def dtype(self: _ScalarType) -> dtype[_ScalarType]: ...
@@ -3066,10 +3077,9 @@ class number(generic, Generic[_NBit1]): # type: ignore
__ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
class bool_(generic):
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> bool: ...
def tolist(self) -> bool: ...
@property
@@ -3115,7 +3125,7 @@ class bool_(generic):
bool8 = bool_
class object_(generic):
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -3144,14 +3154,16 @@ class datetime64(generic):
@overload
def __init__(
self,
- __value: Union[None, datetime64, _CharLike_co, _DatetimeScalar] = ...,
- __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ...,
+ value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
+ format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
) -> None: ...
@overload
def __init__(
self,
- __value: int,
- __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]]
+ value: int,
+ format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co],
+ /,
) -> None: ...
def __add__(self, other: _TD64Like_co) -> datetime64: ...
def __radd__(self, other: _TD64Like_co) -> datetime64: ...
@@ -3165,28 +3177,16 @@ class datetime64(generic):
__gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
__ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
-# Support for `__index__` was added in python 3.8 (bpo-20092)
-if sys.version_info >= (3, 8):
- _IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
- _FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
- _ComplexValue = Union[
- None,
- _CharLike_co,
- SupportsFloat,
- SupportsComplex,
- SupportsIndex,
- complex, # `complex` is not a subtype of `SupportsComplex`
- ]
-else:
- _IntValue = Union[SupportsInt, _CharLike_co]
- _FloatValue = Union[None, _CharLike_co, SupportsFloat]
- _ComplexValue = Union[
- None,
- _CharLike_co,
- SupportsFloat,
- SupportsComplex,
- complex,
- ]
+_IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
+_FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
+_ComplexValue = Union[
+ None,
+ _CharLike_co,
+ SupportsFloat,
+ SupportsComplex,
+ SupportsIndex,
+ complex, # `complex` is not a subtype of `SupportsComplex`
+]
class integer(number[_NBit1]): # type: ignore
@property
@@ -3201,10 +3201,10 @@ class integer(number[_NBit1]): # type: ignore
# NOTE: `__index__` is technically defined in the bottom-most
# sub-classes (`int64`, `uint32`, etc)
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> int: ...
def tolist(self) -> int: ...
+ def is_integer(self) -> L[True]: ...
def __index__(self) -> int: ...
__truediv__: _IntTrueDiv[_NBit1]
__rtruediv__: _IntTrueDiv[_NBit1]
@@ -3224,7 +3224,7 @@ class integer(number[_NBit1]): # type: ignore
def __rxor__(self, other: _IntLike_co) -> integer: ...
class signedinteger(integer[_NBit1]):
- def __init__(self, __value: _IntValue = ...) -> None: ...
+ def __init__(self, value: _IntValue = ..., /) -> None: ...
__add__: _SignedIntOp[_NBit1]
__radd__: _SignedIntOp[_NBit1]
__sub__: _SignedIntOp[_NBit1]
@@ -3268,8 +3268,9 @@ longlong = signedinteger[_NBitLongLong]
class timedelta64(generic):
def __init__(
self,
- __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ...,
- __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ...,
+ value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ...,
+ format: _CharLike_co | Tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
) -> None: ...
@property
def numerator(self: _ScalarType) -> _ScalarType: ...
@@ -3305,7 +3306,7 @@ class timedelta64(generic):
class unsignedinteger(integer[_NBit1]):
# NOTE: `uint64 + signedinteger -> float64`
- def __init__(self, __value: _IntValue = ...) -> None: ...
+ def __init__(self, value: _IntValue = ..., /) -> None: ...
__add__: _UnsignedIntOp[_NBit1]
__radd__: _UnsignedIntOp[_NBit1]
__sub__: _UnsignedIntOp[_NBit1]
@@ -3351,23 +3352,23 @@ _IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar('_FloatType', bound=floating)
class floating(inexact[_NBit1]):
- def __init__(self, __value: _FloatValue = ...) -> None: ...
+ def __init__(self, value: _FloatValue = ..., /) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ...,
+ /,
) -> float: ...
def tolist(self) -> float: ...
- def is_integer(self: float64) -> bool: ...
+ def is_integer(self) -> bool: ...
def hex(self: float64) -> str: ...
@classmethod
- def fromhex(cls: Type[float64], __string: str) -> float64: ...
+ def fromhex(cls: Type[float64], string: str, /) -> float64: ...
def as_integer_ratio(self) -> Tuple[int, int]: ...
if sys.version_info >= (3, 9):
def __ceil__(self: float64) -> int: ...
def __floor__(self: float64) -> int: ...
def __trunc__(self: float64) -> int: ...
def __getnewargs__(self: float64) -> Tuple[float]: ...
- def __getformat__(self: float64, __typestr: L["double", "float"]) -> str: ...
+ def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ...
@overload
def __round__(self, ndigits: None = ...) -> int: ...
@overload
@@ -3405,10 +3406,9 @@ longfloat = floating[_NBitLongDouble]
# describing the two 64 bit floats representing its real and imaginary component
class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]):
- def __init__(self, __value: _ComplexValue = ...) -> None: ...
+ def __init__(self, value: _ComplexValue = ..., /) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> complex: ...
def tolist(self) -> complex: ...
@property
@@ -3448,7 +3448,7 @@ class flexible(generic): ... # type: ignore
# depending on whether or not it's used as an opaque bytes sequence
# or a structure
class void(flexible):
- def __init__(self, __value: Union[_IntLike_co, bytes]) -> None: ...
+ def __init__(self, value: _IntLike_co | bytes, /) -> None: ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
@@ -3470,14 +3470,13 @@ class character(flexible): # type: ignore
class bytes_(character, bytes):
@overload
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
@overload
def __init__(
- self, __value: str, encoding: str = ..., errors: str = ...
+ self, value: str, /, encoding: str = ..., errors: str = ...
) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> bytes: ...
def tolist(self) -> bytes: ...
@@ -3486,14 +3485,13 @@ bytes0 = bytes_
class str_(character, str):
@overload
- def __init__(self, __value: object = ...) -> None: ...
+ def __init__(self, value: object = ..., /) -> None: ...
@overload
def __init__(
- self, __value: bytes, encoding: str = ..., errors: str = ...
+ self, value: bytes, /, encoding: str = ..., errors: str = ...
) -> None: ...
def item(
- self,
- __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
+ self, args: L[0] | Tuple[()] | Tuple[L[0]] = ..., /,
) -> str: ...
def tolist(self) -> str: ...
@@ -3733,9 +3731,10 @@ class errstate(Generic[_CallType], ContextDecorator):
def __enter__(self) -> None: ...
def __exit__(
self,
- __exc_type: Optional[Type[BaseException]],
- __exc_value: Optional[BaseException],
- __traceback: Optional[TracebackType],
+ exc_type: Optional[Type[BaseException]],
+ exc_value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ /,
) -> None: ...
class ndenumerate(Generic[_ScalarType]):
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py
index acfaa1ca5..8decb9dd7 100644
--- a/numpy/_pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -137,13 +137,20 @@ class PytestTester:
# offset verbosity. The "-q" cancels a "-v".
pytest_args += ["-q"]
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
with warnings.catch_warnings():
warnings.simplefilter("always")
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
from numpy.distutils import cpuinfo
+ with warnings.catch_warnings(record=True):
+ # Ignore the warning from importing the array_api submodule. This
+ # warning is done on import, so it would break pytest collection,
+ # but importing it early here prevents the warning from being
+            # issued when it is imported again.
+ import numpy.array_api
+
# Filter out annoying import messages. Want these in both develop and
# release mode.
pytest_args += [
diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi
index 693f4128a..0be64b3f7 100644
--- a/numpy/_pytesttester.pyi
+++ b/numpy/_pytesttester.pyi
@@ -1,5 +1,4 @@
-from typing import List, Iterable
-from typing_extensions import Literal as L
+from typing import List, Iterable, Literal as L
__all__: List[str]
diff --git a/numpy/array_api/__init__.py b/numpy/array_api/__init__.py
new file mode 100644
index 000000000..790157504
--- /dev/null
+++ b/numpy/array_api/__init__.py
@@ -0,0 +1,370 @@
+"""
+A NumPy sub-namespace that conforms to the Python array API standard.
+
+This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
+is still considered experimental, and will issue a warning when imported.
+
+This is a proof-of-concept namespace that wraps the corresponding NumPy
+functions to give a conforming implementation of the Python array API standard
+(https://data-apis.github.io/array-api/latest/). The standard is currently in
+an RFC phase and comments on it are both welcome and encouraged. Comments
+should be made either at https://github.com/data-apis/array-api or at
+https://github.com/data-apis/consortium-feedback/discussions.
+
+NumPy already follows the proposed spec for the most part, so this module
+serves mostly as a thin wrapper around it. However, NumPy also implements a
+lot of behavior that is not included in the spec, so this serves as a
+restricted subset of the API. Only those functions that are part of the spec
+are included in this namespace, and all functions are given with the exact
+signature given in the spec, including the use of position-only arguments, and
+omitting any extra keyword arguments implemented by NumPy but not part of the
+spec. The behavior of some functions is also modified from the NumPy behavior
+to conform to the standard. Note that the underlying array object itself is
+wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
+is implemented in pure Python with no C extensions.
+
+The array API spec is designed as a "minimal API subset" and explicitly allows
+libraries to include behaviors not specified by it. But users of this module
+that intend to write portable code should be aware that only those behaviors
+that are listed in the spec are guaranteed to be implemented across libraries.
+Consequently, the NumPy implementation was chosen to be both conforming and
+minimal, so that users can use this implementation of the array API namespace
+and be sure that behaviors that it defines will be available in conforming
+namespaces from other libraries.
+
+A few notes about the current state of this submodule:
+
+- There is a test suite that tests modules against the array API standard at
+ https://github.com/data-apis/array-api-tests. The test suite is still a work
+ in progress, but the existing tests pass on this module, with a few
+ exceptions:
+
+ - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
+ not included here, as it requires a full implementation in NumPy proper
+ first.
+
+ The test suite is not yet complete, and even the tests that exist are not
+ guaranteed to give a comprehensive coverage of the spec. Therefore, when
+ reviewing and using this submodule, you should refer to the standard
+ documents themselves. There are some tests in numpy.array_api.tests, but
+ they primarily focus on things that are not tested by the official array API
+ test suite.
+
+- There is a custom array object, numpy.array_api.Array, which is returned by
+ all functions in this module. All functions in the array API namespace
+ implicitly assume that they will only receive this object as input. The only
+ way to create instances of this object is to use one of the array creation
+ functions. It does not have a public constructor on the object itself. The
+ object is a small wrapper class around numpy.ndarray. The main purpose of it
+ is to restrict the namespace of the array object to only those dtypes and
+ only those methods that are required by the spec, as well as to limit/change
+ certain behavior that differs in the spec. In particular:
+
+ - The array API namespace does not have scalar objects, only 0-D arrays.
+ Operations on Array that would create a scalar in NumPy create a 0-D
+ array.
+
+ - Indexing: Only a subset of indices supported by NumPy are required by the
+ spec. The Array object restricts indexing to only allow those types of
+ indices that are required by the spec. See the docstring of the
+ numpy.array_api.Array._validate_indices helper function for more
+ information.
+
+ - Type promotion: Some type promotion rules are different in the spec. In
+ particular, the spec does not have any value-based casting. The spec also
+ does not require cross-kind casting, like integer -> floating-point. Only
+ those promotions that are explicitly required by the array API
+ specification are allowed in this module. See NEP 47 for more info.
+
+ - Functions do not automatically call asarray() on their input, and will not
+ work if the input type is not Array. The exception is array creation
+ functions, and Python operators on the Array object, which accept Python
+ scalars of the same type as the array dtype.
+
+- All functions include type annotations, corresponding to those given in the
+ spec (see _typing.py for definitions of some custom types). These do not
+ currently fully pass mypy due to some limitations in mypy.
+
+- Dtype objects are just the NumPy dtype objects, e.g., float64 =
+ np.dtype('float64'). The spec does not require any behavior on these dtype
+ objects other than that they be accessible by name and be comparable by
+ equality, but it was considered too much extra complexity to create custom
+ objects to represent dtypes.
+
+- All places where the implementations in this submodule are known to deviate
+ from their corresponding functions in NumPy are marked with "# Note:"
+ comments.
+
+Still TODO in this module are:
+
+- DLPack support for numpy.ndarray is still in progress. See
+ https://github.com/numpy/numpy/pull/19083.
+
+- The copy=False keyword argument to asarray() is not yet implemented. This
+ requires support in numpy.asarray() first.
+
+- Some functions are not yet fully tested in the array API test suite, and may
+ require updates that are not yet known until the tests are written.
+
+- The spec is still in an RFC phase and may still have minor updates, which
+ will need to be reflected here.
+
+- The linear algebra extension in the spec will be added in a future pull
+ request.
+
+- Complex number support in array API spec is planned but not yet finalized,
+ as are the fft extension and certain linear algebra functions such as eig
+ that require complex dtypes.
+
+"""
+
+import warnings
+
+warnings.warn(
+ "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
+)
+
+__all__ = []
+
+from ._constants import e, inf, nan, pi
+
+__all__ += ["e", "inf", "nan", "pi"]
+
+from ._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ zeros,
+ zeros_like,
+)
+
+__all__ += [
+ "asarray",
+ "arange",
+ "empty",
+ "empty_like",
+ "eye",
+ "from_dlpack",
+ "full",
+ "full_like",
+ "linspace",
+ "meshgrid",
+ "ones",
+ "ones_like",
+ "zeros",
+ "zeros_like",
+]
+
+from ._data_type_functions import (
+ broadcast_arrays,
+ broadcast_to,
+ can_cast,
+ finfo,
+ iinfo,
+ result_type,
+)
+
+__all__ += [
+ "broadcast_arrays",
+ "broadcast_to",
+ "can_cast",
+ "finfo",
+ "iinfo",
+ "result_type",
+]
+
+from ._dtypes import (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+
+__all__ += [
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "float32",
+ "float64",
+ "bool",
+]
+
+from ._elementwise_functions import (
+ abs,
+ acos,
+ acosh,
+ add,
+ asin,
+ asinh,
+ atan,
+ atan2,
+ atanh,
+ bitwise_and,
+ bitwise_left_shift,
+ bitwise_invert,
+ bitwise_or,
+ bitwise_right_shift,
+ bitwise_xor,
+ ceil,
+ cos,
+ cosh,
+ divide,
+ equal,
+ exp,
+ expm1,
+ floor,
+ floor_divide,
+ greater,
+ greater_equal,
+ isfinite,
+ isinf,
+ isnan,
+ less,
+ less_equal,
+ log,
+ log1p,
+ log2,
+ log10,
+ logaddexp,
+ logical_and,
+ logical_not,
+ logical_or,
+ logical_xor,
+ multiply,
+ negative,
+ not_equal,
+ positive,
+ pow,
+ remainder,
+ round,
+ sign,
+ sin,
+ sinh,
+ square,
+ sqrt,
+ subtract,
+ tan,
+ tanh,
+ trunc,
+)
+
+__all__ += [
+ "abs",
+ "acos",
+ "acosh",
+ "add",
+ "asin",
+ "asinh",
+ "atan",
+ "atan2",
+ "atanh",
+ "bitwise_and",
+ "bitwise_left_shift",
+ "bitwise_invert",
+ "bitwise_or",
+ "bitwise_right_shift",
+ "bitwise_xor",
+ "ceil",
+ "cos",
+ "cosh",
+ "divide",
+ "equal",
+ "exp",
+ "expm1",
+ "floor",
+ "floor_divide",
+ "greater",
+ "greater_equal",
+ "isfinite",
+ "isinf",
+ "isnan",
+ "less",
+ "less_equal",
+ "log",
+ "log1p",
+ "log2",
+ "log10",
+ "logaddexp",
+ "logical_and",
+ "logical_not",
+ "logical_or",
+ "logical_xor",
+ "multiply",
+ "negative",
+ "not_equal",
+ "positive",
+ "pow",
+ "remainder",
+ "round",
+ "sign",
+ "sin",
+ "sinh",
+ "square",
+ "sqrt",
+ "subtract",
+ "tan",
+ "tanh",
+ "trunc",
+]
+
+# einsum is not yet implemented in the array API spec.
+
+# from ._linear_algebra_functions import einsum
+# __all__ += ['einsum']
+
+from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot
+
+__all__ += ["matmul", "tensordot", "transpose", "vecdot"]
+
+from ._manipulation_functions import (
+ concat,
+ expand_dims,
+ flip,
+ reshape,
+ roll,
+ squeeze,
+ stack,
+)
+
+__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"]
+
+from ._searching_functions import argmax, argmin, nonzero, where
+
+__all__ += ["argmax", "argmin", "nonzero", "where"]
+
+from ._set_functions import unique
+
+__all__ += ["unique"]
+
+from ._sorting_functions import argsort, sort
+
+__all__ += ["argsort", "sort"]
+
+from ._statistical_functions import max, mean, min, prod, std, sum, var
+
+__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
+
+from ._utility_functions import all, any
+
+__all__ += ["all", "any"]
diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py
new file mode 100644
index 000000000..2d746e78b
--- /dev/null
+++ b/numpy/array_api/_array_object.py
@@ -0,0 +1,1029 @@
+"""
+Wrapper class around the ndarray object for the array API standard.
+
+The array API standard defines some behaviors differently than ndarray, in
+particular, type promotion rules are different (the standard has no
+value-based casting). The standard also specifies a more limited subset of
+array methods and functionalities than are implemented on ndarray. Since the
+goal of the array_api namespace is to be a minimal implementation of the array
+API standard, we need to define a separate wrapper class for the array_api
+namespace.
+
+The standard compliant class is only a wrapper class. It is *not* a subclass
+of ndarray.
+"""
+
+from __future__ import annotations
+
+import operator
+from enum import IntEnum
+from ._creation_functions import asarray
+from ._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _floating_dtypes,
+ _numeric_dtypes,
+ _result_type,
+ _dtype_categories,
+)
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import PyCapsule, Device, Dtype
+
+import numpy as np
+
+from numpy import array_api
+
+
+class Array:
+ """
+ n-d array object for the array API namespace.
+
+ See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
+ information.
+
+ This is a wrapper around numpy.ndarray that restricts the usage to only
+ those things that are required by the array API namespace. Note,
+ attributes on this object that start with a single underscore are not part
+ of the API specification and should only be used internally. This object
+ should not be constructed directly. Rather, use one of the creation
+ functions, such as asarray().
+
+ """
+
+ # Use a custom constructor instead of __init__, as manually initializing
+ # this class is not supported API.
+ @classmethod
+ def _new(cls, x, /):
+ """
+ This is a private method for initializing the array API Array
+ object.
+
+ Functions outside of the array_api submodule should not use this
+ method. Use one of the creation functions instead, such as
+ ``asarray``.
+
+ """
+ obj = super().__new__(cls)
+ # Note: The spec does not have array scalars, only 0-D arrays.
+ if isinstance(x, np.generic):
+ # Convert the array scalar to a 0-D array
+ x = np.asarray(x)
+ if x.dtype not in _all_dtypes:
+ raise TypeError(
+ f"The array_api namespace does not support the dtype '{x.dtype}'"
+ )
+ obj._array = x
+ return obj
+
+ # Prevent Array() from working
+ def __new__(cls, *args, **kwargs):
+ raise TypeError(
+ "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
+ )
+
+ # These functions are not required by the spec, but are implemented for
+ # the sake of usability.
+
+ def __str__(self: Array, /) -> str:
+ """
+ Performs the operation __str__.
+ """
+ return self._array.__str__().replace("array", "Array")
+
+ def __repr__(self: Array, /) -> str:
+ """
+ Performs the operation __repr__.
+ """
+ return f"Array({np.array2string(self._array, separator=', ')}, dtype={self.dtype.name})"
+
+ # These are various helper functions to make the array behavior match the
+ # spec in places where it either deviates from or is more strict than
+ # NumPy behavior
+
+    def _check_allowed_dtypes(self, other, dtype_category, op):
+        """
+        Helper function for operators to only allow specific input dtypes
+
+        ``other`` may be a Python scalar (promoted via ``_promote_scalar``),
+        an ``Array`` whose dtype is checked against ``dtype_category``, or
+        anything else, in which case ``NotImplemented`` is returned so that
+        Python can try the reflected operator on the other operand.
+
+        Use like
+
+        other = self._check_allowed_dtypes(other, 'numeric', '__add__')
+        if other is NotImplemented:
+            return other
+        """
+
+        if self.dtype not in _dtype_categories[dtype_category]:
+            raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+        if isinstance(other, (int, float, bool)):
+            other = self._promote_scalar(other)
+        elif isinstance(other, Array):
+            if other.dtype not in _dtype_categories[dtype_category]:
+                raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+        else:
+            return NotImplemented
+
+        # This will raise TypeError for type combinations that are not allowed
+        # to promote in the spec (even if the NumPy array operator would
+        # promote them).
+        res_dtype = _result_type(self.dtype, other.dtype)
+        if op.startswith("__i"):
+            # Note: NumPy will allow in-place operators in some cases where
+            # the type promoted operator does not match the left-hand side
+            # operand. For example,
+
+            # >>> a = np.array(1, dtype=np.int8)
+            # >>> a += np.array(1, dtype=np.int16)
+
+            # The spec explicitly disallows this.
+            if res_dtype != self.dtype:
+                raise TypeError(
+                    f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
+                )
+
+        return other
+
+    # Helper function to match the type promotion rules in the spec
+    def _promote_scalar(self, scalar):
+        """
+        Returns a promoted version of a Python scalar appropriate for use with
+        operations on self.
+
+        This may raise an OverflowError in cases where the scalar is an
+        integer that is too large to fit in a NumPy integer dtype, or
+        TypeError when the scalar type is incompatible with the dtype of self.
+        """
+        # Check bool before int: bool is a subclass of int in Python, so the
+        # int branch below would otherwise also match True/False.
+        if isinstance(scalar, bool):
+            if self.dtype not in _boolean_dtypes:
+                raise TypeError(
+                    "Python bool scalars can only be promoted with bool arrays"
+                )
+        elif isinstance(scalar, int):
+            if self.dtype in _boolean_dtypes:
+                raise TypeError(
+                    "Python int scalars cannot be promoted with bool arrays"
+                )
+        elif isinstance(scalar, float):
+            if self.dtype not in _floating_dtypes:
+                raise TypeError(
+                    "Python float scalars can only be promoted with floating-point arrays."
+                )
+        else:
+            raise TypeError("'scalar' must be a Python scalar")
+
+        # Note: the spec only specifies integer-dtype/int promotion
+        # behavior for integers within the bounds of the integer dtype.
+        # Outside of those bounds we use the default NumPy behavior (either
+        # cast or raise OverflowError).
+        return Array._new(np.array(scalar, self.dtype))
+
+    @staticmethod
+    def _normalize_two_args(x1, x2):
+        """
+        Normalize inputs to two arg functions to fix type promotion rules
+
+        Returns ``(x1, x2)``, where the 0-dimensional argument (if exactly
+        one of the two is 0-D) has been given an extra leading dimension.
+
+        NumPy deviates from the spec type promotion rules in cases where one
+        argument is 0-dimensional and the other is not. For example:
+
+        >>> import numpy as np
+        >>> a = np.array([1.0], dtype=np.float32)
+        >>> b = np.array(1.0, dtype=np.float64)
+        >>> np.add(a, b) # The spec says this should be float64
+        array([2.], dtype=float32)
+
+        To fix this, we add a dimension to the 0-dimension array before passing it
+        through. This works because a dimension would be added anyway from
+        broadcasting, so the resulting shape is the same, but this prevents NumPy
+        from not promoting the dtype.
+        """
+        # Another option would be to use signature=(x1.dtype, x2.dtype, None),
+        # but that only works for ufuncs, so we would have to call the ufuncs
+        # directly in the operator methods. One should also note that this
+        # sort of trick wouldn't work for functions like searchsorted, which
+        # don't do normal broadcasting, but there aren't any functions like
+        # that in the array API namespace.
+        if x1.ndim == 0 and x2.ndim != 0:
+            # The _array[None] workaround was chosen because it is relatively
+            # performant. broadcast_to(x1._array, x2.shape) is much slower. We
+            # could also manually type promote x2, but that is more complicated
+            # and about the same performance as this.
+            x1 = Array._new(x1._array[None])
+        elif x2.ndim == 0 and x1.ndim != 0:
+            x2 = Array._new(x2._array[None])
+        return (x1, x2)
+
+    # Note: A large fraction of allowed indices are disallowed here (see the
+    # docstring below)
+    @staticmethod
+    def _validate_index(key, shape):
+        """
+        Validate an index according to the array API.
+
+        The array API specification only requires a subset of indices that are
+        supported by NumPy. This function will reject any index that is
+        allowed by NumPy but not required by the array API specification. We
+        always raise ``IndexError`` on such indices (the spec does not require
+        any specific behavior on them, but this makes the NumPy array API
+        namespace a minimal implementation of the spec). See
+        https://data-apis.org/array-api/latest/API_specification/indexing.html
+        for the full list of required indexing behavior
+
+        This function either raises IndexError if the index ``key`` is
+        invalid, or returns a new key to be used in place of ``key`` in
+        indexing. It only raises ``IndexError`` on indices that are not
+        already rejected by NumPy, as NumPy will already raise the
+        appropriate error on such indices. ``shape`` may be None, in which
+        case, only cases that are independent of the array shape are checked.
+
+        The following cases are allowed by NumPy, but not specified by the array
+        API specification:
+
+        - The start and stop of a slice may not be out of bounds. In
+          particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
+          following are allowed:
+
+          - ``i`` or ``j`` omitted (``None``).
+          - ``-n <= i <= max(0, n - 1)``.
+          - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+          - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+        - Boolean array indices are not allowed as part of a larger tuple
+          index.
+
+        - Integer array indices are not allowed (with the exception of 0-D
+          arrays, which are treated the same as scalars).
+
+        Additionally, it should be noted that indices that would return a
+        scalar in NumPy will return a 0-D array. Array scalars are not allowed
+        in the specification, only 0-D arrays. This is done in the
+        ``Array._new`` constructor, not this function.
+
+        """
+        if isinstance(key, slice):
+            if shape is None:
+                return key
+            if shape == ():
+                return key
+            size = shape[0]
+            # Ensure invalid slice entries are passed through.
+            if key.start is not None:
+                try:
+                    operator.index(key.start)
+                except TypeError:
+                    return key
+                if not (-size <= key.start <= max(0, size - 1)):
+                    raise IndexError(
+                        "Slices with out-of-bounds start are not allowed in the array API namespace"
+                    )
+            if key.stop is not None:
+                try:
+                    operator.index(key.stop)
+                except TypeError:
+                    return key
+                step = 1 if key.step is None else key.step
+                if (step > 0 and not (-size <= key.stop <= size)
+                    or step < 0 and not (-size - 1 <= key.stop <= max(0, size - 1))):
+                    raise IndexError("Slices with out-of-bounds stop are not allowed in the array API namespace")
+            return key
+
+        elif isinstance(key, tuple):
+            # Validate each element shape-independently first, then re-check
+            # the combinations that are only invalid inside a tuple.
+            key = tuple(Array._validate_index(idx, None) for idx in key)
+
+            for idx in key:
+                if (
+                    isinstance(idx, np.ndarray)
+                    and idx.dtype in _boolean_dtypes
+                    or isinstance(idx, (bool, np.bool_))
+                ):
+                    if len(key) == 1:
+                        return key
+                    raise IndexError(
+                        "Boolean array indices combined with other indices are not allowed in the array API namespace"
+                    )
+                if isinstance(idx, tuple):
+                    raise IndexError(
+                        "Nested tuple indices are not allowed in the array API namespace"
+                    )
+
+            if shape is None:
+                return key
+            n_ellipsis = key.count(...)
+            if n_ellipsis > 1:
+                # Multiple ellipses: pass through and let NumPy raise its own
+                # IndexError for this already-invalid index.
+                return key
+            ellipsis_i = key.index(...) if n_ellipsis else len(key)
+
+            # Check the dims before the ellipsis left-to-right against the
+            # leading shape entries, and the dims after it right-to-left
+            # against the trailing ones, so the ellipsis absorbs the middle.
+            for idx, size in list(zip(key[:ellipsis_i], shape)) + list(
+                zip(key[:ellipsis_i:-1], shape[:ellipsis_i:-1])
+            ):
+                Array._validate_index(idx, (size,))
+            return key
+        elif isinstance(key, bool):
+            return key
+        elif isinstance(key, Array):
+            if key.dtype in _integer_dtypes:
+                if key.ndim != 0:
+                    raise IndexError(
+                        "Non-zero dimensional integer array indices are not allowed in the array API namespace"
+                    )
+            # Unwrap to the underlying ndarray so NumPy can index with it.
+            return key._array
+        elif key is Ellipsis:
+            return key
+        elif key is None:
+            raise IndexError(
+                "newaxis indices are not allowed in the array API namespace"
+            )
+        try:
+            return operator.index(key)
+        except TypeError:
+            # Note: This also omits boolean arrays that are not already in
+            # Array() form, like a list of booleans.
+            raise IndexError(
+                "Only integers, slices (`:`), ellipsis (`...`), and boolean arrays are valid indices in the array API namespace"
+            )
+
+ # Everything below this line is required by the spec.
+
+ def __abs__(self: Array, /) -> Array:
+ """
+ Performs the operation __abs__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __abs__")
+ res = self._array.__abs__()
+ return self.__class__._new(res)
+
+ def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __add__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__add__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__add__(other._array)
+ return self.__class__._new(res)
+
+ def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __and__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__and__(other._array)
+ return self.__class__._new(res)
+
+ def __array_namespace__(
+ self: Array, /, *, api_version: Optional[str] = None
+ ) -> object:
+ if api_version is not None and not api_version.startswith("2021."):
+ raise ValueError(f"Unrecognized array API version: {api_version!r}")
+ return array_api
+
+    def __bool__(self: Array, /) -> bool:
+        """
+        Performs the operation __bool__.
+        """
+        # The spec only allows converting 0-D arrays to Python scalars.
+        if self._array.ndim != 0:
+            raise TypeError("bool is only allowed on arrays with 0 dimensions")
+        res = self._array.__bool__()
+        return res
+
+ def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
+ """
+ Performs the operation __dlpack__.
+ """
+ res = self._array.__dlpack__(stream=stream)
+ return self.__class__._new(res)
+
+ def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
+ """
+ Performs the operation __dlpack_device__.
+ """
+ # Note: device support is required for this
+ res = self._array.__dlpack_device__()
+ return self.__class__._new(res)
+
+ def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __eq__.
+ """
+ # Even though "all" dtypes are allowed, we still require them to be
+ # promotable with each other.
+ other = self._check_allowed_dtypes(other, "all", "__eq__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__eq__(other._array)
+ return self.__class__._new(res)
+
+ def __float__(self: Array, /) -> float:
+ """
+ Performs the operation __float__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("float is only allowed on arrays with 0 dimensions")
+ res = self._array.__float__()
+ return res
+
+ def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __floordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__floordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ge__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ge__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ge__(other._array)
+ return self.__class__._new(res)
+
+ def __getitem__(
+ self: Array,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ /,
+ ) -> Array:
+ """
+ Performs the operation __getitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ key = self._validate_index(key, self.shape)
+ res = self._array.__getitem__(key)
+ return self._new(res)
+
+ def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __gt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__gt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__gt__(other._array)
+ return self.__class__._new(res)
+
+ def __int__(self: Array, /) -> int:
+ """
+ Performs the operation __int__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("int is only allowed on arrays with 0 dimensions")
+ res = self._array.__int__()
+ return res
+
+ def __invert__(self: Array, /) -> Array:
+ """
+ Performs the operation __invert__.
+ """
+ if self.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
+ res = self._array.__invert__()
+ return self.__class__._new(res)
+
+ def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __le__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__le__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__le__(other._array)
+ return self.__class__._new(res)
+
+ # Note: __len__ may end up being removed from the array API spec.
+ def __len__(self, /) -> int:
+ """
+ Performs the operation __len__.
+ """
+ return self._array.__len__()
+
+ def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __lshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__lshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lshift__(other._array)
+ return self.__class__._new(res)
+
+ def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __lt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__lt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lt__(other._array)
+ return self.__class__._new(res)
+
+ def __matmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __matmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__matmul__(other._array)
+ return self.__class__._new(res)
+
+ def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mod__(other._array)
+ return self.__class__._new(res)
+
+ def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mul__(other._array)
+ return self.__class__._new(res)
+
+ def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __ne__.
+ """
+ other = self._check_allowed_dtypes(other, "all", "__ne__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ne__(other._array)
+ return self.__class__._new(res)
+
+ def __neg__(self: Array, /) -> Array:
+ """
+ Performs the operation __neg__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __neg__")
+ res = self._array.__neg__()
+ return self.__class__._new(res)
+
+ def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __or__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__or__(other._array)
+ return self.__class__._new(res)
+
+ def __pos__(self: Array, /) -> Array:
+ """
+ Performs the operation __pos__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __pos__")
+ res = self._array.__pos__()
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __pow__ should not
+ # accept int.
+ def __pow__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __pow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "floating-point", "__pow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow type promotion rules for 0-d
+ # arrays, so we use pow() here instead.
+ return pow(self, other)
+
+ def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rshift__(other._array)
+ return self.__class__._new(res)
+
+ def __setitem__(
+ self,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ value: Union[int, float, bool, Array],
+ /,
+ ) -> None:
+ """
+ Performs the operation __setitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ key = self._validate_index(key, self.shape)
+ self._array.__setitem__(key, asarray(value)._array)
+
+ def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __sub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__sub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__sub__(other._array)
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __truediv__ should
+ # not accept int.
+ def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __truediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__truediv__(other._array)
+ return self.__class__._new(res)
+
+ def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __xor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__xor__(other._array)
+ return self.__class__._new(res)
+
+ def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __iadd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
+ if other is NotImplemented:
+ return other
+ self._array.__iadd__(other._array)
+ return self
+
+ def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __radd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__radd__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__radd__(other._array)
+ return self.__class__._new(res)
+
+ def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __iand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
+ if other is NotImplemented:
+ return other
+ self._array.__iand__(other._array)
+ return self
+
+ def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rand__(other._array)
+ return self.__class__._new(res)
+
+ def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ifloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
+ if other is NotImplemented:
+ return other
+ self._array.__ifloordiv__(other._array)
+ return self
+
+ def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rfloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rfloordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __ilshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__ilshift__(other._array)
+ return self
+
+ def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rlshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rlshift__(other._array)
+ return self.__class__._new(res)
+
+ def __imatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __imatmul__.
+ """
+ # Note: NumPy does not implement __imatmul__.
+
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
+ if other is NotImplemented:
+ return other
+
+ # __imatmul__ can only be allowed when it would not change the shape
+ # of self.
+ other_shape = other.shape
+ if self.shape == () or other_shape == ():
+ raise ValueError("@= requires at least one dimension")
+ if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
+ raise ValueError("@= cannot change the shape of the input array")
+ self._array[:] = self._array.__matmul__(other._array)
+ return self
+
+ def __rmatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __rmatmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__rmatmul__(other._array)
+ return self.__class__._new(res)
+
+ def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imod__")
+ if other is NotImplemented:
+ return other
+ self._array.__imod__(other._array)
+ return self
+
+ def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmod__(other._array)
+ return self.__class__._new(res)
+
+ def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imul__")
+ if other is NotImplemented:
+ return other
+ self._array.__imul__(other._array)
+ return self
+
+ def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmul__(other._array)
+ return self.__class__._new(res)
+
+ def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ior__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
+ if other is NotImplemented:
+ return other
+ self._array.__ior__(other._array)
+ return self
+
+ def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ror__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ror__(other._array)
+ return self.__class__._new(res)
+
+ def __ipow__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __ipow__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__ipow__")
+ if other is NotImplemented:
+ return other
+ self._array.__ipow__(other._array)
+ return self
+
+ def __rpow__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rpow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "floating-point", "__rpow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow the spec type promotion rules
+ # for 0-d arrays, so we use pow() here instead.
+ return pow(other, self)
+
+ def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __irshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__irshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__irshift__(other._array)
+ return self
+
+ def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rrshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rrshift__(other._array)
+ return self.__class__._new(res)
+
+ def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __isub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__isub__")
+ if other is NotImplemented:
+ return other
+ self._array.__isub__(other._array)
+ return self
+
+ def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rsub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rsub__(other._array)
+ return self.__class__._new(res)
+
+ def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __itruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
+ if other is NotImplemented:
+ return other
+ self._array.__itruediv__(other._array)
+ return self
+
+ def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rtruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rtruediv__(other._array)
+ return self.__class__._new(res)
+
+ def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ixor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
+ if other is NotImplemented:
+ return other
+ self._array.__ixor__(other._array)
+ return self
+
+ def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rxor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rxor__(other._array)
+ return self.__class__._new(res)
+
+ @property
+ def dtype(self) -> Dtype:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
+
+ See its docstring for more information.
+ """
+ return self._array.dtype
+
+ @property
+ def device(self) -> Device:
+ return "cpu"
+
+ @property
+ def ndim(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
+
+ See its docstring for more information.
+ """
+ return self._array.ndim
+
+ @property
+ def shape(self) -> Tuple[int, ...]:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
+
+ See its docstring for more information.
+ """
+ return self._array.shape
+
+ @property
+ def size(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
+
+ See its docstring for more information.
+ """
+ return self._array.size
+
+ @property
+ def T(self) -> Array:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
+
+ See its docstring for more information.
+ """
+ return self._array.T
diff --git a/numpy/array_api/_constants.py b/numpy/array_api/_constants.py
new file mode 100644
index 000000000..9541941e7
--- /dev/null
+++ b/numpy/array_api/_constants.py
@@ -0,0 +1,6 @@
import numpy as np

# Mathematical/IEEE-754 constants required by the array API spec,
# re-exported from NumPy.
e = np.e
inf = np.inf
nan = np.nan
pi = np.pi
diff --git a/numpy/array_api/_creation_functions.py b/numpy/array_api/_creation_functions.py
new file mode 100644
index 000000000..e9c01e7e6
--- /dev/null
+++ b/numpy/array_api/_creation_functions.py
@@ -0,0 +1,316 @@
+from __future__ import annotations
+
+
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import (
+ Array,
+ Device,
+ Dtype,
+ NestedSequence,
+ SupportsDLPack,
+ SupportsBufferProtocol,
+ )
+ from collections.abc import Sequence
+from ._dtypes import _all_dtypes
+
+import numpy as np
+
+
def _check_valid_dtype(dtype):
    """Raise ValueError unless *dtype* is None or one of the spec dtypes."""
    # Note: Only spelling dtypes as the dtype objects is supported.
    #
    # Identity comparison is used instead of "dtype in _all_dtypes" because
    # the dtype objects define equality with the sorts of things we want to
    # disallow.
    if any(dtype is valid for valid in (None,) + _all_dtypes):
        return
    raise ValueError("dtype must be one of the supported dtypes")
+
+
def asarray(
    obj: Union[
        Array,
        bool,
        int,
        float,
        NestedSequence[bool | int | float],
        SupportsDLPack,
        SupportsBufferProtocol,
    ],
    /,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    copy: Optional[bool] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.

    See its docstring for more information.

    Raises
    ------
    ValueError
        If *dtype* is not a supported dtype or *device* is not ``"cpu"``.
    OverflowError
        If *obj* is a Python int that does not fit in int64/uint64.
    NotImplementedError
        If ``copy=False`` is requested.
    """
    # _array_object imports in this file are inside the functions to avoid
    # circular imports
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    if copy is False:
        # Note: copy=False is not yet implemented in np.asarray
        raise NotImplementedError("copy=False is not yet implemented")
    if isinstance(obj, Array) and (dtype is None or obj.dtype == dtype):
        if copy is True:
            return Array._new(np.array(obj._array, copy=True, dtype=dtype))
        # No copy requested and dtype already matches: return the input as-is.
        return obj
    # Bounds are uint64 max (2**64 - 1) and int64 min (-2**63). The previous
    # check compared against 2**64 and let 2**64 itself slip through to the
    # object-array path this guard exists to prevent.
    if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 - 1 or obj < -(2 ** 63)):
        # Give a better error message in this case. NumPy would convert this
        # to an object array. TODO: This won't handle large integers in lists.
        raise OverflowError("Integer out of bounds for array dtypes")
    res = np.asarray(obj, dtype=dtype)
    return Array._new(res)
+
+
def arange(
    start: Union[int, float],
    /,
    stop: Optional[Union[int, float]] = None,
    step: Union[int, float] = 1,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    res = np.arange(start, stop=stop, step=step, dtype=dtype)
    return Array._new(res)
+
+
def empty(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.empty(shape, dtype=dtype))
+
+
def empty_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    res = np.empty_like(x._array, dtype=dtype)
    return Array._new(res)
+
+
def eye(
    n_rows: int,
    n_cols: Optional[int] = None,
    /,
    *,
    k: int = 0,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.

    See its docstring for more information.

    Note: ``k`` is annotated as a plain ``int`` (previously
    ``Optional[int]``) — the spec requires an int and ``np.eye`` does not
    accept ``k=None``.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
+
+
def from_dlpack(x: object, /) -> Array:
    """
    Construct an Array from an object implementing the DLPack protocol.

    Not yet supported by this implementation; always raises.
    """
    # Note: dlpack support is not yet implemented on Array
    raise NotImplementedError("DLPack support is not yet implemented")
+
+
def full(
    shape: Union[int, Tuple[int, ...]],
    fill_value: Union[int, float],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.full <numpy.full>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    if isinstance(fill_value, Array) and fill_value.ndim == 0:
        # A 0-dimensional Array is accepted as the fill value; unwrap it so
        # np.full sees the underlying NumPy value.
        fill_value = fill_value._array
    res = np.full(shape, fill_value, dtype=dtype)
    if res.dtype not in _all_dtypes:
        # This will happen if the fill value is not something that NumPy
        # coerces to one of the acceptable dtypes.
        raise TypeError("Invalid input to full")
    return Array._new(res)
+
+
def full_like(
    x: Array,
    /,
    fill_value: Union[int, float],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    res = np.full_like(x._array, fill_value, dtype=dtype)
    if res.dtype not in _all_dtypes:
        # This will happen if the fill value is not something that NumPy
        # coerces to one of the acceptable dtypes.
        raise TypeError("Invalid input to full_like")
    return Array._new(res)
+
+
def linspace(
    start: Union[int, float],
    stop: Union[int, float],
    /,
    num: int,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    endpoint: bool = True,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    res = np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint)
    return Array._new(res)
+
+
def meshgrid(*arrays: Sequence[Array], indexing: str = "xy") -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.

    See its docstring for more information.

    Note: the return annotation was ``List[Array, ...]``, which is invalid
    typing syntax (``List`` takes exactly one parameter); it is ``List[Array]``.
    """
    from ._array_object import Array

    return [
        Array._new(array)
        for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
    ]
+
+
def ones(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    res = np.ones(shape, dtype=dtype)
    return Array._new(res)
+
+
def ones_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.ones_like(x._array, dtype=dtype))
+
+
def zeros(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ["cpu", None]:
        raise ValueError(f"Unsupported device {device!r}")
    return Array._new(np.zeros(shape, dtype=dtype))
+
+
def zeros_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.

    See its docstring for more information.
    """
    # Local import avoids a circular import with _array_object.
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device is not None and device != "cpu":
        raise ValueError(f"Unsupported device {device!r}")
    res = np.zeros_like(x._array, dtype=dtype)
    return Array._new(res)
diff --git a/numpy/array_api/_data_type_functions.py b/numpy/array_api/_data_type_functions.py
new file mode 100644
index 000000000..fd92aa250
--- /dev/null
+++ b/numpy/array_api/_data_type_functions.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _all_dtypes, _result_type
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import Dtype
+ from collections.abc import Sequence
+
+import numpy as np
+
+
def broadcast_arrays(*arrays: Sequence[Array]) -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.

    See its docstring for more information.
    """
    # Note: Array is already imported at module level in this file; the
    # function-local re-import used in _creation_functions.py (needed there
    # to break a circular import) is redundant here and has been removed.
    return [
        Array._new(array) for array in np.broadcast_arrays(*[a._array for a in arrays])
    ]
+
+
def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.

    See its docstring for more information.
    """
    # Note: Array is already imported at module level in this file; the
    # redundant function-local re-import has been removed.
    return Array._new(np.broadcast_to(x._array, shape))
+
+
def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
    """
    Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.

    See its docstring for more information.
    """
    # Note: Array is already imported at module level in this file; the
    # redundant function-local re-import has been removed.
    if isinstance(from_, Array):
        # Pass the underlying ndarray through to np.can_cast.
        from_ = from_._array
    return np.can_cast(from_, to)
+
+
# These are internal objects for the return types of finfo and iinfo, since
# the NumPy versions contain extra data that isn't part of the spec.
@dataclass
class finfo_object:
    """Spec-mandated subset of ``np.finfo`` fields."""

    bits: int
    # Note: The types of the float data here are float, whereas in NumPy they
    # are scalars of the corresponding float dtype.
    eps: float
    max: float
    min: float
    smallest_normal: float


@dataclass
class iinfo_object:
    """Spec-mandated subset of ``np.iinfo`` fields."""

    bits: int
    max: int
    min: int


def finfo(type: Union[Dtype, Array], /) -> finfo_object:
    """
    Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.

    See its docstring for more information.
    """
    info = np.finfo(type)
    # Convert NumPy float scalars to plain Python floats, per the spec.
    return finfo_object(
        bits=info.bits,
        eps=float(info.eps),
        max=float(info.max),
        min=float(info.min),
        smallest_normal=float(info.smallest_normal),
    )


def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
    """
    Array API compatible wrapper for :py:func:`np.iinfo <numpy.iinfo>`.

    See its docstring for more information.
    """
    info = np.iinfo(type)
    return iinfo_object(bits=info.bits, max=info.max, min=info.min)
+
+
def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype:
    """
    Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.

    See its docstring for more information.
    """
    # Note: we use a custom implementation that gives only the type promotions
    # required by the spec rather than using np.result_type. NumPy implements
    # too many extra type promotions like int64 + uint64 -> float64, and does
    # value-based casting on scalar arrays.
    dtypes = []
    for item in arrays_and_dtypes:
        if isinstance(item, Array):
            item = item.dtype
        elif isinstance(item, np.ndarray) or item not in _all_dtypes:
            raise TypeError("result_type() inputs must be array_api arrays or dtypes")
        dtypes.append(item)

    if not dtypes:
        raise ValueError("at least one array or dtype is required")
    # Fold the pairwise promotion over the list; a single input is returned
    # unchanged since the loop body never runs.
    result = dtypes[0]
    for d in dtypes[1:]:
        result = _result_type(result, d)
    return result
diff --git a/numpy/array_api/_dtypes.py b/numpy/array_api/_dtypes.py
new file mode 100644
index 000000000..476d619fe
--- /dev/null
+++ b/numpy/array_api/_dtypes.py
@@ -0,0 +1,143 @@
+import numpy as np
+
+# Note: we use dtype objects instead of dtype classes. The spec does not
+# require any behavior on dtypes other than equality.
+int8 = np.dtype("int8")
+int16 = np.dtype("int16")
+int32 = np.dtype("int32")
+int64 = np.dtype("int64")
+uint8 = np.dtype("uint8")
+uint16 = np.dtype("uint16")
+uint32 = np.dtype("uint32")
+uint64 = np.dtype("uint64")
+float32 = np.dtype("float32")
+float64 = np.dtype("float64")
+# Note: This name is changed
+bool = np.dtype("bool")
+
+_all_dtypes = (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+_boolean_dtypes = (bool,)
+_floating_dtypes = (float32, float64)
+_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
+_integer_or_boolean_dtypes = (
+ bool,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+_numeric_dtypes = (
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+
+_dtype_categories = {
+ "all": _all_dtypes,
+ "numeric": _numeric_dtypes,
+ "integer": _integer_dtypes,
+ "integer or boolean": _integer_or_boolean_dtypes,
+ "boolean": _boolean_dtypes,
+ "floating-point": _floating_dtypes,
+}
+
+
+# Note: the spec defines a restricted type promotion table compared to NumPy.
+# In particular, cross-kind promotions like integer + float or boolean +
+# integer are not allowed, even for functions that accept both kinds.
+# Additionally, NumPy promotes signed integer + uint64 to float64, but this
+# promotion is not allowed here. To be clear, Python scalar int objects are
+# allowed to promote to floating-point dtypes, but only in array operators
+# (see Array._promote_scalar) method in _array_object.py.
+_promotion_table = {
+ (int8, int8): int8,
+ (int8, int16): int16,
+ (int8, int32): int32,
+ (int8, int64): int64,
+ (int16, int8): int16,
+ (int16, int16): int16,
+ (int16, int32): int32,
+ (int16, int64): int64,
+ (int32, int8): int32,
+ (int32, int16): int32,
+ (int32, int32): int32,
+ (int32, int64): int64,
+ (int64, int8): int64,
+ (int64, int16): int64,
+ (int64, int32): int64,
+ (int64, int64): int64,
+ (uint8, uint8): uint8,
+ (uint8, uint16): uint16,
+ (uint8, uint32): uint32,
+ (uint8, uint64): uint64,
+ (uint16, uint8): uint16,
+ (uint16, uint16): uint16,
+ (uint16, uint32): uint32,
+ (uint16, uint64): uint64,
+ (uint32, uint8): uint32,
+ (uint32, uint16): uint32,
+ (uint32, uint32): uint32,
+ (uint32, uint64): uint64,
+ (uint64, uint8): uint64,
+ (uint64, uint16): uint64,
+ (uint64, uint32): uint64,
+ (uint64, uint64): uint64,
+ (int8, uint8): int16,
+ (int8, uint16): int32,
+ (int8, uint32): int64,
+ (int16, uint8): int16,
+ (int16, uint16): int32,
+ (int16, uint32): int64,
+ (int32, uint8): int32,
+ (int32, uint16): int32,
+ (int32, uint32): int64,
+ (int64, uint8): int64,
+ (int64, uint16): int64,
+ (int64, uint32): int64,
+ (uint8, int8): int16,
+ (uint16, int8): int32,
+ (uint32, int8): int64,
+ (uint8, int16): int16,
+ (uint16, int16): int32,
+ (uint32, int16): int64,
+ (uint8, int32): int32,
+ (uint16, int32): int32,
+ (uint32, int32): int64,
+ (uint8, int64): int64,
+ (uint16, int64): int64,
+ (uint32, int64): int64,
+ (float32, float32): float32,
+ (float32, float64): float64,
+ (float64, float32): float64,
+ (float64, float64): float64,
+ (bool, bool): bool,
+}
+
+
+def _result_type(type1, type2):
+ if (type1, type2) in _promotion_table:
+ return _promotion_table[type1, type2]
+ raise TypeError(f"{type1} and {type2} cannot be type promoted together")
diff --git a/numpy/array_api/_elementwise_functions.py b/numpy/array_api/_elementwise_functions.py
new file mode 100644
index 000000000..4408fe833
--- /dev/null
+++ b/numpy/array_api/_elementwise_functions.py
@@ -0,0 +1,729 @@
+from __future__ import annotations
+
+from ._dtypes import (
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ _result_type,
+)
+from ._array_object import Array
+
+import numpy as np
+
+
def abs(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.abs <numpy.abs>`.

    See its docstring for more information.
    """
    # The spec restricts abs to the numeric dtypes (bool is excluded).
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in abs")
    result = np.abs(x._array)
    return Array._new(result)
+
+
+# Note: the function name is different here
+def acos(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arccos <numpy.arccos>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in acos")
+ return Array._new(np.arccos(x._array))
+
+
+# Note: the function name is different here
+def acosh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arccosh <numpy.arccosh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in acosh")
+ return Array._new(np.arccosh(x._array))
+
+
def add(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.add <numpy.add>`.

    See its docstring for more information.
    """
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in add")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    # Presumably reconciles 0-d/scalar operands before delegating to NumPy
    # broadcasting — confirm in Array._normalize_two_args.
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.add(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def asin(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arcsin <numpy.arcsin>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in asin")
+ return Array._new(np.arcsin(x._array))
+
+
+# Note: the function name is different here
+def asinh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arcsinh <numpy.arcsinh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in asinh")
+ return Array._new(np.arcsinh(x._array))
+
+
+# Note: the function name is different here
+def atan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctan <numpy.arctan>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atan")
+ return Array._new(np.arctan(x._array))
+
+
+# Note: the function name is different here
+def atan2(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atan2")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.arctan2(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def atanh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctanh <numpy.arctanh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atanh")
+ return Array._new(np.arctanh(x._array))
+
+
+def bitwise_and(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_and(x1._array, x2._array))
+
+
+# Note: the function name is different here
def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.left_shift <numpy.left_shift>`.

    See its docstring for more information.
    """
    if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
        raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    # Note: bitwise_left_shift is only defined for x2 nonnegative.
    # Negative shift counts are rejected here rather than left to NumPy —
    # presumably because NumPy's result for them is unspecified; confirm.
    if np.any(x2._array < 0):
        raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
    return Array._new(np.left_shift(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_invert(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.invert <numpy.invert>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
+ return Array._new(np.invert(x._array))
+
+
+def bitwise_or(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_or <numpy.bitwise_or>`.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_or(x1._array, x2._array))
+
+
+# Note: the function name is different here
def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.right_shift <numpy.right_shift>`.

    See its docstring for more information.
    """
    if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
        raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    # Note: bitwise_right_shift is only defined for x2 nonnegative.
    # Negative shift counts are rejected explicitly, mirroring
    # bitwise_left_shift above.
    if np.any(x2._array < 0):
        raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
    return Array._new(np.right_shift(x1._array, x2._array))
+
+
+def bitwise_xor(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_xor <numpy.bitwise_xor>`.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_xor(x1._array, x2._array))
+
+
+def ceil(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in ceil")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of ceil is the same as the input
+ return x
+ return Array._new(np.ceil(x._array))
+
+
+def cos(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cos <numpy.cos>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in cos")
+ return Array._new(np.cos(x._array))
+
+
+def cosh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cosh <numpy.cosh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in cosh")
+ return Array._new(np.cosh(x._array))
+
+
+def divide(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in divide")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.divide(x1._array, x2._array))
+
+
def equal(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`.

    See its docstring for more information.
    """
    # Note: unlike most two-argument functions in this module, there is no
    # dtype-category check here — equality is accepted for all dtypes.
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.equal(x1._array, x2._array))
+
+
+def exp(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.exp <numpy.exp>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in exp")
+ return Array._new(np.exp(x._array))
+
+
+def expm1(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.expm1 <numpy.expm1>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in expm1")
+ return Array._new(np.expm1(x._array))
+
+
+def floor(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.floor <numpy.floor>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in floor")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of floor is the same as the input
+ return x
+ return Array._new(np.floor(x._array))
+
+
+def floor_divide(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.floor_divide <numpy.floor_divide>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in floor_divide")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.floor_divide(x1._array, x2._array))
+
+
+def greater(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.greater <numpy.greater>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in greater")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.greater(x1._array, x2._array))
+
+
+def greater_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.greater_equal <numpy.greater_equal>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in greater_equal")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.greater_equal(x1._array, x2._array))
+
+
+def isfinite(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isfinite <numpy.isfinite>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isfinite")
+ return Array._new(np.isfinite(x._array))
+
+
+def isinf(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isinf <numpy.isinf>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isinf")
+ return Array._new(np.isinf(x._array))
+
+
+def isnan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isnan")
+ return Array._new(np.isnan(x._array))
+
+
+def less(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.less <numpy.less>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in less")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.less(x1._array, x2._array))
+
+
+def less_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.less_equal <numpy.less_equal>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in less_equal")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.less_equal(x1._array, x2._array))
+
+
+def log(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log <numpy.log>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log")
+ return Array._new(np.log(x._array))
+
+
+def log1p(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log1p <numpy.log1p>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log1p")
+ return Array._new(np.log1p(x._array))
+
+
+def log2(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log2")
+ return Array._new(np.log2(x._array))
+
+
+def log10(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log10")
+ return Array._new(np.log10(x._array))
+
+
def logaddexp(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logaddexp <numpy.logaddexp>`.

    See its docstring for more information.

    Note: x1 and x2 are positional-only, matching every other two-argument
    elementwise function in this module (the "/" marker was previously
    missing from this signature).
    """
    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in logaddexp")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.logaddexp(x1._array, x2._array))
+
+
def logical_and(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`.

    See its docstring for more information.
    """
    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
        raise TypeError("Only boolean dtypes are allowed in logical_and")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.logical_and(a._array, b._array)
    return Array._new(result)
+
+
def logical_not(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`.

    See its docstring for more information.
    """
    if x.dtype in _boolean_dtypes:
        return Array._new(np.logical_not(x._array))
    raise TypeError("Only boolean dtypes are allowed in logical_not")
+
+
def logical_or(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logical_or <numpy.logical_or>`.

    See its docstring for more information.
    """
    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
        raise TypeError("Only boolean dtypes are allowed in logical_or")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.logical_or(a._array, b._array)
    return Array._new(result)
+
+
def logical_xor(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.logical_xor <numpy.logical_xor>`.

    See its docstring for more information.
    """
    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
        raise TypeError("Only boolean dtypes are allowed in logical_xor")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.logical_xor(a._array, b._array)
    return Array._new(result)
+
+
def multiply(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.multiply <numpy.multiply>`.

    See its docstring for more information.
    """
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in multiply")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.multiply(a._array, b._array)
    return Array._new(result)
+
+
def negative(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.negative <numpy.negative>`.

    See its docstring for more information.
    """
    if x.dtype in _numeric_dtypes:
        return Array._new(np.negative(x._array))
    raise TypeError("Only numeric dtypes are allowed in negative")
+
+
def not_equal(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`.

    See its docstring for more information.
    """
    # No dtype restriction here: (in)equality is defined for every dtype.
    # _result_type still raises on disallowed dtype combinations.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.not_equal(a._array, b._array)
    return Array._new(result)
+
+
def positive(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.

    See its docstring for more information.
    """
    if x.dtype in _numeric_dtypes:
        return Array._new(np.positive(x._array))
    raise TypeError("Only numeric dtypes are allowed in positive")
+
+
# Note: the function name is different here
def pow(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.power <numpy.power>`.

    See its docstring for more information.
    """
    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in pow")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.power(a._array, b._array)
    return Array._new(result)
+
+
def remainder(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.remainder <numpy.remainder>`.

    See its docstring for more information.
    """
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in remainder")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.remainder(a._array, b._array)
    return Array._new(result)
+
+
def round(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.round <numpy.round>`.

    See its docstring for more information.
    """
    if x.dtype in _numeric_dtypes:
        return Array._new(np.round(x._array))
    raise TypeError("Only numeric dtypes are allowed in round")
+
+
def sign(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`.

    See its docstring for more information.
    """
    if x.dtype in _numeric_dtypes:
        return Array._new(np.sign(x._array))
    raise TypeError("Only numeric dtypes are allowed in sign")
+
+
def sin(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sin <numpy.sin>`.

    See its docstring for more information.
    """
    if x.dtype in _floating_dtypes:
        return Array._new(np.sin(x._array))
    raise TypeError("Only floating-point dtypes are allowed in sin")
+
+
def sinh(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sinh <numpy.sinh>`.

    See its docstring for more information.
    """
    if x.dtype in _floating_dtypes:
        return Array._new(np.sinh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in sinh")
+
+
def square(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.square <numpy.square>`.

    See its docstring for more information.
    """
    if x.dtype in _numeric_dtypes:
        return Array._new(np.square(x._array))
    raise TypeError("Only numeric dtypes are allowed in square")
+
+
def sqrt(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sqrt <numpy.sqrt>`.

    See its docstring for more information.
    """
    if x.dtype in _floating_dtypes:
        return Array._new(np.sqrt(x._array))
    raise TypeError("Only floating-point dtypes are allowed in sqrt")
+
+
def subtract(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.subtract <numpy.subtract>`.

    See its docstring for more information.
    """
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in subtract")
    # _result_type raises on disallowed dtype combinations; value unused.
    _result_type(x1.dtype, x2.dtype)
    a, b = Array._normalize_two_args(x1, x2)
    result = np.subtract(a._array, b._array)
    return Array._new(result)
+
+
def tan(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.tan <numpy.tan>`.

    See its docstring for more information.
    """
    if x.dtype in _floating_dtypes:
        return Array._new(np.tan(x._array))
    raise TypeError("Only floating-point dtypes are allowed in tan")
+
+
def tanh(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.tanh <numpy.tanh>`.

    See its docstring for more information.
    """
    if x.dtype in _floating_dtypes:
        return Array._new(np.tanh(x._array))
    raise TypeError("Only floating-point dtypes are allowed in tanh")
+
+
def trunc(x: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.trunc <numpy.trunc>`.

    See its docstring for more information.
    """
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in trunc")
    # Integer inputs are already truncated, and trunc's return dtype is the
    # same as its input, so pass them through unchanged.
    return x if x.dtype in _integer_dtypes else Array._new(np.trunc(x._array))
diff --git a/numpy/array_api/_linear_algebra_functions.py b/numpy/array_api/_linear_algebra_functions.py
new file mode 100644
index 000000000..089081725
--- /dev/null
+++ b/numpy/array_api/_linear_algebra_functions.py
@@ -0,0 +1,68 @@
from __future__ import annotations

from ._array_object import Array
from ._dtypes import _numeric_dtypes, _result_type

from typing import Optional, Sequence, Tuple, Union

import numpy as np

# einsum is not yet implemented in the array API spec.

# def einsum():
#     """
#     Array API compatible wrapper for :py:func:`np.einsum <numpy.einsum>`.
#
#     See its docstring for more information.
#     """
#     return np.einsum()


def matmul(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`.

    See its docstring for more information.
    """
    # Note: the restriction to numeric dtypes only is different from
    # np.matmul.
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in matmul")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)

    return Array._new(np.matmul(x1._array, x2._array))


# Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like.
def tensordot(
    x1: Array,
    x2: Array,
    /,
    *,
    axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.tensordot <numpy.tensordot>`.

    See its docstring for more information.
    """
    # Note: the restriction to numeric dtypes only is different from
    # np.tensordot.
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in tensordot")
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)

    return Array._new(np.tensordot(x1._array, x2._array, axes=axes))


def transpose(x: Array, /, *, axes: Optional[Tuple[int, ...]] = None) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`.

    See its docstring for more information.
    """
    return Array._new(np.transpose(x._array, axes=axes))


# Note: vecdot is not in NumPy
def vecdot(x1: Array, x2: Array, /, *, axis: Optional[int] = None) -> Array:
    """
    Dot product of ``x1`` and ``x2`` along ``axis`` (last axis by default).

    Implemented in terms of :func:`tensordot`, contracting the single axis
    ``axis`` of each operand. NOTE(review): for inputs with more than one
    dimension tensordot combines all remaining axes rather than batching —
    confirm against the array API spec's vecdot semantics.
    """
    if axis is None:
        axis = -1
    return tensordot(x1, x2, axes=((axis,), (axis,)))
diff --git a/numpy/array_api/_manipulation_functions.py b/numpy/array_api/_manipulation_functions.py
new file mode 100644
index 000000000..c11866261
--- /dev/null
+++ b/numpy/array_api/_manipulation_functions.py
@@ -0,0 +1,86 @@
from __future__ import annotations

from ._array_object import Array
from ._data_type_functions import result_type

from typing import List, Optional, Tuple, Union

import numpy as np

# Note: the function name is different here
def concat(
    arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.

    See its docstring for more information.
    """
    # Note: Casting rules here are different from the np.concatenate default:
    # the common dtype is computed up front with the array API result_type,
    # which raises on disallowed (e.g. cross-kind) combinations.
    dtype = result_type(*arrays)
    arrays = tuple(a._array for a in arrays)
    return Array._new(np.concatenate(arrays, axis=axis, dtype=dtype))


def expand_dims(x: Array, /, *, axis: int) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.expand_dims <numpy.expand_dims>`.

    See its docstring for more information.
    """
    return Array._new(np.expand_dims(x._array, axis))


def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.flip <numpy.flip>`.

    See its docstring for more information.
    """
    return Array._new(np.flip(x._array, axis=axis))


def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`.

    See its docstring for more information.
    """
    return Array._new(np.reshape(x._array, shape))


def roll(
    x: Array,
    /,
    shift: Union[int, Tuple[int, ...]],
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.roll <numpy.roll>`.

    See its docstring for more information.
    """
    return Array._new(np.roll(x._array, shift, axis=axis))


def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.squeeze <numpy.squeeze>`.

    See its docstring for more information.

    Note: unlike np.squeeze, ``axis`` is required here (no default).
    """
    return Array._new(np.squeeze(x._array, axis=axis))


def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`.

    See its docstring for more information.
    """
    # Call result type here just to raise on disallowed type combinations
    result_type(*arrays)
    arrays = tuple(a._array for a in arrays)
    return Array._new(np.stack(arrays, axis=axis))
diff --git a/numpy/array_api/_searching_functions.py b/numpy/array_api/_searching_functions.py
new file mode 100644
index 000000000..3dcef61c3
--- /dev/null
+++ b/numpy/array_api/_searching_functions.py
@@ -0,0 +1,46 @@
from __future__ import annotations

from ._array_object import Array
from ._dtypes import _result_type

from typing import Optional, Tuple

import numpy as np


def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argmax <numpy.argmax>`.

    See its docstring for more information.
    """
    # np.asarray: a full reduction (axis=None) yields a scalar; wrap it so
    # the result is always backed by an ndarray.
    return Array._new(np.asarray(np.argmax(x._array, axis=axis, keepdims=keepdims)))


def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`.

    See its docstring for more information.
    """
    # np.asarray: see argmax above.
    return Array._new(np.asarray(np.argmin(x._array, axis=axis, keepdims=keepdims)))


def nonzero(x: Array, /) -> Tuple[Array, ...]:
    """
    Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`.

    See its docstring for more information.
    """
    # One index array per dimension of x, each re-wrapped as an Array.
    return tuple(Array._new(i) for i in np.nonzero(x._array))


def where(condition: Array, x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.where <numpy.where>`.

    See its docstring for more information.
    """
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    return Array._new(np.where(condition._array, x1._array, x2._array))
diff --git a/numpy/array_api/_set_functions.py b/numpy/array_api/_set_functions.py
new file mode 100644
index 000000000..357f238f5
--- /dev/null
+++ b/numpy/array_api/_set_functions.py
@@ -0,0 +1,31 @@
from __future__ import annotations

from ._array_object import Array

from typing import Tuple, Union

import numpy as np


def unique(
    x: Array,
    /,
    *,
    return_counts: bool = False,
    return_index: bool = False,
    return_inverse: bool = False,
) -> Union[Array, Tuple[Array, ...]]:
    """
    Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.

    See its docstring for more information.

    Returns a single Array of unique values, or — when any of the
    ``return_*`` flags is set — a tuple of Arrays.
    """
    res = np.unique(
        x._array,
        return_counts=return_counts,
        return_index=return_index,
        return_inverse=return_inverse,
    )
    # np.unique returns a tuple of arrays when any return_* flag is set;
    # re-wrap each component as an Array in that case.
    if isinstance(res, tuple):
        return tuple(Array._new(i) for i in res)
    return Array._new(res)
diff --git a/numpy/array_api/_sorting_functions.py b/numpy/array_api/_sorting_functions.py
new file mode 100644
index 000000000..9cd49786c
--- /dev/null
+++ b/numpy/array_api/_sorting_functions.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+import numpy as np
+
+
+def argsort(
+ x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.
+
+ See its docstring for more information.
+ """
+ # Note: this keyword argument is different, and the default is different.
+ kind = "stable" if stable else "quicksort"
+ res = np.argsort(x._array, axis=axis, kind=kind)
+ if descending:
+ res = np.flip(res, axis=axis)
+ return Array._new(res)
+
+
+def sort(
+ x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sort <numpy.sort>`.
+
+ See its docstring for more information.
+ """
+ # Note: this keyword argument is different, and the default is different.
+ kind = "stable" if stable else "quicksort"
+ res = np.sort(x._array, axis=axis, kind=kind)
+ if descending:
+ res = np.flip(res, axis=axis)
+ return Array._new(res)
diff --git a/numpy/array_api/_statistical_functions.py b/numpy/array_api/_statistical_functions.py
new file mode 100644
index 000000000..63790b447
--- /dev/null
+++ b/numpy/array_api/_statistical_functions.py
@@ -0,0 +1,81 @@
from __future__ import annotations

from ._array_object import Array

from typing import Optional, Tuple, Union

import numpy as np


def max(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.max <numpy.max>`.

    See its docstring for more information.
    """
    return Array._new(np.max(x._array, axis=axis, keepdims=keepdims))


def mean(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.mean <numpy.mean>`.

    See its docstring for more information.
    """
    return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims))


def min(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.min <numpy.min>`.

    See its docstring for more information.
    """
    return Array._new(np.min(x._array, axis=axis, keepdims=keepdims))


def prod(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.prod <numpy.prod>`.

    See its docstring for more information.
    """
    return Array._new(np.prod(x._array, axis=axis, keepdims=keepdims))


def std(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.std <numpy.std>`.

    See its docstring for more information.
    """
    # Note: the keyword argument correction is different here (maps to ddof)
    return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims))


def sum(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.sum <numpy.sum>`.

    See its docstring for more information.
    """
    return Array._new(np.sum(x._array, axis=axis, keepdims=keepdims))


def var(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,
    keepdims: bool = False,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.var <numpy.var>`.

    See its docstring for more information.
    """
    # Note: the keyword argument correction is different here (maps to ddof)
    return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims))
diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py
new file mode 100644
index 000000000..d530a91ae
--- /dev/null
+++ b/numpy/array_api/_typing.py
@@ -0,0 +1,44 @@
+"""
+This file defines the types for type annotations.
+
+These names aren't part of the module namespace, but they are used in the
+annotations in the function signatures. The functions in the module are only
+valid for inputs that match the given type annotations.
+"""
+
+__all__ = [
+ "Array",
+ "Device",
+ "Dtype",
+ "SupportsDLPack",
+ "SupportsBufferProtocol",
+ "PyCapsule",
+]
+
+from typing import Any, Sequence, Type, Union
+
+from . import (
+ Array,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+)
+
+# This should really be recursive, but that isn't supported yet. See the
+# similar comment in numpy/typing/_array_like.py
+NestedSequence = Sequence[Sequence[Any]]
+
+Device = Any
+Dtype = Type[
+ Union[[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64]]
+]
+SupportsDLPack = Any
+SupportsBufferProtocol = Any
+PyCapsule = Any
diff --git a/numpy/array_api/_utility_functions.py b/numpy/array_api/_utility_functions.py
new file mode 100644
index 000000000..5ecb4bd9f
--- /dev/null
+++ b/numpy/array_api/_utility_functions.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def all(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.all <numpy.all>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.all(x._array, axis=axis, keepdims=keepdims)))
+
+
+def any(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.any <numpy.any>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.any(x._array, axis=axis, keepdims=keepdims)))
diff --git a/numpy/array_api/setup.py b/numpy/array_api/setup.py
new file mode 100644
index 000000000..c8bc29102
--- /dev/null
+++ b/numpy/array_api/setup.py
@@ -0,0 +1,12 @@
def configuration(parent_package="", top_path=None):
    # numpy.distutils-style build configuration hook for the array_api
    # subpackage; registers the tests subpackage for installation.
    from numpy.distutils.misc_util import Configuration

    config = Configuration("array_api", parent_package, top_path)
    config.add_subpackage("tests")
    return config


if __name__ == "__main__":
    from numpy.distutils.core import setup

    setup(configuration=configuration)
diff --git a/numpy/array_api/tests/__init__.py b/numpy/array_api/tests/__init__.py
new file mode 100644
index 000000000..536062e38
--- /dev/null
+++ b/numpy/array_api/tests/__init__.py
@@ -0,0 +1,7 @@
+"""
+Tests for the array API namespace.
+
+Note, full compliance with the array API can be tested with the official array API test
+suite https://github.com/data-apis/array-api-tests. This test suite primarily
+focuses on those things that are not tested by the official test suite.
+"""
diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py
new file mode 100644
index 000000000..088e09b9f
--- /dev/null
+++ b/numpy/array_api/tests/test_array_object.py
@@ -0,0 +1,269 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import ones, asarray, result_type
+from .._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint64,
+)
+
+
def test_validate_index():
    # The indexing tests in the official array API test suite test that the
    # array object correctly handles the subset of indices that are required
    # by the spec. But the NumPy array API implementation specifically
    # disallows any index not required by the spec, via Array._validate_index.
    # This test focuses on testing that non-valid indices are correctly
    # rejected. See
    # https://data-apis.org/array-api/latest/API_specification/indexing.html
    # and the docstring of Array._validate_index for the exact indexing
    # behavior that should be allowed. This does not test indices that are
    # already invalid in NumPy itself because Array will generally just pass
    # such indices directly to the underlying np.ndarray.

    a = ones((3, 4))

    # Out of bounds slices are not allowed (first axis has length 3)
    assert_raises(IndexError, lambda: a[:4])
    assert_raises(IndexError, lambda: a[:-4])
    assert_raises(IndexError, lambda: a[:3:-1])
    assert_raises(IndexError, lambda: a[:-5:-1])
    assert_raises(IndexError, lambda: a[3:])
    assert_raises(IndexError, lambda: a[-4:])
    assert_raises(IndexError, lambda: a[3::-1])
    assert_raises(IndexError, lambda: a[-4::-1])

    # Same checks on the last axis (length 4)
    assert_raises(IndexError, lambda: a[...,:5])
    assert_raises(IndexError, lambda: a[...,:-5])
    assert_raises(IndexError, lambda: a[...,:4:-1])
    assert_raises(IndexError, lambda: a[...,:-6:-1])
    assert_raises(IndexError, lambda: a[...,4:])
    assert_raises(IndexError, lambda: a[...,-5:])
    assert_raises(IndexError, lambda: a[...,4::-1])
    assert_raises(IndexError, lambda: a[...,-5::-1])

    # Boolean indices cannot be part of a larger tuple index
    assert_raises(IndexError, lambda: a[a[:,0]==1,0])
    assert_raises(IndexError, lambda: a[a[:,0]==1,...])
    assert_raises(IndexError, lambda: a[..., a[0]==1])
    assert_raises(IndexError, lambda: a[[True, True, True]])
    assert_raises(IndexError, lambda: a[(True, True, True),])

    # Integer array indices are not allowed (except for 0-D)
    idx = asarray([[0, 1]])
    assert_raises(IndexError, lambda: a[idx])
    assert_raises(IndexError, lambda: a[idx,])
    assert_raises(IndexError, lambda: a[[0, 1]])
    assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
    assert_raises(IndexError, lambda: a[[0, 1]])
    assert_raises(IndexError, lambda: a[np.array([[0, 1]])])

    # np.newaxis is not allowed
    assert_raises(IndexError, lambda: a[None])
    assert_raises(IndexError, lambda: a[None, ...])
    assert_raises(IndexError, lambda: a[..., None])
+
+
def test_operators():
    # For every operator, we test that it works for the required type
    # combinations and raises TypeError otherwise
    binary_op_dtypes = {
        "__add__": "numeric",
        "__and__": "integer_or_boolean",
        "__eq__": "all",
        "__floordiv__": "numeric",
        "__ge__": "numeric",
        "__gt__": "numeric",
        "__le__": "numeric",
        "__lshift__": "integer",
        "__lt__": "numeric",
        "__mod__": "numeric",
        "__mul__": "numeric",
        "__ne__": "all",
        "__or__": "integer_or_boolean",
        "__pow__": "floating",
        "__rshift__": "integer",
        "__sub__": "numeric",
        "__truediv__": "floating",
        "__xor__": "integer_or_boolean",
    }

    # Recompute each time because of in-place ops
    def _array_vals():
        for d in _integer_dtypes:
            yield asarray(1, dtype=d)
        for d in _boolean_dtypes:
            yield asarray(False, dtype=d)
        for d in _floating_dtypes:
            yield asarray(1.0, dtype=d)

    for op, dtypes in binary_op_dtypes.items():
        ops = [op]
        # Comparison operators have no reflected or in-place variants.
        if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
            rop = "__r" + op[2:]
            iop = "__i" + op[2:]
            ops += [rop, iop]
        # One Python scalar of each of the three allowed kinds.
        for s in [1, 1.0, False]:
            for _op in ops:
                for a in _array_vals():
                    # Test array op scalar. From the spec, the following combinations
                    # are supported:

                    # - Python bool for a bool array dtype,
                    # - a Python int within the bounds of the given dtype for integer array dtypes,
                    # - a Python int or float for floating-point array dtypes

                    # We do not do bounds checking for int scalars, but rather use the default
                    # NumPy behavior for casting in that case.

                    if ((dtypes == "all"
                         or dtypes == "numeric" and a.dtype in _numeric_dtypes
                         or dtypes == "integer" and a.dtype in _integer_dtypes
                         or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
                         or dtypes == "boolean" and a.dtype in _boolean_dtypes
                         or dtypes == "floating" and a.dtype in _floating_dtypes
                        )
                        # bool is a subtype of int, which is why we avoid
                        # isinstance here.
                        and (a.dtype in _boolean_dtypes and type(s) == bool
                             or a.dtype in _integer_dtypes and type(s) == int
                             or a.dtype in _floating_dtypes and type(s) in [float, int]
                        )):
                        # Only test for no error
                        getattr(a, _op)(s)
                    else:
                        assert_raises(TypeError, lambda: getattr(a, _op)(s))

            # Test array op array.
            for _op in ops:
                for x in _array_vals():
                    for y in _array_vals():
                        # See the promotion table in NEP 47 or the array
                        # API spec page on type promotion. Mixed kind
                        # promotion is not defined.
                        if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
                            or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
                            or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
                            or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
                            or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
                            or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
                            or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
                            or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
                        ):
                            assert_raises(TypeError, lambda: getattr(x, _op)(y))
                        # Ensure in-place operators only promote to the same dtype as the left operand.
                        elif (
                            _op.startswith("__i")
                            and result_type(x.dtype, y.dtype) != x.dtype
                        ):
                            assert_raises(TypeError, lambda: getattr(x, _op)(y))
                        # Ensure only those dtypes that are required for every operator are allowed.
                        elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
                              or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
                              or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
                              or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes
                              or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
                              or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
                              or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
                              or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
                        ):
                            getattr(x, _op)(y)
                        else:
                            assert_raises(TypeError, lambda: getattr(x, _op)(y))

    unary_op_dtypes = {
        "__abs__": "numeric",
        "__invert__": "integer_or_boolean",
        "__neg__": "numeric",
        "__pos__": "numeric",
    }
    for op, dtypes in unary_op_dtypes.items():
        for a in _array_vals():
            if (
                dtypes == "numeric"
                and a.dtype in _numeric_dtypes
                or dtypes == "integer_or_boolean"
                and a.dtype in _integer_or_boolean_dtypes
            ):
                # Only test for no error
                getattr(a, op)()
            else:
                assert_raises(TypeError, lambda: getattr(a, op)())

    # Finally, matmul() must be tested separately, because it works a bit
    # different from the other operations.
    def _matmul_array_vals():
        # 0-D values plus 2-D shapes that do/don't conform for matmul.
        for a in _array_vals():
            yield a
        for d in _all_dtypes:
            yield ones((3, 4), dtype=d)
            yield ones((4, 2), dtype=d)
            yield ones((4, 4), dtype=d)

    # Scalars always error
    for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
        for s in [1, 1.0, False]:
            for a in _matmul_array_vals():
                if (type(s) in [float, int] and a.dtype in _floating_dtypes
                    or type(s) == int and a.dtype in _integer_dtypes):
                    # Type promotion is valid, but @ is not allowed on 0-D
                    # inputs, so the error is a ValueError
                    assert_raises(ValueError, lambda: getattr(a, _op)(s))
                else:
                    assert_raises(TypeError, lambda: getattr(a, _op)(s))

    for x in _matmul_array_vals():
        for y in _matmul_array_vals():
            if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
                or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
                or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
                or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
                or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
                or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
                or x.dtype in _boolean_dtypes
                or y.dtype in _boolean_dtypes
            ):
                assert_raises(TypeError, lambda: x.__matmul__(y))
                assert_raises(TypeError, lambda: y.__rmatmul__(x))
                assert_raises(TypeError, lambda: x.__imatmul__(y))
            elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
                assert_raises(ValueError, lambda: x.__matmul__(y))
                assert_raises(ValueError, lambda: y.__rmatmul__(x))
                if result_type(x.dtype, y.dtype) != x.dtype:
                    assert_raises(TypeError, lambda: x.__imatmul__(y))
                else:
                    assert_raises(ValueError, lambda: x.__imatmul__(y))
            else:
                x.__matmul__(y)
                y.__rmatmul__(x)
                if result_type(x.dtype, y.dtype) != x.dtype:
                    assert_raises(TypeError, lambda: x.__imatmul__(y))
                elif y.shape[0] != y.shape[1]:
                    # This one fails because x @ y has a different shape from x
                    assert_raises(ValueError, lambda: x.__imatmul__(y))
                else:
                    x.__imatmul__(y)
+
+
def test_python_scalar_constructors():
    # Fix: renamed from "test_python_scalar_construtors" (typo). pytest
    # discovers tests by the "test_" prefix, so the rename is safe.
    a = asarray(False)
    b = asarray(0)
    c = asarray(0.0)

    assert bool(a) == bool(b) == bool(c) == False
    assert int(a) == int(b) == int(c) == 0
    assert float(a) == float(b) == float(c) == 0.0

    # bool/int/float should only be allowed on 0-D arrays.
    assert_raises(TypeError, lambda: bool(asarray([False])))
    assert_raises(TypeError, lambda: int(asarray([0])))
    assert_raises(TypeError, lambda: float(asarray([0.0])))
diff --git a/numpy/array_api/tests/test_creation_functions.py b/numpy/array_api/tests/test_creation_functions.py
new file mode 100644
index 000000000..3cb8865cd
--- /dev/null
+++ b/numpy/array_api/tests/test_creation_functions.py
@@ -0,0 +1,141 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import all
+from .._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ zeros,
+ zeros_like,
+)
+from .._array_object import Array
+from .._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint64,
+)
+
+
+def test_asarray_errors():
+ # Test various protections against incorrect usage
+ assert_raises(TypeError, lambda: Array([1]))
+ assert_raises(TypeError, lambda: asarray(["a"]))
+ assert_raises(ValueError, lambda: asarray([1.0], dtype=np.float16))
+ assert_raises(OverflowError, lambda: asarray(2**100))
+ # Preferably this would be OverflowError
+ # assert_raises(OverflowError, lambda: asarray([2**100]))
+ assert_raises(TypeError, lambda: asarray([2**100]))
+ asarray([1], device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: asarray([1], device="gpu"))
+
+ assert_raises(ValueError, lambda: asarray([1], dtype=int))
+ assert_raises(ValueError, lambda: asarray([1], dtype="i"))
+
+
+def test_asarray_copy():
+ a = asarray([1])
+ b = asarray(a, copy=True)
+ a[0] = 0
+ assert all(b[0] == 1)
+ assert all(a[0] == 0)
+ # Once copy=False is implemented, replace this with
+ # a = asarray([1])
+ # b = asarray(a, copy=False)
+ # a[0] = 0
+ # assert all(b[0] == 0)
+ assert_raises(NotImplementedError, lambda: asarray(a, copy=False))
+
+
+def test_arange_errors():
+ arange(1, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: arange(1, device="gpu"))
+ assert_raises(ValueError, lambda: arange(1, dtype=int))
+ assert_raises(ValueError, lambda: arange(1, dtype="i"))
+
+
+def test_empty_errors():
+ empty((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: empty((1,), device="gpu"))
+ assert_raises(ValueError, lambda: empty((1,), dtype=int))
+ assert_raises(ValueError, lambda: empty((1,), dtype="i"))
+
+
+def test_empty_like_errors():
+ empty_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: empty_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: empty_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: empty_like(asarray(1), dtype="i"))
+
+
+def test_eye_errors():
+ eye(1, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: eye(1, device="gpu"))
+ assert_raises(ValueError, lambda: eye(1, dtype=int))
+ assert_raises(ValueError, lambda: eye(1, dtype="i"))
+
+
+def test_full_errors():
+ full((1,), 0, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: full((1,), 0, device="gpu"))
+ assert_raises(ValueError, lambda: full((1,), 0, dtype=int))
+ assert_raises(ValueError, lambda: full((1,), 0, dtype="i"))
+
+
+def test_full_like_errors():
+ full_like(asarray(1), 0, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, device="gpu"))
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype=int))
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype="i"))
+
+
+def test_linspace_errors():
+ linspace(0, 1, 10, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, device="gpu"))
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype=float))
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype="f"))
+
+
+def test_ones_errors():
+ ones((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: ones((1,), device="gpu"))
+ assert_raises(ValueError, lambda: ones((1,), dtype=int))
+ assert_raises(ValueError, lambda: ones((1,), dtype="i"))
+
+
+def test_ones_like_errors():
+ ones_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: ones_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: ones_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: ones_like(asarray(1), dtype="i"))
+
+
+def test_zeros_errors():
+ zeros((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: zeros((1,), device="gpu"))
+ assert_raises(ValueError, lambda: zeros((1,), dtype=int))
+ assert_raises(ValueError, lambda: zeros((1,), dtype="i"))
+
+
+def test_zeros_like_errors():
+ zeros_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype="i"))
diff --git a/numpy/array_api/tests/test_elementwise_functions.py b/numpy/array_api/tests/test_elementwise_functions.py
new file mode 100644
index 000000000..a9274aec9
--- /dev/null
+++ b/numpy/array_api/tests/test_elementwise_functions.py
@@ -0,0 +1,111 @@
+from inspect import getfullargspec
+
+from numpy.testing import assert_raises
+
+from .. import asarray, _elementwise_functions
+from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
+from .._dtypes import (
+ _dtype_categories,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+)
+
+
+def nargs(func):
+ return len(getfullargspec(func).args)
+
+
+def test_function_types():
+ # Test that every function accepts only the required input types. We only
+ # test the negative cases here (error). The positive cases are tested in
+ # the array API test suite.
+
+ elementwise_function_input_types = {
+ "abs": "numeric",
+ "acos": "floating-point",
+ "acosh": "floating-point",
+ "add": "numeric",
+ "asin": "floating-point",
+ "asinh": "floating-point",
+ "atan": "floating-point",
+ "atan2": "floating-point",
+ "atanh": "floating-point",
+ "bitwise_and": "integer or boolean",
+ "bitwise_invert": "integer or boolean",
+ "bitwise_left_shift": "integer",
+ "bitwise_or": "integer or boolean",
+ "bitwise_right_shift": "integer",
+ "bitwise_xor": "integer or boolean",
+ "ceil": "numeric",
+ "cos": "floating-point",
+ "cosh": "floating-point",
+ "divide": "floating-point",
+ "equal": "all",
+ "exp": "floating-point",
+ "expm1": "floating-point",
+ "floor": "numeric",
+ "floor_divide": "numeric",
+ "greater": "numeric",
+ "greater_equal": "numeric",
+ "isfinite": "numeric",
+ "isinf": "numeric",
+ "isnan": "numeric",
+ "less": "numeric",
+ "less_equal": "numeric",
+ "log": "floating-point",
+ "logaddexp": "floating-point",
+ "log10": "floating-point",
+ "log1p": "floating-point",
+ "log2": "floating-point",
+ "logical_and": "boolean",
+ "logical_not": "boolean",
+ "logical_or": "boolean",
+ "logical_xor": "boolean",
+ "multiply": "numeric",
+ "negative": "numeric",
+ "not_equal": "all",
+ "positive": "numeric",
+ "pow": "floating-point",
+ "remainder": "numeric",
+ "round": "numeric",
+ "sign": "numeric",
+ "sin": "floating-point",
+ "sinh": "floating-point",
+ "sqrt": "floating-point",
+ "square": "numeric",
+ "subtract": "numeric",
+ "tan": "floating-point",
+ "tanh": "floating-point",
+ "trunc": "numeric",
+ }
+
+ def _array_vals():
+ for d in _integer_dtypes:
+ yield asarray(1, dtype=d)
+ for d in _boolean_dtypes:
+ yield asarray(False, dtype=d)
+ for d in _floating_dtypes:
+ yield asarray(1.0, dtype=d)
+
+ for x in _array_vals():
+ for func_name, types in elementwise_function_input_types.items():
+ dtypes = _dtype_categories[types]
+ func = getattr(_elementwise_functions, func_name)
+ if nargs(func) == 2:
+ for y in _array_vals():
+ if x.dtype not in dtypes or y.dtype not in dtypes:
+ assert_raises(TypeError, lambda: func(x, y))
+ else:
+ if x.dtype not in dtypes:
+ assert_raises(TypeError, lambda: func(x))
+
+
+def test_bitwise_shift_error():
+ # bitwise shift functions should raise when the second argument is negative
+ assert_raises(
+ ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
+ )
+ assert_raises(
+ ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
+ )
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 759a91d27..06f2a6376 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -3252,7 +3252,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
a.dumps()
Returns the pickle of the array as a string.
- pickle.loads or numpy.loads will convert the string back to an array.
+ pickle.loads will convert the string back to an array.
Parameters
----------
diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py
index 602b1db6e..8773d6c96 100644
--- a/numpy/core/_add_newdocs_scalars.py
+++ b/numpy/core/_add_newdocs_scalars.py
@@ -205,12 +205,12 @@ add_newdoc_for_scalar_type('bytes_', ['string_'],
add_newdoc_for_scalar_type('void', [],
r"""
Either an opaque sequence of bytes, or a structure.
-
+
>>> np.void(b'abcd')
void(b'\x61\x62\x63\x64')
-
+
Structured `void` scalars can only be constructed via extraction from :ref:`structured_arrays`:
-
+
>>> arr = np.array((1, 2), dtype=[('x', np.int8), ('y', np.int8)])
>>> arr[()]
(1, 2) # looks like a tuple, but is `np.void`
@@ -226,20 +226,36 @@ add_newdoc_for_scalar_type('datetime64', [],
>>> np.datetime64(10, 'Y')
numpy.datetime64('1980')
>>> np.datetime64('1980', 'Y')
- numpy.datetime64('1980')
+ numpy.datetime64('1980')
>>> np.datetime64(10, 'D')
numpy.datetime64('1970-01-11')
-
+
See :ref:`arrays.datetime` for more information.
""")
add_newdoc_for_scalar_type('timedelta64', [],
"""
A timedelta stored as a 64-bit integer.
-
+
See :ref:`arrays.datetime` for more information.
""")
+add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
+ """
+ integer.is_integer() -> bool
+
+ Return ``True`` if the number is finite with integral value.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.int64(-2).is_integer()
+ True
+ >>> np.uint32(5).is_integer()
+ True
+ """))
+
# TODO: work out how to put this on the base class, np.floating
for float_name in ('half', 'single', 'double', 'longdouble'):
add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
@@ -257,3 +273,20 @@ for float_name in ('half', 'single', 'double', 'longdouble'):
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
+
+ add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
+ f"""
+ {float_name}.is_integer() -> bool
+
+ Return ``True`` if the floating point number is finite with integral
+ value, and ``False`` otherwise.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.{float_name}(-2.0).is_integer()
+ True
+ >>> np.{float_name}(3.2).is_integer()
+ False
+ """))
diff --git a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi
index 1928cfe12..fee9b7b6e 100644
--- a/numpy/core/_asarray.pyi
+++ b/numpy/core/_asarray.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import TypeVar, Union, Iterable, overload
+from typing import TypeVar, Union, Iterable, overload, Literal
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_ArrayType = TypeVar("_ArrayType", bound=ndarray)
_Requirements = Literal[
diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py
index 67addef48..3765a0d34 100644
--- a/numpy/core/_type_aliases.py
+++ b/numpy/core/_type_aliases.py
@@ -115,15 +115,6 @@ def _add_aliases():
# add forward, reverse, and string mapping to numarray
sctypeDict[char] = info.type
- # Add deprecated numeric-style type aliases manually, at some point
- # we may want to deprecate the lower case "bytes0" version as well.
- for name in ["Bytes0", "Datetime64", "Str0", "Uint32", "Uint64"]:
- if english_lower(name) not in allTypes:
- # Only one of Uint32 or Uint64, aliases of `np.uintp`, was (and is) defined, note that this
- # is not UInt32/UInt64 (capital i), which is removed.
- continue
- allTypes[name] = allTypes[english_lower(name)]
- sctypeDict[name] = sctypeDict[english_lower(name)]
_add_aliases()
diff --git a/numpy/core/_type_aliases.pyi b/numpy/core/_type_aliases.pyi
index 6a1099cd3..c10d072f9 100644
--- a/numpy/core/_type_aliases.pyi
+++ b/numpy/core/_type_aliases.pyi
@@ -1,13 +1,7 @@
-import sys
-from typing import Dict, Union, Type, List
+from typing import Dict, Union, Type, List, TypedDict
from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _SCTypes(TypedDict):
int: List[Type[signedinteger]]
uint: List[Type[unsignedinteger]]
diff --git a/numpy/core/_ufunc_config.pyi b/numpy/core/_ufunc_config.pyi
index e90f1c510..9c8cc8ab6 100644
--- a/numpy/core/_ufunc_config.pyi
+++ b/numpy/core/_ufunc_config.pyi
@@ -1,16 +1,10 @@
-import sys
-from typing import Optional, Union, Callable, Any
-
-if sys.version_info >= (3, 8):
- from typing import Literal, Protocol, TypedDict
-else:
- from typing_extensions import Literal, Protocol, TypedDict
+from typing import Optional, Union, Callable, Any, Literal, Protocol, TypedDict
_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
_ErrFunc = Callable[[str, int], Any]
class _SupportsWrite(Protocol):
- def write(self, __msg: str) -> Any: ...
+ def write(self, msg: str, /) -> Any: ...
class _ErrDict(TypedDict):
divide: _ErrKind
diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi
index ac2b6f5a8..df22efed6 100644
--- a/numpy/core/arrayprint.pyi
+++ b/numpy/core/arrayprint.pyi
@@ -1,6 +1,5 @@
-import sys
from types import TracebackType
-from typing import Any, Optional, Callable, Union, Type
+from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex
# Using a private class is by no means ideal, but it is simply a consquence
# of a `contextlib.context` returning an instance of aformentioned class
@@ -23,11 +22,6 @@ from numpy import (
)
from numpy.typing import ArrayLike, _CharLike_co, _FloatLike_co
-if sys.version_info > (3, 8):
- from typing import Literal, TypedDict, SupportsIndex
-else:
- from typing_extensions import Literal, TypedDict, SupportsIndex
-
_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
class _FormatDict(TypedDict, total=False):
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 1b6917ebc..4891e8f23 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -489,7 +489,6 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalAnd'),
- TD(O, f='npy_ObjectLogicalAnd', out='?'),
),
'logical_not':
Ufunc(1, 1, None,
@@ -497,7 +496,6 @@ defdict = {
None,
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalNot'),
- TD(O, f='npy_ObjectLogicalNot', out='?'),
),
'logical_or':
Ufunc(2, 1, False_,
@@ -505,13 +503,13 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
- TD(O, f='npy_ObjectLogicalOr', out='?'),
),
'logical_xor':
Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
+ # TODO: using obj.logical_xor() seems pretty much useless:
TD(P, f='logical_xor'),
),
'maximum':
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
index 2457e8719..52025d502 100644
--- a/numpy/core/einsumfunc.pyi
+++ b/numpy/core/einsumfunc.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence
+from typing import List, TypeVar, Optional, Any, overload, Union, Tuple, Sequence, Literal
from numpy import (
ndarray,
@@ -26,11 +25,6 @@ from numpy.typing import (
_DTypeLikeComplex_co,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_ArrayType = TypeVar(
"_ArrayType",
bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
@@ -52,7 +46,8 @@ __all__: List[str]
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeBool_co,
out: None = ...,
dtype: Optional[_DTypeLikeBool] = ...,
@@ -62,7 +57,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeUInt_co,
out: None = ...,
dtype: Optional[_DTypeLikeUInt] = ...,
@@ -72,7 +68,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeInt_co,
out: None = ...,
dtype: Optional[_DTypeLikeInt] = ...,
@@ -82,7 +79,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeFloat_co,
out: None = ...,
dtype: Optional[_DTypeLikeFloat] = ...,
@@ -92,7 +90,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeComplex_co,
out: None = ...,
dtype: Optional[_DTypeLikeComplex] = ...,
@@ -102,7 +101,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: Any,
casting: _CastingUnsafe,
dtype: Optional[_DTypeLikeComplex_co] = ...,
@@ -112,7 +112,8 @@ def einsum(
) -> Any: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeComplex_co,
out: _ArrayType,
dtype: Optional[_DTypeLikeComplex_co] = ...,
@@ -122,7 +123,8 @@ def einsum(
) -> _ArrayType: ...
@overload
def einsum(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: Any,
out: _ArrayType,
casting: _CastingUnsafe,
@@ -136,7 +138,8 @@ def einsum(
# NOTE: In practice the list consists of a `str` (first element)
# and a variable number of integer tuples.
def einsum_path(
- __subscripts: str,
+ subscripts: str,
+ /,
*operands: _ArrayLikeComplex_co,
optimize: _OptimizeKind = ...,
) -> Tuple[List[Any], str]: ...
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 764377bc9..5ecb1e666 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -3320,18 +3320,15 @@ def around(a, decimals=0, out=None):
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
- .. [2] "How Futile are Mindless Assessments of
- Roundoff in Floating-Point Computation?", William Kahan,
- https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
- array([0., 2.])
+ array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
- array([0.4, 1.6])
+ array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
- array([0., 2., 2., 4., 4.])
+ array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 45057e4b1..3cbe1d5c5 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -1,6 +1,5 @@
-import sys
import datetime as dt
-from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar
+from typing import Optional, Union, Sequence, Tuple, Any, overload, TypeVar, Literal
from numpy import (
ndarray,
@@ -26,11 +25,6 @@ from numpy.typing import (
_NumberLike_co,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
# Various annotations for scalars
# While dt.datetime and dt.timedelta are not technically part of NumPy,
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index b5d6ca6ab..c35629aa7 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import overload, Tuple, Union, Sequence, Any
+from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex, Literal
-else:
- from typing_extensions import SupportsIndex, Literal
-
# TODO: wait for support for recursive types
_ArrayLikeNested = Sequence[Sequence[Any]]
_ArrayLikeNumber = Union[
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index bc1fad72f..e975b0105 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -18,6 +18,7 @@
* NPY_CPU_ARCEL
* NPY_CPU_ARCEB
* NPY_CPU_RISCV64
+ * NPY_CPU_LOONGARCH
* NPY_CPU_WASM
*/
#ifndef _NPY_CPUARCH_H_
@@ -103,6 +104,8 @@
#define NPY_CPU_ARCEB
#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
#define NPY_CPU_RISCV64
+#elif defined(__loongarch__)
+ #define NPY_CPU_LOONGARCH
#elif defined(__EMSCRIPTEN__)
/* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
#define NPY_CPU_WASM
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index aa367a002..620595bec 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -49,6 +49,7 @@
|| defined(NPY_CPU_PPC64LE) \
|| defined(NPY_CPU_ARCEL) \
|| defined(NPY_CPU_RISCV64) \
+ || defined(NPY_CPU_LOONGARCH) \
|| defined(NPY_CPU_WASM)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index f32e298f0..e9a6a30d2 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -391,7 +391,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
union { \
ctype z; \
type a[2]; \
- } z1;; \
+ } z1; \
\
z1.a[0] = (x); \
z1.a[1] = (y); \
diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi
index 7ae831b53..97e9c3498 100644
--- a/numpy/core/multiarray.pyi
+++ b/numpy/core/multiarray.pyi
@@ -1,12 +1,11 @@
# TODO: Sort out any and all missing functions in this namespace
import os
-import sys
import datetime as dt
from typing import (
+ Literal as L,
Any,
Callable,
- IO,
Iterable,
Optional,
overload,
@@ -16,6 +15,10 @@ from typing import (
Union,
Sequence,
Tuple,
+ SupportsIndex,
+ final,
+ Final,
+ Protocol,
)
from numpy import (
@@ -47,6 +50,7 @@ from numpy import (
_CastingKind,
_ModeKind,
_SupportsBuffer,
+ _IOProtocol,
)
from numpy.typing import (
@@ -78,15 +82,6 @@ from numpy.typing import (
_TD64Like_co,
)
-from numpy.array_api import (
- _CopyMode
-)
-
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex, final, Final, Literal as L
-else:
- from typing_extensions import SupportsIndex, final, Final, Literal as L
-
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
@@ -312,7 +307,8 @@ def ravel_multi_index(
@overload
def concatenate( # type: ignore[misc]
- __arrays: _ArrayLike[_SCT],
+ arrays: _ArrayLike[_SCT],
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -321,7 +317,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -330,7 +327,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[Any]: ...
@overload
def concatenate( # type: ignore[misc]
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -339,7 +337,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[_SCT]: ...
@overload
def concatenate( # type: ignore[misc]
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: None = ...,
*,
@@ -348,7 +347,8 @@ def concatenate( # type: ignore[misc]
) -> NDArray[Any]: ...
@overload
def concatenate(
- __arrays: ArrayLike,
+ arrays: ArrayLike,
+ /,
axis: Optional[SupportsIndex] = ...,
out: _ArrayType = ...,
*,
@@ -357,19 +357,22 @@ def concatenate(
) -> _ArrayType: ...
def inner(
- __a: ArrayLike,
- __b: ArrayLike,
+ a: ArrayLike,
+ b: ArrayLike,
+ /,
) -> Any: ...
@overload
def where(
- __condition: ArrayLike,
+ condition: ArrayLike,
+ /,
) -> Tuple[NDArray[intp], ...]: ...
@overload
def where(
- __condition: ArrayLike,
- __x: ArrayLike,
- __y: ArrayLike,
+ condition: ArrayLike,
+ x: ArrayLike,
+ y: ArrayLike,
+ /,
) -> NDArray[Any]: ...
def lexsort(
@@ -384,7 +387,7 @@ def can_cast(
) -> bool: ...
def min_scalar_type(
- __a: ArrayLike,
+ a: ArrayLike, /,
) -> dtype[Any]: ...
def result_type(
@@ -397,24 +400,25 @@ def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...
def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ...
@overload
-def vdot(__a: _ArrayLikeBool_co, __b: _ArrayLikeBool_co) -> bool_: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> bool_: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeUInt_co, __b: _ArrayLikeUInt_co) -> unsignedinteger[Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeInt_co, __b: _ArrayLikeInt_co) -> signedinteger[Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeFloat_co, __b: _ArrayLikeFloat_co) -> floating[Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeComplex_co, __b: _ArrayLikeComplex_co) -> complexfloating[Any, Any]: ... # type: ignore[misc]
+def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc]
@overload
-def vdot(__a: _ArrayLikeTD64_co, __b: _ArrayLikeTD64_co) -> timedelta64: ...
+def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...
@overload
-def vdot(__a: _ArrayLikeObject_co, __b: Any) -> Any: ...
+def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...
@overload
-def vdot(__a: Any, __b: _ArrayLikeObject_co) -> Any: ...
+def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
def bincount(
- __x: ArrayLike,
+ x: ArrayLike,
+ /,
weights: Optional[ArrayLike] = ...,
minlength: SupportsIndex = ...,
) -> NDArray[intp]: ...
@@ -433,27 +437,31 @@ def putmask(
) -> None: ...
def packbits(
- __a: _ArrayLikeInt_co,
+ a: _ArrayLikeInt_co,
+ /,
axis: Optional[SupportsIndex] = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def unpackbits(
- __a: _ArrayLike[uint8],
+ a: _ArrayLike[uint8],
+ /,
axis: Optional[SupportsIndex] = ...,
count: Optional[SupportsIndex] = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def shares_memory(
- __a: object,
- __b: object,
+ a: object,
+ b: object,
+ /,
max_work: Optional[int] = ...,
) -> bool: ...
def may_share_memory(
- __a: object,
- __b: object,
+ a: object,
+ b: object,
+ /,
max_work: Optional[int] = ...,
) -> bool: ...
@@ -592,7 +600,7 @@ def asfortranarray(
# In practice `List[Any]` is list with an int, int and a valid
# `np.seterrcall()` object
def geterrobj() -> List[Any]: ...
-def seterrobj(__errobj: List[Any]) -> None: ...
+def seterrobj(errobj: List[Any], /) -> None: ...
def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
@@ -626,7 +634,7 @@ def fromstring(
) -> NDArray[Any]: ...
def frompyfunc(
- __func: Callable[..., Any],
+ func: Callable[..., Any], /,
nin: SupportsIndex,
nout: SupportsIndex,
*,
@@ -635,7 +643,7 @@ def frompyfunc(
@overload
def fromfile(
- file: str | bytes | os.PathLike[Any] | IO[Any],
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
dtype: None = ...,
count: SupportsIndex = ...,
sep: str = ...,
@@ -645,7 +653,7 @@ def fromfile(
) -> NDArray[float64]: ...
@overload
def fromfile(
- file: str | bytes | os.PathLike[Any] | IO[Any],
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
dtype: _DTypeLike[_SCT],
count: SupportsIndex = ...,
sep: str = ...,
@@ -655,7 +663,7 @@ def fromfile(
) -> NDArray[_SCT]: ...
@overload
def fromfile(
- file: str | bytes | os.PathLike[Any] | IO[Any],
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
dtype: DTypeLike,
count: SupportsIndex = ...,
sep: str = ...,
@@ -711,8 +719,8 @@ def frombuffer(
@overload
def arange( # type: ignore[misc]
- __stop: _IntLike_co,
- *,
+ stop: _IntLike_co,
+ /, *,
dtype: None = ...,
like: ArrayLike = ...,
) -> NDArray[signedinteger[Any]]: ...
@@ -727,8 +735,8 @@ def arange( # type: ignore[misc]
) -> NDArray[signedinteger[Any]]: ...
@overload
def arange( # type: ignore[misc]
- __stop: _FloatLike_co,
- *,
+ stop: _FloatLike_co,
+ /, *,
dtype: None = ...,
like: ArrayLike = ...,
) -> NDArray[floating[Any]]: ...
@@ -743,8 +751,8 @@ def arange( # type: ignore[misc]
) -> NDArray[floating[Any]]: ...
@overload
def arange(
- __stop: _TD64Like_co,
- *,
+ stop: _TD64Like_co,
+ /, *,
dtype: None = ...,
like: ArrayLike = ...,
) -> NDArray[timedelta64]: ...
@@ -768,8 +776,8 @@ def arange( # both start and stop must always be specified for datetime64
) -> NDArray[datetime64]: ...
@overload
def arange(
- __stop: Any,
- *,
+ stop: Any,
+ /, *,
dtype: _DTypeLike[_SCT],
like: ArrayLike = ...,
) -> NDArray[_SCT]: ...
@@ -784,7 +792,7 @@ def arange(
) -> NDArray[_SCT]: ...
@overload
def arange(
- __stop: Any,
+ stop: Any, /,
*,
dtype: DTypeLike,
like: ArrayLike = ...,
@@ -800,7 +808,7 @@ def arange(
) -> NDArray[Any]: ...
def datetime_data(
- __dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64],
+ dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
) -> Tuple[str, int]: ...
# The datetime functions perform unsafe casts to `datetime64[D]`,
@@ -951,7 +959,7 @@ def compare_chararrays(
rstrip: bool,
) -> NDArray[bool_]: ...
-def add_docstring(__obj: Callable[..., Any], __docstring: str) -> None: ...
+def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ...
_GetItemKeys = L[
"C", "CONTIGUOUS", "C_CONTIGUOUS",
diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi
index 3c2b553ec..54ab4b7c8 100644
--- a/numpy/core/numeric.pyi
+++ b/numpy/core/numeric.pyi
@@ -1,4 +1,3 @@
-import sys
from typing import (
Any,
Optional,
@@ -10,16 +9,12 @@ from typing import (
overload,
TypeVar,
Iterable,
+ Literal,
)
from numpy import ndarray, generic, dtype, bool_, signedinteger, _OrderKACF, _OrderCF
from numpy.typing import ArrayLike, DTypeLike, _ShapeLike
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_T = TypeVar("_T")
_ArrayType = TypeVar("_ArrayType", bound=ndarray)
diff --git a/numpy/core/numerictypes.pyi b/numpy/core/numerictypes.pyi
index e99e1c500..1d3ff773b 100644
--- a/numpy/core/numerictypes.pyi
+++ b/numpy/core/numerictypes.pyi
@@ -1,6 +1,7 @@
import sys
import types
from typing import (
+ Literal as L,
Type,
Union,
Tuple,
@@ -10,6 +11,8 @@ from typing import (
Dict,
List,
Iterable,
+ Protocol,
+ TypedDict,
)
from numpy import (
@@ -49,11 +52,6 @@ from numpy.core._type_aliases import (
from numpy.typing import DTypeLike, ArrayLike, _SupportsDType
-if sys.version_info >= (3, 8):
- from typing import Literal as L, Protocol, TypedDict
-else:
- from typing_extensions import Literal as L, Protocol, TypedDict
-
_T = TypeVar("_T")
_SCT = TypeVar("_SCT", bound=generic)
@@ -86,8 +84,8 @@ class _typedict(Dict[Type[generic], _T]):
if sys.version_info >= (3, 10):
_TypeTuple = Union[
Type[Any],
- types.Union,
- Tuple[Union[Type[Any], types.Union, Tuple[Any, ...]], ...],
+ types.UnionType,
+ Tuple[Union[Type[Any], types.UnionType, Tuple[Any, ...]], ...],
]
else:
_TypeTuple = Union[
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 70085d896..e1fdd06f2 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -126,18 +126,6 @@ def set_module(module):
return decorator
-
-# Call textwrap.dedent here instead of in the function so as to avoid
-# calling dedent multiple times on the same text
-_wrapped_func_source = textwrap.dedent("""
- @functools.wraps(implementation)
- def {name}(*args, **kwargs):
- relevant_args = dispatcher(*args, **kwargs)
- return implement_array_function(
- implementation, {name}, relevant_args, args, kwargs)
- """)
-
-
def array_function_dispatch(dispatcher, module=None, verify=True,
docs_from_dispatcher=False):
"""Decorator for adding dispatch with the __array_function__ protocol.
@@ -187,25 +175,15 @@ def array_function_dispatch(dispatcher, module=None, verify=True,
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
- # Equivalently, we could define this function directly instead of using
- # exec. This version has the advantage of giving the helper function a
- # more interpettable name. Otherwise, the original function does not
- # show up at all in many cases, e.g., if it's written in C or if the
- # dispatcher gets an invalid keyword argument.
- source = _wrapped_func_source.format(name=implementation.__name__)
-
- source_object = compile(
- source, filename='<__array_function__ internals>', mode='exec')
- scope = {
- 'implementation': implementation,
- 'dispatcher': dispatcher,
- 'functools': functools,
- 'implement_array_function': implement_array_function,
- }
- exec(source_object, scope)
-
- public_api = scope[implementation.__name__]
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, public_api, relevant_args, args, kwargs)
+ public_api.__code__ = public_api.__code__.replace(
+ co_name=implementation.__name__,
+ co_filename='<__array_function__ internals>')
if module is not None:
public_api.__module__ = module
diff --git a/numpy/core/records.py b/numpy/core/records.py
index b3474ad01..fd5f1ab39 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -664,17 +664,17 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
if nn > 0:
shape = shape[:-nn]
+ _array = recarray(shape, descr)
+
+ # populate the record array (makes a copy)
for k, obj in enumerate(arrayList):
nn = descr[k].ndim
testshape = obj.shape[:obj.ndim - nn]
+ name = _names[k]
if testshape != shape:
- raise ValueError("array-shape mismatch in array %d" % k)
+ raise ValueError(f'array-shape mismatch in array {k} ("{name}")')
- _array = recarray(shape, descr)
-
- # populate the record array (makes a copy)
- for i in range(len(arrayList)):
- _array[_names[i]] = arrayList[i]
+ _array[name] = obj
return _array
@@ -939,7 +939,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
- raise IOError("Didn't read as many bytes as expected")
+ raise OSError("Didn't read as many bytes as expected")
return _array
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index c20320910..ba7d83787 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -381,9 +381,9 @@ def check_mathlib(config_cmd):
mathlibs = libs
break
else:
- raise EnvironmentError("math library missing; rerun "
- "setup.py after setting the "
- "MATHLIB env variable")
+ raise RuntimeError(
+ "math library missing; rerun setup.py after setting the "
+ "MATHLIB env variable")
return mathlibs
def visibility_define(config):
diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi
index 9aaeceed7..d7914697d 100644
--- a/numpy/core/shape_base.pyi
+++ b/numpy/core/shape_base.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import TypeVar, overload, List, Sequence, Any
+from typing import TypeVar, overload, List, Sequence, Any, SupportsIndex
from numpy import generic, dtype
from numpy.typing import ArrayLike, NDArray, _NestedSequence, _SupportsArray
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex
-else:
- from typing_extensions import SupportsIndex
-
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
@@ -17,23 +11,23 @@ _ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
__all__: List[str]
@overload
-def atleast_1d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
-def atleast_1d(__arys: ArrayLike) -> NDArray[Any]: ...
+def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_1d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
@overload
-def atleast_2d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
-def atleast_2d(__arys: ArrayLike) -> NDArray[Any]: ...
+def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_2d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
@overload
-def atleast_3d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
-def atleast_3d(__arys: ArrayLike) -> NDArray[Any]: ...
+def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_3d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index c8411104a..09e00badf 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -57,7 +57,7 @@
* avoid linking duplications due to the nature of the dispatch-able sources.
*
* Example:
- * @targets baseline avx avx512_skx vsx3 asimdhp // configration statments
+ * @targets baseline avx avx512_skx vsx3 asimdhp // configuration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
@@ -180,7 +180,7 @@
* Macro NPY_CPU_DISPATCH_DECLARE_XB(LEFT, ...)
*
* Same as `NPY_CPU_DISPATCH_DECLARE` but exclude the baseline declaration even
- * if it was provided within the configration statments.
+ * if it was provided within the configuration statements.
*/
#define NPY_CPU_DISPATCH_DECLARE_XB(...) \
NPY__CPU_DISPATCH_CALL(NPY_CPU_DISPATCH_DECLARE_CHK_, NPY_CPU_DISPATCH_DECLARE_CB_, __VA_ARGS__)
@@ -196,7 +196,7 @@
* Example:
* Assume we have a dispatch-able source exporting the following function:
*
- * @targets baseline avx2 avx512_skx // configration statments
+ * @targets baseline avx2 avx512_skx // configuration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
@@ -238,7 +238,7 @@
* Macro NPY_CPU_DISPATCH_CALL_XB(LEFT, ...)
*
* Same as `NPY_CPU_DISPATCH_DECLARE` but exclude the baseline declaration even
- * if it was provided within the configration statements.
+ * if it was provided within the configuration statements.
* Returns void.
*/
#define NPY_CPU_DISPATCH_CALL_XB_CB_(TESTED_FEATURES, TARGET_NAME, LEFT, ...) \
diff --git a/numpy/core/src/common/simd/avx2/arithmetic.h b/numpy/core/src/common/simd/avx2/arithmetic.h
index e1b170863..ad9688338 100644
--- a/numpy/core/src/common/simd/avx2/arithmetic.h
+++ b/numpy/core/src/common/simd/avx2/arithmetic.h
@@ -284,7 +284,7 @@ NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a)
{
__m256i s0 = _mm256_hadd_epi32(a, a);
s0 = _mm256_hadd_epi32(s0, s0);
- __m128i s1 = _mm256_extracti128_si256(s0, 1);;
+ __m128i s1 = _mm256_extracti128_si256(s0, 1);
s1 = _mm_add_epi32(_mm256_castsi256_si128(s0), s1);
return _mm_cvtsi128_si32(s1);
}
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 887deff53..e74056736 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -3,11 +3,6 @@
#include "structmember.h"
#include <pymem.h>
-/* public api in 3.7 */
-#if PY_VERSION_HEX < 0x03070000
-#define PyTraceMalloc_Track _PyTraceMalloc_Track
-#define PyTraceMalloc_Untrack _PyTraceMalloc_Untrack
-#endif
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 29a2bb0e8..2ad8d6d0e 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -61,7 +61,7 @@ npy_fallocate(npy_intp nbytes, FILE * fp)
* early exit on no space, other errors will also get found during fwrite
*/
if (r == -1 && errno == ENOSPC) {
- PyErr_Format(PyExc_IOError, "Not enough free space to write "
+ PyErr_Format(PyExc_OSError, "Not enough free space to write "
"%"NPY_INTP_FMT" bytes", nbytes);
return -1;
}
@@ -138,7 +138,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
if (n3 == 0) {
/* binary data */
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_LIST_PICKLE)) {
- PyErr_SetString(PyExc_IOError,
+ PyErr_SetString(PyExc_OSError,
"cannot write object arrays to a file in binary mode");
return -1;
}
@@ -182,7 +182,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
#endif
NPY_END_ALLOW_THREADS;
if (n < size) {
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"%ld requested and %ld written",
(long) size, (long) n);
return -1;
@@ -198,7 +198,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
(size_t) PyArray_DESCR(self)->elsize,
1, fp) < 1) {
NPY_END_THREADS;
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"problem writing element %" NPY_INTP_FMT
" to file", it->index);
Py_DECREF(it);
@@ -266,7 +266,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
NPY_END_ALLOW_THREADS;
Py_DECREF(byteobj);
if (n < n2) {
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"problem writing element %" NPY_INTP_FMT
" to file", it->index);
Py_DECREF(strobj);
@@ -276,7 +276,7 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
/* write separator for all but last one */
if (it->index != it->size-1) {
if (fwrite(sep, 1, n3, fp) < n3) {
- PyErr_Format(PyExc_IOError,
+ PyErr_Format(PyExc_OSError,
"problem writing separator to file");
Py_DECREF(strobj);
Py_DECREF(it);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index e3b25d076..45b03a6f3 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -449,7 +449,7 @@ PyArray_GetCastSafety(
/**
* Check whether a cast is safe, see also `PyArray_GetCastSafety` for
- * a similiar function. Unlike GetCastSafety, this function checks the
+ * a similar function. Unlike GetCastSafety, this function checks the
* `castingimpl->casting` when available. This allows for two things:
*
* 1. It avoids calling `resolve_descriptors` in some cases.
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 1449ddcef..deab7d2a1 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -3340,7 +3340,7 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea
fail = 1;
}
if (fail) {
- PyErr_SetString(PyExc_IOError,
+ PyErr_SetString(PyExc_OSError,
"could not seek in file");
return NULL;
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 50964dab8..90453e38f 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1723,22 +1723,6 @@ _convert_from_str(PyObject *obj, int align)
goto fail;
}
- /* Check for a deprecated Numeric-style typecode */
- /* `Uint` has deliberately weird uppercasing */
- char *dep_tps[] = {"Bytes", "Datetime64", "Str", "Uint"};
- int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
- for (int i = 0; i < ndep_tps; ++i) {
- char *dep_tp = dep_tps[i];
-
- if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
- /* Deprecated 2020-06-09, NumPy 1.20 */
- if (DEPRECATE("Numeric-style type codes are "
- "deprecated and will result in "
- "an error in the future.") < 0) {
- goto fail;
- }
- }
- }
/*
* Probably only ever dispatches to `_convert_from_type`, but who
* knows what users are injecting into `np.typeDict`.
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index e533e4932..e38873746 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -819,6 +819,10 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop *
# define _CONVERT_FN(x) npy_floatbits_to_halfbits(x)
# elif @is_double1@
# define _CONVERT_FN(x) npy_doublebits_to_halfbits(x)
+# elif @is_half1@
+# define _CONVERT_FN(x) (x)
+# elif @is_bool1@
+# define _CONVERT_FN(x) npy_float_to_half((float)(x!=0))
# else
# define _CONVERT_FN(x) npy_float_to_half((float)x)
# endif
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 2ca642d76..d33c7060b 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2284,7 +2284,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
return NULL;
}
if (npy_fseek(fp, offset, SEEK_CUR) != 0) {
- PyErr_SetFromErrno(PyExc_IOError);
+ PyErr_SetFromErrno(PyExc_OSError);
goto cleanup;
}
if (type == NULL) {
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 40f736125..740ec8cc2 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -1908,6 +1908,39 @@ error:
}
/**end repeat**/
+/**begin repeat
+ * #name = half, float, double, longdouble#
+ * #Name = Half, Float, Double, LongDouble#
+ * #is_half = 1,0,0,0#
+ * #c = f, f, , l#
+ */
+static PyObject *
+@name@_is_integer(PyObject *self)
+{
+#if @is_half@
+ npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
+#else
+ npy_@name@ val = PyArrayScalar_VAL(self, @Name@);
+#endif
+ PyObject *ret;
+
+ if (npy_isnan(val)) {
+ Py_RETURN_FALSE;
+ }
+ if (!npy_isfinite(val)) {
+ Py_RETURN_FALSE;
+ }
+
+ ret = (npy_floor@c@(val) == val) ? Py_True : Py_False;
+ Py_INCREF(ret);
+ return ret;
+}
+/**end repeat**/
+
+static PyObject *
+integer_is_integer(PyObject *self) {
+ Py_RETURN_TRUE;
+}
/*
* need to fill in doc-strings for these methods on import -- copy from
@@ -2167,7 +2200,7 @@ static PyMethodDef @name@type_methods[] = {
/**end repeat**/
/**begin repeat
- * #name = integer,floating, complexfloating#
+ * #name = floating, complexfloating#
*/
static PyMethodDef @name@type_methods[] = {
/* Hook for the round() builtin */
@@ -2178,6 +2211,17 @@ static PyMethodDef @name@type_methods[] = {
};
/**end repeat**/
+static PyMethodDef integertype_methods[] = {
+ /* Hook for the round() builtin */
+ {"__round__",
+ (PyCFunction)integertype_dunder_round,
+ METH_VARARGS | METH_KEYWORDS, NULL},
+ {"is_integer",
+ (PyCFunction)integer_is_integer,
+ METH_NOARGS, NULL},
+ {NULL, NULL, 0, NULL} /* sentinel */
+};
+
/**begin repeat
* #name = half,float,double,longdouble#
*/
@@ -2185,6 +2229,9 @@ static PyMethodDef @name@type_methods[] = {
{"as_integer_ratio",
(PyCFunction)@name@_as_integer_ratio,
METH_NOARGS, NULL},
+ {"is_integer",
+ (PyCFunction)@name@_is_integer,
+ METH_NOARGS, NULL},
{NULL, NULL, 0, NULL}
};
/**end repeat**/
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index 599774cce..cbea378f0 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -464,9 +464,6 @@ init_casts(void)
* 2. Addition, which needs to use the common instance, and runs into
* cast safety subtleties since we will implement it without an additional
* cast.
- *
- * NOTE: When first writing this, promotion did not exist for new-style loops,
- * if it exists, we could use promotion to implement double * sfloat.
*/
static int
multiply_sfloats(PyArrayMethod_Context *NPY_UNUSED(context),
@@ -591,7 +588,8 @@ add_sfloats_resolve_descriptors(
static int
-add_loop(const char *ufunc_name, PyBoundArrayMethodObject *bmeth)
+add_loop(const char *ufunc_name,
+ PyArray_DTypeMeta *dtypes[3], PyObject *meth_or_promoter)
{
PyObject *mod = PyImport_ImportModule("numpy");
if (mod == NULL) {
@@ -605,13 +603,12 @@ add_loop(const char *ufunc_name, PyBoundArrayMethodObject *bmeth)
"numpy.%s was not a ufunc!", ufunc_name);
return -1;
}
- PyObject *dtype_tup = PyArray_TupleFromItems(
- 3, (PyObject **)bmeth->dtypes, 0);
+ PyObject *dtype_tup = PyArray_TupleFromItems(3, (PyObject **)dtypes, 1);
if (dtype_tup == NULL) {
Py_DECREF(ufunc);
return -1;
}
- PyObject *info = PyTuple_Pack(2, dtype_tup, bmeth->method);
+ PyObject *info = PyTuple_Pack(2, dtype_tup, meth_or_promoter);
Py_DECREF(dtype_tup);
if (info == NULL) {
Py_DECREF(ufunc);
@@ -624,6 +621,28 @@ add_loop(const char *ufunc_name, PyBoundArrayMethodObject *bmeth)
}
+
+/*
+ * We add some very basic promoters to allow multiplying normal and scaled floats.
+ */
+static int
+promote_to_sfloat(PyUFuncObject *NPY_UNUSED(ufunc),
+ PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]),
+ PyArray_DTypeMeta *const signature[3],
+ PyArray_DTypeMeta *new_dtypes[3])
+{
+ for (int i = 0; i < 3; i++) {
+ PyArray_DTypeMeta *new = &PyArray_SFloatDType;
+ if (signature[i] != NULL) {
+ new = signature[i];
+ }
+ Py_INCREF(new);
+ new_dtypes[i] = new;
+ }
+ return 0;
+}
+
+
/*
* Add new ufunc loops (this is somewhat clumsy as of writing it, but should
* get less so with the introduction of public API).
@@ -650,7 +669,8 @@ init_ufuncs(void) {
if (bmeth == NULL) {
return -1;
}
- int res = add_loop("multiply", bmeth);
+ int res = add_loop("multiply",
+ bmeth->dtypes, (PyObject *)bmeth->method);
Py_DECREF(bmeth);
if (res < 0) {
return -1;
@@ -667,11 +687,40 @@ init_ufuncs(void) {
if (bmeth == NULL) {
return -1;
}
- res = add_loop("add", bmeth);
+ res = add_loop("add",
+ bmeth->dtypes, (PyObject *)bmeth->method);
Py_DECREF(bmeth);
if (res < 0) {
return -1;
}
+
+ /*
+ * Add a promoter for both directions of multiply with double.
+ */
+ PyArray_DTypeMeta *double_DType = PyArray_DTypeFromTypeNum(NPY_DOUBLE);
+ Py_DECREF(double_DType); /* immortal anyway */
+
+ PyArray_DTypeMeta *promoter_dtypes[3] = {
+ &PyArray_SFloatDType, double_DType, NULL};
+
+ PyObject *promoter = PyCapsule_New(
+ &promote_to_sfloat, "numpy._ufunc_promoter", NULL);
+ if (promoter == NULL) {
+ return -1;
+ }
+ res = add_loop("multiply", promoter_dtypes, promoter);
+ if (res < 0) {
+ Py_DECREF(promoter);
+ return -1;
+ }
+ promoter_dtypes[0] = double_DType;
+ promoter_dtypes[1] = &PyArray_SFloatDType;
+ res = add_loop("multiply", promoter_dtypes, promoter);
+ Py_DECREF(promoter);
+ if (res < 0) {
+ return -1;
+ }
+
return 0;
}
diff --git a/numpy/core/src/umath/dispatching.c b/numpy/core/src/umath/dispatching.c
index b1c5ccb6b..b97441b13 100644
--- a/numpy/core/src/umath/dispatching.c
+++ b/numpy/core/src/umath/dispatching.c
@@ -97,8 +97,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate)
return -1;
}
}
- if (!PyObject_TypeCheck(PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) {
- /* Must also accept promoters in the future. */
+ PyObject *meth_or_promoter = PyTuple_GET_ITEM(info, 1);
+ if (!PyObject_TypeCheck(meth_or_promoter, &PyArrayMethod_Type)
+ && !PyCapsule_IsValid(meth_or_promoter, "numpy._ufunc_promoter")) {
PyErr_SetString(PyExc_TypeError,
"Second argument to info must be an ArrayMethod or promoter");
return -1;
@@ -354,15 +355,68 @@ resolve_implementation_info(PyUFuncObject *ufunc,
* those defined by the `signature` unmodified).
*/
static PyObject *
-call_promoter_and_recurse(
- PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(promoter),
- PyArray_DTypeMeta *NPY_UNUSED(op_dtypes[]),
- PyArray_DTypeMeta *NPY_UNUSED(signature[]),
- PyArrayObject *const NPY_UNUSED(operands[]))
+call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArrayObject *const operands[])
{
- PyErr_SetString(PyExc_NotImplementedError,
- "Internal NumPy error, promoters are not used/implemented yet.");
- return NULL;
+ int nargs = ufunc->nargs;
+ PyObject *resolved_info = NULL;
+
+ int promoter_result;
+ PyArray_DTypeMeta *new_op_dtypes[NPY_MAXARGS];
+
+ if (PyCapsule_CheckExact(promoter)) {
+ /* We could also go the other way and wrap up the python function... */
+ promoter_function *promoter_function = PyCapsule_GetPointer(promoter,
+ "numpy._ufunc_promoter");
+ if (promoter_function == NULL) {
+ return NULL;
+ }
+ promoter_result = promoter_function(ufunc,
+ op_dtypes, signature, new_op_dtypes);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError,
+ "Calling python functions for promotion is not implemented.");
+ return NULL;
+ }
+ if (promoter_result < 0) {
+ return NULL;
+ }
+ /*
+ * If none of the dtypes changes, we would recurse infinitely, abort.
+ * (Of course it is nevertheless possible to recurse infinitely.)
+ */
+ int dtypes_changed = 0;
+ for (int i = 0; i < nargs; i++) {
+ if (new_op_dtypes[i] != op_dtypes[i]) {
+ dtypes_changed = 1;
+ break;
+ }
+ }
+ if (!dtypes_changed) {
+ goto finish;
+ }
+
+ /*
+ * Do a recursive call, the promotion function has to ensure that the
+ * new tuple is strictly more precise (thus guaranteeing eventual finishing)
+ */
+ if (Py_EnterRecursiveCall(" during ufunc promotion.") != 0) {
+ goto finish;
+ }
+ /* TODO: The caching logic here may need revising: */
+ resolved_info = promote_and_get_info_and_ufuncimpl(ufunc,
+ operands, signature, new_op_dtypes,
+ /* no legacy promotion */ NPY_FALSE, /* cache */ NPY_TRUE);
+
+ Py_LeaveRecursiveCall();
+
+ finish:
+ for (int i = 0; i < nargs; i++) {
+ Py_XDECREF(new_op_dtypes[i]);
+ }
+ return resolved_info;
}
diff --git a/numpy/core/src/umath/dispatching.h b/numpy/core/src/umath/dispatching.h
index b01bc79fa..8d116873c 100644
--- a/numpy/core/src/umath/dispatching.h
+++ b/numpy/core/src/umath/dispatching.h
@@ -7,6 +7,10 @@
#include "array_method.h"
+typedef int promoter_function(PyUFuncObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
NPY_NO_EXPORT int
PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate);
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index a5e123baa..4351f1d25 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -142,7 +142,7 @@ simple_legacy_resolve_descriptors(
}
}
- return NPY_SAFE_CASTING;
+ return NPY_NO_CASTING;
fail:
for (int i = 0; i < nin + nout; i++) {
@@ -244,7 +244,7 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc,
.dtypes = signature,
.flags = flags,
.slots = slots,
- .casting = NPY_EQUIV_CASTING,
+ .casting = NPY_NO_CASTING,
};
PyBoundArrayMethodObject *bound_res = PyArrayMethod_FromSpec_int(&spec, 1);
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index b1afa69a7..8df439aca 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1340,7 +1340,7 @@ TIMEDELTA_mq_m_divide(char **args, npy_intp const *dimensions, npy_intp const *s
*((npy_timedelta *)op1) = NPY_DATETIME_NAT;
}
else {
- *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);;
+ *((npy_timedelta *)op1) = libdivide_s64_do(in1, &fast_d);
}
}
}
diff --git a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
index b17643d23..cc0fd19bb 100644
--- a/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
+++ b/numpy/core/src/umath/loops_exponent_log.dispatch.c.src
@@ -800,7 +800,7 @@ AVX512F_exp_DOUBLE(npy_double * op,
q = _mm512_fmadd_pd(q, r, mA2);
q = _mm512_fmadd_pd(q, r, mA1);
q = _mm512_mul_pd(q, r);
- __m512d p = _mm512_fmadd_pd(r, q, r2);;
+ __m512d p = _mm512_fmadd_pd(r, q, r2);
p = _mm512_add_pd(r1, p);
/* Get 2^(j/32) from lookup table */
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index bed303a86..ebc6bf02a 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -4286,7 +4286,8 @@ _get_dtype(PyObject *dtype_obj) {
else if (NPY_UNLIKELY(out->singleton != descr)) {
/* This does not warn about `metadata`, but units is important. */
if (!PyArray_EquivTypes(out->singleton, descr)) {
- PyErr_Format(PyExc_TypeError,
+ /* Deprecated NumPy 1.21.2 (was an accidental error in 1.21) */
+ if (DEPRECATE(
"The `dtype` and `signature` arguments to "
"ufuncs only select the general DType and not details "
"such as the byte order or time unit (with rare "
@@ -4296,9 +4297,11 @@ _get_dtype(PyObject *dtype_obj) {
"In rare cases where the time unit was preserved, "
"either cast the inputs or provide an output array. "
"In the future NumPy may transition to allow providing "
- "`dtype=` to denote the outputs `dtype` as well");
- Py_DECREF(descr);
- return NULL;
+ "`dtype=` to denote the outputs `dtype` as well. "
+ "(Deprecated NumPy 1.21)") < 0) {
+ Py_DECREF(descr);
+ return NULL;
+ }
}
}
Py_INCREF(out);
diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py
index 3f67f1832..a13e807e2 100644
--- a/numpy/core/tests/test_casting_unittests.py
+++ b/numpy/core/tests/test_casting_unittests.py
@@ -695,6 +695,13 @@ class TestCasting:
expected = arr_normal.astype(dtype)
except TypeError:
with pytest.raises(TypeError):
- arr_NULLs.astype(dtype)
+            arr_NULLs.astype(dtype)
else:
assert_array_equal(expected, arr_NULLs.astype(dtype))
+
+ def test_float_to_bool(self):
+ # test case corresponding to gh-19514
+ # simple test for casting bool_ to float16
+ res = np.array([0, 3, -7], dtype=np.int8).view(bool)
+ expected = [0, 1, 1]
+ assert_array_equal(res, expected)
diff --git a/numpy/core/tests/test_custom_dtypes.py b/numpy/core/tests/test_custom_dtypes.py
index 3ec2363b9..5eb82bc93 100644
--- a/numpy/core/tests/test_custom_dtypes.py
+++ b/numpy/core/tests/test_custom_dtypes.py
@@ -101,6 +101,18 @@ class TestSFloat:
expected_view = a.view(np.float64) * b.view(np.float64)
assert_array_equal(res.view(np.float64), expected_view)
+ def test_basic_multiply_promotion(self):
+ float_a = np.array([1., 2., 3.])
+ b = self._get_array(2.)
+
+ res1 = float_a * b
+ res2 = b * float_a
+ # one factor is one, so we get the factor of b:
+ assert res1.dtype == res2.dtype == b.dtype
+ expected_view = float_a * b.view(np.float64)
+ assert_array_equal(res1.view(np.float64), expected_view)
+ assert_array_equal(res2.view(np.float64), expected_view)
+
def test_basic_addition(self):
a = self._get_array(2.)
b = self._get_array(4.)
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b4146eadf..5a490646e 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -152,7 +152,7 @@ class TestDateTime:
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
-
+
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
@@ -1373,13 +1373,13 @@ class TestDateTime:
assert_equal(tda / 0.5, tdc)
assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
# m8 / m8
- assert_equal(tda / tdb, 6.0 / 9.0)
- assert_equal(np.divide(tda, tdb), 6.0 / 9.0)
- assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)
- assert_equal(tdb / tda, 9.0 / 6.0)
+ assert_equal(tda / tdb, 6 / 9)
+ assert_equal(np.divide(tda, tdb), 6 / 9)
+ assert_equal(np.true_divide(tda, tdb), 6 / 9)
+ assert_equal(tdb / tda, 9 / 6)
assert_equal((tda / tdb).dtype, np.dtype('f8'))
- assert_equal(tda / tdd, 60.0)
- assert_equal(tdd / tda, 1.0 / 60.0)
+ assert_equal(tda / tdd, 60)
+ assert_equal(tdd / tda, 1 / 60)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 42e632e4a..44c76e0b8 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -314,21 +314,6 @@ class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTest
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
-class TestNumericStyleTypecodes(_DeprecationTestCase):
- """
- Most numeric style typecodes were previously deprecated (and removed)
- in 1.20. This also deprecates the remaining ones.
- """
- # 2020-06-09, NumPy 1.20
- def test_all_dtypes(self):
- deprecated_types = ['Bytes0', 'Datetime64', 'Str0']
- # Depending on intp size, either Uint32 or Uint64 is defined:
- deprecated_types.append(f"U{np.dtype(np.intp).name}")
- for dt in deprecated_types:
- self.assert_deprecated(np.dtype, exceptions=(TypeError,),
- args=(dt,))
-
-
class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
# Deprecated 2021-01-05, NumPy 1.21
message = r".*`.dtype` attribute"
@@ -1174,3 +1159,36 @@ class TestCtypesGetter(_DeprecationTestCase):
)
def test_not_deprecated(self, name: str) -> None:
self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
+
+
+class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
+ message = "The `dtype` and `signature` arguments to ufuncs only select the"
+
+ def test_not_deprecated(self):
+ import pickle
+        # does not warn (test relies on bad pickling behaviour; simply remove
+        # it if the `assert int64 is not int64_2` should start failing).
+ int64 = np.dtype("int64")
+ int64_2 = pickle.loads(pickle.dumps(int64))
+ assert int64 is not int64_2
+ self.assert_not_deprecated(lambda: np.add(3, 4, dtype=int64_2))
+
+ def test_deprecation(self):
+ int64 = np.dtype("int64")
+ self.assert_deprecated(lambda: np.add(3, 5, dtype=int64.newbyteorder()))
+ self.assert_deprecated(lambda: np.add(3, 5, dtype="m8[ns]"))
+
+ def test_behaviour(self):
+ int64 = np.dtype("int64")
+ arr = np.arange(10, dtype="m8[s]")
+
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.add(3, 5, dtype=int64.newbyteorder())
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.add(3, 5, dtype="m8[ns]") # previously used the "ns"
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns"
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 4f52268f5..23269f01b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -109,9 +109,12 @@ class TestBuiltin:
operation(np.dtype(np.int32), 7)
@pytest.mark.parametrize("dtype",
- ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
- 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
- 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
+ ['Bool', 'Bytes0', 'Complex32', 'Complex64',
+ 'Datetime64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64',
+ 'Object0', 'Str0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'Uint32', 'UInt32',
+ 'Uint64', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 9c56df2ba..8f8043c30 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4885,9 +4885,9 @@ class TestIO:
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
- assert_raises(IOError, np.fromfile, b, np.uint8, 80)
+ assert_raises(OSError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
- assert_raises(IOError, lambda x: x.tofile(b), d)
+ assert_raises(OSError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
@@ -4970,12 +4970,12 @@ class TestIO:
x.tofile(tmp_filename)
def fail(*args, **kwargs):
- raise IOError('Can not tell or seek')
+ raise OSError('Can not tell or seek')
with io.open(tmp_filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
- assert_raises(IOError, np.fromfile, f, dtype=x.dtype)
+ assert_raises(OSError, np.fromfile, f, dtype=x.dtype)
def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
# gh-6632
@@ -5284,12 +5284,12 @@ class TestIO:
def test_tofile_cleanup(self, tmp_filename):
x = np.zeros((10), dtype=object)
with open(tmp_filename, 'wb') as f:
- assert_raises(IOError, lambda: x.tofile(f, sep=''))
+ assert_raises(OSError, lambda: x.tofile(f, sep=''))
# Dup-ed file handle should be closed or remove will fail on Windows OS
os.remove(tmp_filename)
# Also make sure that we close the Python handle
- assert_raises(IOError, lambda: x.tofile(tmp_filename))
+ assert_raises(OSError, lambda: x.tofile(tmp_filename))
os.remove(tmp_filename)
def test_fromfile_subarray_binary(self, tmp_filename):
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index e2d648a3c..19de0a8aa 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -2381,7 +2381,7 @@ class TestClip:
shape=in_shapes[1], elements={"allow_nan": False}))
# Then calculate our result and expected result and check that they're
- # equal! See gh-12519 and gh-19457 for discussion deciding on this
+ # equal! See gh-12519 and gh-19457 for discussion deciding on this
# property and the result_type argument.
result = np.clip(arr, amin, amax)
t = np.result_type(arr, amin, amax)
@@ -2637,15 +2637,15 @@ class TestStdVar:
def test_ddof1(self):
assert_almost_equal(np.var(self.A, ddof=1),
- self.real_var*len(self.A)/float(len(self.A)-1))
+ self.real_var * len(self.A) / (len(self.A) - 1))
assert_almost_equal(np.std(self.A, ddof=1)**2,
- self.real_var*len(self.A)/float(len(self.A)-1))
+ self.real_var*len(self.A) / (len(self.A) - 1))
def test_ddof2(self):
assert_almost_equal(np.var(self.A, ddof=2),
- self.real_var*len(self.A)/float(len(self.A)-2))
+ self.real_var * len(self.A) / (len(self.A) - 2))
assert_almost_equal(np.std(self.A, ddof=2)**2,
- self.real_var*len(self.A)/float(len(self.A)-2))
+ self.real_var * len(self.A) / (len(self.A) - 2))
def test_out_scalar(self):
d = np.arange(10)
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index 3693bba59..94b2dd3c9 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -102,3 +102,29 @@ class TestAsIntegerRatio:
pytest.skip("longdouble too small on this platform")
assert_equal(nf / df, f, "{}/{}".format(n, d))
+
+
+class TestIsInteger:
+ @pytest.mark.parametrize("str_value", ["inf", "nan"])
+ @pytest.mark.parametrize("code", np.typecodes["Float"])
+ def test_special(self, code: str, str_value: str) -> None:
+ cls = np.dtype(code).type
+ value = cls(str_value)
+ assert not value.is_integer()
+
+ @pytest.mark.parametrize(
+ "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
+ )
+ def test_true(self, code: str) -> None:
+ float_array = np.arange(-5, 5).astype(code)
+ for value in float_array:
+ assert value.is_integer()
+
+ @pytest.mark.parametrize("code", np.typecodes["Float"])
+ def test_false(self, code: str) -> None:
+ float_array = np.arange(-5, 5).astype(code)
+ float_array *= 1.1
+ for value in float_array:
+ if value == 0:
+ continue
+ assert not value.is_integer()
diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py
index ea5bbe103..f0c60953b 100644
--- a/numpy/core/tests/test_simd.py
+++ b/numpy/core/tests/test_simd.py
@@ -850,7 +850,7 @@ class _SIMD_ALL(_Test_Utility):
return
safe_neg = lambda x: -x-1 if -x > int_max else -x
- # test round divison for signed integers
+ # test round division for signed integers
for x, d in itertools.product(rdata, divisors):
d_neg = safe_neg(d)
data = self._data(x)
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index dab11d948..c3ea10d93 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -388,6 +388,24 @@ class TestUfunc:
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
+
+ def test_signature9(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "( 3) -> ( )")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature10(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
def test_signature_failure_extra_parenthesis(self):
with assert_raises(ValueError):
@@ -518,26 +536,36 @@ class TestUfunc:
np.add(arr, arr, dtype="m")
np.maximum(arr, arr, dtype="m")
- def test_forced_dtype_warning(self):
- # does not warn (test relies on bad pickling behaviour, simply remove
- # it if the `assert int64 is not int64_2` should start failing.
- int64 = np.dtype("int64")
- int64_2 = pickle.loads(pickle.dumps(int64))
- assert int64 is not int64_2
- np.add(3, 4, dtype=int64_2)
+ @pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
+ def test_cast_safety(self, ufunc):
+ """Basic test for the safest casts, because ufuncs inner loops can
+ indicate a cast-safety as well (which is normally always "no").
+ """
+ def call_ufunc(arr, **kwargs):
+ return ufunc(*(arr,) * ufunc.nin, **kwargs)
+
+ arr = np.array([1., 2., 3.], dtype=np.float32)
+ arr_bs = arr.astype(arr.dtype.newbyteorder())
+ expected = call_ufunc(arr)
+ # Normally, a "no" cast:
+ res = call_ufunc(arr, casting="no")
+ assert_array_equal(expected, res)
+ # Byte-swapping is not allowed with "no" though:
+ with pytest.raises(TypeError):
+ call_ufunc(arr_bs, casting="no")
- arr = np.arange(10, dtype="m8[s]")
- msg = "The `dtype` and `signature` arguments to ufuncs only select the"
- with pytest.raises(TypeError, match=msg):
- np.add(3, 5, dtype=int64.newbyteorder())
- with pytest.raises(TypeError, match=msg):
- np.add(3, 5, dtype="m8[ns]") # previously used the "ns"
- with pytest.raises(TypeError, match=msg):
- np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns"
- with pytest.raises(TypeError, match=msg):
- np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
- with pytest.raises(TypeError, match=msg):
- np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
+ # But is allowed with "equiv":
+ res = call_ufunc(arr_bs, casting="equiv")
+ assert_array_equal(expected, res)
+
+ # Casting to float64 is safe, but not equiv:
+ with pytest.raises(TypeError):
+ call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
+
+ # but it is safe cast:
+ res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
+ expected = call_ufunc(arr.astype(np.float64)) # upcast
+ assert_array_equal(expected, res)
def test_true_divide(self):
a = np.array(10)
@@ -2049,6 +2077,27 @@ class TestUfunc:
assert_raises(TypeError, f, a, b)
assert_raises(TypeError, f, c, a)
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or]) # logical_xor object loop is bad
+ @pytest.mark.parametrize("signature",
+ [(None, None, object), (object, None, None),
+ (None, object, None)])
+ def test_logical_ufuncs_object_signatures(self, ufunc, signature):
+ a = np.array([True, None, False], dtype=object)
+ res = ufunc(a, a, signature=signature)
+ assert res.dtype == object
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ @pytest.mark.parametrize("signature",
+ [(bool, None, object), (object, None, bool),
+ (None, object, bool)])
+ def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
+ # Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
+ a = np.array([True, None, False])
+ with pytest.raises(TypeError):
+ ufunc(a, a, signature=signature)
+
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index c051cd61b..af5bbe59e 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -134,8 +134,7 @@ class TestClog:
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
y = np.log(x)
- for i in range(len(x)):
- assert_almost_equal(y[i], y_r[i])
+ assert_almost_equal(y, y_r)
@platform_skip
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
@@ -365,18 +364,24 @@ class TestCpow:
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = x ** 2
y = np.power(x, 2)
- for i in range(len(x)):
- assert_almost_equal(y[i], y_r[i])
+ assert_almost_equal(y, y_r)
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
- # Compute the values for complex type in python
- p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
- # Substitute a result allowed by C99 standard
- p_r[4] = complex(np.inf, np.nan)
- # Do the same with numpy complex scalars
+
+ # Hardcode the expected `builtins.complex` values,
+ # as complex exponentiation is broken as of bpo-44698
+ p_r = [
+ 1+0j,
+ 0.20787957635076193+0j,
+ 0.35812203996480685+0.6097119028618724j,
+ 0.12659112128185032+0.48847676699581527j,
+ complex(np.inf, np.nan),
+ complex(np.nan, np.nan),
+ ]
+
n_r = [x[i] ** y[i] for i in lx]
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
@@ -385,11 +390,18 @@ class TestCpow:
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
- # Compute the values for complex type in python
- p_r = [complex(x[i]) ** complex(y[i]) for i in lx]
- # Substitute a result allowed by C99 standard
- p_r[4] = complex(np.inf, np.nan)
- # Do the same with numpy arrays
+
+ # Hardcode the expected `builtins.complex` values,
+ # as complex exponentiation is broken as of bpo-44698
+ p_r = [
+ 1+0j,
+ 0.20787957635076193+0j,
+ 0.35812203996480685+0.6097119028618724j,
+ 0.12659112128185032+0.48847676699581527j,
+ complex(np.inf, np.nan),
+ complex(np.nan, np.nan),
+ ]
+
n_r = x ** y
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
@@ -405,8 +417,7 @@ class TestCabs:
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
y = np.abs(x)
- for i in range(len(x)):
- assert_almost_equal(y[i], y_r[i])
+ assert_almost_equal(y, y_r)
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
@@ -452,9 +463,10 @@ class TestCabs:
return np.abs(complex(a, b))
xa = np.array(x, dtype=complex)
- for i in range(len(xa)):
- ref = g(x[i], y[i])
- check_real_value(f, x[i], y[i], ref)
+ assert len(xa) == len(x) == len(y)
+ for xi, yi in zip(x, y):
+ ref = g(xi, yi)
+ check_real_value(f, xi, yi, ref)
class TestCarg:
def test_simple(self):
@@ -583,7 +595,7 @@ class TestComplexAbsoluteMixedDTypes:
@pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
@pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
-
+
def test_array(self, stride, astype, func):
dtype = [('template_id', '<i8'), ('bank_chisq','<f4'),
('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'),
@@ -602,9 +614,9 @@ class TestComplexAbsoluteMixedDTypes:
myfunc = getattr(np, func)
a = vec['mycomplex']
g = myfunc(a[::stride])
-
+
b = vec['mycomplex'].copy()
h = myfunc(b[::stride])
-
+
assert_array_max_ulp(h.real, g.real, 1)
assert_array_max_ulp(h.imag, g.imag, 1)
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 47d07ad4a..e7fd494d3 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -193,7 +193,12 @@ class _Config:
clang = dict(
native = '-march=native',
opt = "-O3",
- werror = '-Werror'
+ # One of the following flags needs to be applicable for Clang to
+ # guarantee the sanity of the testing process, however in certain
+ # cases `-Werror` gets skipped during the availability test due to
+ # "unused arguments" warnings.
+ # see https://github.com/numpy/numpy/issues/19624
+ werror = '-Werror-implicit-function-declaration -Werror'
),
icc = dict(
native = '-xHost',
@@ -516,7 +521,7 @@ class _Config:
def rm_temp():
try:
shutil.rmtree(tmp)
- except IOError:
+ except OSError:
pass
atexit.register(rm_temp)
self.conf_tmp_path = tmp
@@ -2495,7 +2500,7 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
last_hash = f.readline().split("cache_hash:")
if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
return True
- except IOError:
+ except OSError:
pass
self.dist_log("generate dispatched config -> ", config_path)
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 51ce3c129..776202109 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -27,7 +27,7 @@ from subprocess import getstatusoutput
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = getstatusoutput(cmd)
- except EnvironmentError as e:
+ except OSError as e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
@@ -109,7 +109,7 @@ class LinuxCPUInfo(CPUInfoBase):
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
- except EnvironmentError as e:
+ except OSError as e:
warnings.warn(str(e), UserWarning, stacklevel=2)
else:
for line in fo:
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index fb10d2470..79998cf5d 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -284,7 +284,7 @@ def _exec_command(command, use_shell=None, use_tee = None, **env):
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False)
- except EnvironmentError:
+ except OSError:
# Return 127, as os.spawn*() and /bin/sh do
return 127, ''
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 351a43dd7..01314c136 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -84,9 +84,9 @@ class CompaqVisualFCompiler(FCompiler):
print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
else:
raise
- except IOError as e:
+ except OSError as e:
if not "vcvarsall.bat" in str(e):
- print("Unexpected IOError in", __file__)
+ print("Unexpected OSError in", __file__)
raise
except ValueError as e:
if not "'path'" in str(e):
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 9c65ff43e..a903f3ea3 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -42,7 +42,7 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
- 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
+ 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
class InstallableLib:
@@ -110,6 +110,13 @@ def get_num_build_jobs():
return max(x for x in cmdattr if x is not None)
def quote_args(args):
+ """Quote list of arguments.
+
+ .. deprecated:: 1.22.
+ """
+ import warnings
+ warnings.warn('"quote_args" is deprecated.',
+ DeprecationWarning, stacklevel=2)
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 951ce5fb8..f6e3ad397 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -9,7 +9,7 @@ __all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
-class FormatError(IOError):
+class FormatError(OSError):
"""
Exception thrown when there is a problem parsing a configuration file.
@@ -20,7 +20,7 @@ class FormatError(IOError):
def __str__(self):
return self.msg
-class PkgNotFound(IOError):
+class PkgNotFound(OSError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 2846d754e..8467e1c19 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -2424,6 +2424,10 @@ class flame_info(system_info):
if info is None:
return
+ # Add the extra flag args to info
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
if self.check_embedded_lapack(info):
# check if the user has supplied all information required
self.set_info(**info)
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index fb91f1789..733a9fc50 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -105,7 +105,7 @@ def UnixCCompiler_create_static_lib(self, objects, output_libname,
# and recreate.
# Also, ar on OS X doesn't handle updating universal archives
os.unlink(output_filename)
- except (IOError, OSError):
+ except OSError:
pass
self.mkpath(os.path.dirname(output_filename))
tmp_objects = objects + self.objects
diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi
index 7d8e092ea..e52e12bbd 100644
--- a/numpy/f2py/__init__.pyi
+++ b/numpy/f2py/__init__.pyi
@@ -1,7 +1,6 @@
import os
import subprocess
-from typing import Any, List, Iterable, Dict, overload
-from typing_extensions import TypedDict, Literal as L
+from typing import Literal as L, Any, List, Iterable, Dict, overload, TypedDict
from numpy._pytesttester import PytestTester
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 62aa2fca9..5c9ddb00a 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -110,6 +110,7 @@ f2py_cb_start_clock();
capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
if (capi_tmp) {
capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+ Py_DECREF(capi_tmp);
if (capi_arglist==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
goto capi_fail;
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 3ac9b80c8..c3ec792e3 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -3414,8 +3414,8 @@ if __name__ == "__main__":
try:
open(l).close()
files.append(l)
- except IOError as detail:
- errmess('IOError: %s\n' % str(detail))
+ except OSError as detail:
+ errmess(f'OSError: {detail!s}\n')
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index a14f068f1..f45374be6 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -275,9 +275,8 @@ def scaninputline(inputline):
with open(l):
pass
files.append(l)
- except IOError as detail:
- errmess('IOError: %s. Skipping file "%s".\n' %
- (str(detail), l))
+ except OSError as detail:
+ errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
elif f == -1:
skipfuncs.append(l)
elif f == 0:
@@ -359,33 +358,34 @@ def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules, mnames, isusedby = [], [], {}
- for i in range(len(lst)):
- if '__user__' in lst[i]['name']:
- cb_rules.buildcallbacks(lst[i])
+ for item in lst:
+ if '__user__' in item['name']:
+ cb_rules.buildcallbacks(item)
else:
- if 'use' in lst[i]:
- for u in lst[i]['use'].keys():
+ if 'use' in item:
+ for u in item['use'].keys():
if u not in isusedby:
isusedby[u] = []
- isusedby[u].append(lst[i]['name'])
- modules.append(lst[i])
- mnames.append(lst[i]['name'])
+ isusedby[u].append(item['name'])
+ modules.append(item)
+ mnames.append(item['name'])
ret = {}
- for i in range(len(mnames)):
- if mnames[i] in isusedby:
+ for module, name in zip(modules, mnames):
+ if name in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n' % (
- mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]])))
+ name, ','.join('"%s"' % s for s in isusedby[name])))
else:
um = []
- if 'use' in modules[i]:
- for u in modules[i]['use'].keys():
+ if 'use' in module:
+ for u in module['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess(
- '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u))
- ret[mnames[i]] = {}
- dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um))
+ f'\tModule "{name}" uses nonexisting "{u}" '
+ 'which will be ignored.\n')
+ ret[name] = {}
+ dict_append(ret[name], rules.buildmodule(module, um))
return ret
@@ -429,18 +429,20 @@ def run_main(comline_list):
capi_maps.load_f2cmap_file(options['f2cmap_file'])
postlist = callcrackfortran(files, options)
isusedby = {}
- for i in range(len(postlist)):
- if 'use' in postlist[i]:
- for u in postlist[i]['use'].keys():
+ for plist in postlist:
+ if 'use' in plist:
+ for u in plist['use'].keys():
if u not in isusedby:
isusedby[u] = []
- isusedby[u].append(postlist[i]['name'])
- for i in range(len(postlist)):
- if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']:
- if postlist[i]['name'] in isusedby:
+ isusedby[u].append(plist['name'])
+ for plist in postlist:
+ if plist['block'] == 'python module' and '__user__' in plist['name']:
+ if plist['name'] in isusedby:
# if not quiet:
- outmess('Skipping Makefile build for module "%s" which is used by %s\n' % (
- postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]])))
+ outmess(
+ f'Skipping Makefile build for module "{plist["name"]}" '
+ 'which is used by {}\n'.format(
+ ','.join(f'"{s}"' for s in isusedby[plist['name']])))
if 'signsfile' in options:
if options['verbose'] > 1:
outmess(
@@ -448,8 +450,8 @@ def run_main(comline_list):
outmess('%s %s\n' %
(os.path.basename(sys.argv[0]), options['signsfile']))
return
- for i in range(len(postlist)):
- if postlist[i]['block'] != 'python module':
+ for plist in postlist:
+ if plist['block'] != 'python module':
if 'python module' not in options:
errmess(
'Tip: If your original code is Fortran source then you must use -m option.\n')
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index d5fa76fed..eace3c9fc 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -36,7 +36,7 @@ def _cleanup():
pass
try:
shutil.rmtree(_module_dir)
- except (IOError, OSError):
+ except OSError:
pass
_module_dir = None
diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py
index 604ac8fde..392644237 100644
--- a/numpy/fft/tests/test_pocketfft.py
+++ b/numpy/fft/tests/test_pocketfft.py
@@ -10,7 +10,7 @@ import queue
def fft1(x):
L = len(x)
- phase = -2j*np.pi*(np.arange(L)/float(L))
+ phase = -2j * np.pi * (np.arange(L) / L)
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi
index 25640ec07..ae23b2ec4 100644
--- a/numpy/lib/__init__.pyi
+++ b/numpy/lib/__init__.pyi
@@ -130,7 +130,6 @@ from numpy.lib.npyio import (
recfromtxt as recfromtxt,
recfromcsv as recfromcsv,
load as load,
- loads as loads,
save as save,
savez as savez,
savez_compressed as savez_compressed,
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index c790a6462..56b94853d 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -530,7 +530,7 @@ class DataSource:
return _file_openers[ext](found, mode=mode,
encoding=encoding, newline=newline)
else:
- raise IOError("%s not found." % path)
+ raise FileNotFoundError(f"{path} not found.")
class Repository (DataSource):
diff --git a/numpy/lib/arraypad.pyi b/numpy/lib/arraypad.pyi
index df9538dd7..49ce8e683 100644
--- a/numpy/lib/arraypad.pyi
+++ b/numpy/lib/arraypad.pyi
@@ -1,11 +1,12 @@
-import sys
from typing import (
+ Literal as L,
Any,
Dict,
List,
overload,
Tuple,
TypeVar,
+ Protocol,
)
from numpy import ndarray, dtype, generic
@@ -18,20 +19,16 @@ from numpy.typing import (
_SupportsArray,
)
-if sys.version_info >= (3, 8):
- from typing import Literal as L, Protocol
-else:
- from typing_extensions import Literal as L, Protocol
-
_SCT = TypeVar("_SCT", bound=generic)
class _ModeFunc(Protocol):
def __call__(
self,
- __vector: NDArray[Any],
- __iaxis_pad_width: Tuple[int, int],
- __iaxis: int,
- __kwargs: Dict[str, Any],
+ vector: NDArray[Any],
+ iaxis_pad_width: Tuple[int, int],
+ iaxis: int,
+ kwargs: Dict[str, Any],
+ /,
) -> None: ...
_ModeKind = L[
diff --git a/numpy/lib/arrayterator.pyi b/numpy/lib/arrayterator.pyi
index 39d6fd843..82c669206 100644
--- a/numpy/lib/arrayterator.pyi
+++ b/numpy/lib/arrayterator.pyi
@@ -1,4 +1,3 @@
-import sys
from typing import (
List,
Any,
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 6ac66c22a..e566e253d 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -162,7 +162,6 @@ evolved with time and this document is more current.
"""
import numpy
-import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
@@ -831,7 +830,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
------
ValueError
If the data or the mode is invalid.
- IOError
+ OSError
If the file is not found or cannot be opened correctly.
See Also
@@ -909,7 +908,7 @@ def _read_bytes(fp, size, error_template="ran out of data"):
data += r
if len(r) == 0 or len(data) == size:
break
- except io.BlockingIOError:
+ except BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi
index 4c44d57bf..092245daf 100644
--- a/numpy/lib/format.pyi
+++ b/numpy/lib/format.pyi
@@ -1,10 +1,4 @@
-import sys
-from typing import Any, List, Set
-
-if sys.version_info >= (3, 8):
- from typing import Literal, Final
-else:
- from typing_extensions import Literal, Final
+from typing import Any, List, Set, Literal, Final
__all__: List[str]
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index b43a1d666..d875a00ae 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1512,7 +1512,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi):
difference from their predecessor of more than ``max(discont, period/2)``
to their `period`-complementary values.
- For the default case where `period` is :math:`2\pi` and is `discont` is
+ For the default case where `period` is :math:`2\pi` and `discont` is
:math:`\pi`, this unwraps a radian phase `p` such that adjacent differences
are never greater than :math:`\pi` by adding :math:`2k\pi` for some
integer :math:`k`.
@@ -1866,6 +1866,8 @@ def _parse_gufunc_signature(signature):
Tuple of input and output core dimensions parsed from the signature, each
of the form List[Tuple[str, ...]].
"""
+ signature = re.sub(r'\s+', '', signature)
+
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 8d1b6e5be..2a4402c89 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -149,9 +149,9 @@ class nd_grid:
try:
size = []
typ = int
- for k in range(len(key)):
- step = key[k].step
- start = key[k].start
+ for kk in key:
+ step = kk.step
+ start = kk.start
if start is None:
start = 0
if step is None:
@@ -161,19 +161,19 @@ class nd_grid:
typ = float
else:
size.append(
- int(math.ceil((key[k].stop - start)/(step*1.0))))
+ int(math.ceil((kk.stop - start) / (step * 1.0))))
if (isinstance(step, (_nx.floating, float)) or
isinstance(start, (_nx.floating, float)) or
- isinstance(key[k].stop, (_nx.floating, float))):
+ isinstance(kk.stop, (_nx.floating, float))):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
for _x, _t in zip(size, (typ,)*len(size))]
else:
nn = _nx.indices(size, typ)
- for k in range(len(size)):
- step = key[k].step
- start = key[k].start
+ for k, kk in enumerate(key):
+ step = kk.step
+ start = kk.start
if start is None:
start = 0
if step is None:
@@ -181,7 +181,7 @@ class nd_grid:
if isinstance(step, (_nx.complexfloating, complex)):
step = int(abs(step))
if step != 1:
- step = (key[k].stop - start)/float(step-1)
+ step = (kk.stop - start) / float(step - 1)
nn[k] = (nn[k]*step+start)
if self.sparse:
slobj = [_nx.newaxis]*len(size)
diff --git a/numpy/lib/index_tricks.pyi b/numpy/lib/index_tricks.pyi
index 0f9ae94a9..530be3cae 100644
--- a/numpy/lib/index_tricks.pyi
+++ b/numpy/lib/index_tricks.pyi
@@ -1,4 +1,3 @@
-import sys
from typing import (
Any,
Tuple,
@@ -8,6 +7,8 @@ from typing import (
List,
Union,
Sequence,
+ Literal,
+ SupportsIndex,
)
from numpy import (
@@ -49,11 +50,6 @@ from numpy.core.multiarray import (
ravel_multi_index as ravel_multi_index,
)
-if sys.version_info >= (3, 8):
- from typing import Literal, SupportsIndex
-else:
- from typing_extensions import Literal, SupportsIndex
-
_T = TypeVar("_T")
_DType = TypeVar("_DType", bound=dtype[Any])
_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 7c73d9655..b91bf440f 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -5,7 +5,7 @@ import itertools
import warnings
import weakref
import contextlib
-from operator import itemgetter, index as opindex
+from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping
import numpy as np
@@ -26,18 +26,9 @@ from numpy.compat import (
)
-@set_module('numpy')
-def loads(*args, **kwargs):
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.loads is deprecated, use pickle.loads instead",
- DeprecationWarning, stacklevel=2)
- return pickle.loads(*args, **kwargs)
-
-
__all__ = [
- 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
- 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
+ 'savetxt', 'loadtxt', 'genfromtxt',
+ 'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
@@ -333,10 +324,12 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
Raises
------
- IOError
+ OSError
If the input file does not exist or cannot be read.
+ UnpicklingError
+ If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
ValueError
- The file contains an object array, but allow_pickle=False given.
+ The file contains an object array, but ``allow_pickle=False`` given.
See Also
--------
@@ -445,8 +438,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
try:
return pickle.load(fid, **pickle_kwargs)
except Exception as e:
- raise IOError(
- "Failed to interpret file %s as a pickle" % repr(file)) from e
+ raise pickle.UnpicklingError(
+ f"Failed to interpret file {file!r} as a pickle") from e
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
@@ -728,41 +721,42 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
zipf.close()
+def _floatconv(x):
+ try:
+ return float(x) # The fastest path.
+ except ValueError:
+ if '0x' in x: # Don't accidentally convert "a" ("0xa") to 10.
+ try:
+ return float.fromhex(x)
+ except ValueError:
+ pass
+ raise # Raise the original exception, which makes more sense.
+
+
+_CONVERTERS = [ # These converters only ever get strs (not bytes) as input.
+ (np.bool_, lambda x: bool(int(x))),
+ (np.uint64, np.uint64),
+ (np.int64, np.int64),
+ (np.integer, lambda x: int(float(x))),
+ (np.longdouble, np.longdouble),
+ (np.floating, _floatconv),
+ (complex, lambda x: complex(x.replace('+-', '-'))),
+ (np.bytes_, methodcaller('encode', 'latin-1')),
+ (np.unicode_, str),
+]
+
+
def _getconv(dtype):
    """
    Return the converter callable appropriate for *dtype*.

    Adapted from matplotlib.  Every converter returned here (lambdas
    included) is defined at module level in ``_CONVERTERS``, so equal
    dtypes yield identical callables; this allows equality testing and
    enables the single-type-data optimization in loadtxt.
    """
    scalar_type = dtype.type
    for base_type, converter in _CONVERTERS:
        if issubclass(scalar_type, base_type):
            return converter
    # No numeric/bytes match: leave the field as text.
    return str
# _loadtxt_flatten_dtype_internal and _loadtxt_pack_items are loadtxt helpers
@@ -978,52 +972,13 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
# Nested functions used by loadtxt.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- def split_line(line):
- """Chop off comments, strip, and split at delimiter. """
- line = _decode_line(line, encoding=encoding)
+ def split_line(line: str):
+ """Chop off comments, strip, and split at delimiter."""
for comment in comments: # Much faster than using a single regex.
line = line.split(comment, 1)[0]
line = line.strip('\r\n')
return line.split(delimiter) if line else []
- def read_data(chunk_size):
- """Parse each line, including the first.
-
- The file read, `fh`, is a global defined above.
-
- Parameters
- ----------
- chunk_size : int
- At most `chunk_size` lines are read at a time, with iteration
- until all lines are read.
-
- """
- X = []
- line_iter = itertools.chain([first_line], fh)
- line_iter = itertools.islice(line_iter, max_rows)
- for i, line in enumerate(line_iter):
- vals = split_line(line)
- if len(vals) == 0:
- continue
- if usecols:
- vals = [vals[j] for j in usecols]
- if len(vals) != ncols:
- line_num = i + skiprows + 1
- raise ValueError("Wrong number of columns at line %d"
- % line_num)
-
- # Convert each value according to its column and store
- items = [conv(val) for (conv, val) in zip(converters, vals)]
-
- # Then pack it according to the dtype's nesting
- items = packer(items)
- X.append(items)
- if len(X) > chunk_size:
- yield X
- X = []
- if X:
- yield X
-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Main body of loadtxt.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -1051,14 +1006,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
byte_converters = True
if usecols is not None:
- # Allow usecols to be a single int or a sequence of ints
+ # Copy usecols, allowing it to be a single int or a sequence of ints.
try:
- usecols_as_list = list(usecols)
+ usecols = list(usecols)
except TypeError:
- usecols_as_list = [usecols]
- for col_idx in usecols_as_list:
+ usecols = [usecols]
+ for i, col_idx in enumerate(usecols):
try:
- opindex(col_idx)
+ usecols[i] = opindex(col_idx) # Cast to builtin int now.
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
@@ -1066,8 +1021,13 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
type(col_idx),
)
raise
- # Fall back to existing code
- usecols = usecols_as_list
+ if len(usecols) > 1:
+ usecols_getter = itemgetter(*usecols)
+ else:
+ # Get an iterable back, even if using a single column.
+ usecols_getter = lambda obj, c=usecols[0]: [obj[c]]
+ else:
+ usecols_getter = None
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
@@ -1075,50 +1035,70 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
dtype_types, packer = _loadtxt_flatten_dtype_internal(dtype)
- fown = False
+ fh_closing_ctx = contextlib.nullcontext()
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
- fh = iter(fh)
- fown = True
+ line_iter = iter(fh)
+ fh_closing_ctx = contextlib.closing(fh)
else:
- fh = iter(fname)
+ line_iter = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
+ try:
+ first_line = next(line_iter)
+ except StopIteration:
+ pass # Nothing matters if line_iter is empty.
+ else:
+ # Put first_line back.
+ line_iter = itertools.chain([first_line], line_iter)
+ if isinstance(first_line, bytes):
+ # Using latin1 matches _decode_line's behavior.
+ decoder = methodcaller(
+ "decode",
+ encoding if encoding is not None else "latin1")
+ line_iter = map(decoder, line_iter)
except TypeError as e:
raise ValueError(
f"fname must be a string, filehandle, list of strings,\n"
f"or generator. Got {type(fname)} instead."
) from e
- # input may be a python2 io stream
- if encoding is not None:
- fencoding = encoding
- # we must assume local encoding
- # TODO emit portability warning?
- elif fencoding is None:
- import locale
- fencoding = locale.getpreferredencoding()
+ with fh_closing_ctx:
+
+ # input may be a python2 io stream
+ if encoding is not None:
+ fencoding = encoding
+ # we must assume local encoding
+ # TODO emit portability warning?
+ elif fencoding is None:
+ import locale
+ fencoding = locale.getpreferredencoding()
- try:
# Skip the first `skiprows` lines
for i in range(skiprows):
- next(fh)
+ next(line_iter)
# Read until we find a line with some values, and use it to determine
# the need for decoding and estimate the number of columns.
- for first_line in fh:
+ for first_line in line_iter:
ncols = len(usecols or split_line(first_line))
if ncols:
+ # Put first_line back.
+ line_iter = itertools.chain([first_line], line_iter)
break
else: # End of lines reached
- first_line = ''
ncols = len(usecols or [])
warnings.warn('loadtxt: Empty input file: "%s"' % fname,
stacklevel=2)
+ line_iter = itertools.islice(line_iter, max_rows)
+ lineno_words_iter = filter(
+ itemgetter(1), # item[1] is words; filter skips empty lines.
+ enumerate(map(split_line, line_iter), 1 + skiprows))
+
# Now that we know ncols, create the default converters list, and
# set packing, if necessary.
if len(dtype_types) > 1:
@@ -1144,36 +1124,55 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
continue
if byte_converters:
# converters may use decode to workaround numpy's old
- # behaviour, so encode the string again before passing to
- # the user converter
- def tobytes_first(x, conv):
- if type(x) is bytes:
- return conv(x)
+ # behaviour, so encode the string again (converters are only
+ # called with strings) before passing to the user converter.
+ def tobytes_first(conv, x):
return conv(x.encode("latin1"))
- converters[i] = functools.partial(tobytes_first, conv=conv)
+ converters[i] = functools.partial(tobytes_first, conv)
else:
converters[i] = conv
- converters = [conv if conv is not bytes else
- lambda x: x.encode(fencoding) for conv in converters]
+ fencode = methodcaller("encode", fencoding)
+ converters = [conv if conv is not bytes else fencode
+ for conv in converters]
+ if len(set(converters)) == 1:
+ # Optimize single-type data. Note that this is only reached if
+ # `_getconv` returns equal callables (i.e. not local lambdas) on
+ # equal dtypes.
+ def convert_row(vals, _conv=converters[0]):
+ return [*map(_conv, vals)]
+ else:
+ def convert_row(vals):
+ return [conv(val) for conv, val in zip(converters, vals)]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
- for x in read_data(_loadtxt_chunksize):
+ while True:
+ chunk = []
+ for lineno, words in itertools.islice(
+ lineno_words_iter, _loadtxt_chunksize):
+ if usecols_getter is not None:
+ words = usecols_getter(words)
+ elif len(words) != ncols:
+ raise ValueError(
+ f"Wrong number of columns at line {lineno}")
+ # Convert each value according to its column, then pack it
+ # according to the dtype's nesting, and store it.
+ chunk.append(packer(convert_row(words)))
+ if not chunk: # The islice is empty, i.e. we're done.
+ break
+
if X is None:
- X = np.array(x, dtype)
+ X = np.array(chunk, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
- nshape[0] += len(x)
+ nshape[0] += len(chunk)
X.resize(nshape, refcheck=False)
- X[pos:, ...] = x
- finally:
- if fown:
- fh.close()
+ X[pos:, ...] = chunk
if X is None:
X = np.array([], dtype)
@@ -1475,8 +1474,11 @@ def fromregex(file, regexp, dtype, encoding=None):
Parameters
----------
- file : str or file
+ file : path or file
Filename or file object to read.
+
+ .. versionchanged:: 1.22.0
+ Now accepts `os.PathLike` implementations.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
@@ -1526,6 +1528,7 @@ def fromregex(file, regexp, dtype, encoding=None):
"""
own_fh = False
if not hasattr(file, "read"):
+ file = os.fspath(file)
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
@@ -1534,9 +1537,9 @@ def fromregex(file, regexp, dtype, encoding=None):
dtype = np.dtype(dtype)
content = file.read()
- if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
+ if isinstance(content, bytes) and isinstance(regexp, str):
regexp = asbytes(regexp)
- elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
+ elif isinstance(content, str) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
@@ -2307,62 +2310,6 @@ _genfromtxt_with_like = array_function_dispatch(
)(genfromtxt)
-def ndfromtxt(fname, **kwargs):
- """
- Load ASCII data stored in a file and return it as a single array.
-
- .. deprecated:: 1.17
- ndfromtxt` is a deprecated alias of `genfromtxt` which
- overwrites the ``usemask`` argument with `False` even when
- explicitly called as ``ndfromtxt(..., usemask=True)``.
- Use `genfromtxt` instead.
-
- Parameters
- ----------
- fname, kwargs : For a description of input parameters, see `genfromtxt`.
-
- See Also
- --------
- numpy.genfromtxt : generic function.
-
- """
- kwargs['usemask'] = False
- # Numpy 1.17
- warnings.warn(
- "np.ndfromtxt is a deprecated alias of np.genfromtxt, "
- "prefer the latter.",
- DeprecationWarning, stacklevel=2)
- return genfromtxt(fname, **kwargs)
-
-
-def mafromtxt(fname, **kwargs):
- """
- Load ASCII data stored in a text file and return a masked array.
-
- .. deprecated:: 1.17
- np.mafromtxt is a deprecated alias of `genfromtxt` which
- overwrites the ``usemask`` argument with `True` even when
- explicitly called as ``mafromtxt(..., usemask=False)``.
- Use `genfromtxt` instead.
-
- Parameters
- ----------
- fname, kwargs : For a description of input parameters, see `genfromtxt`.
-
- See Also
- --------
- numpy.genfromtxt : generic function to load ASCII data.
-
- """
- kwargs['usemask'] = True
- # Numpy 1.17
- warnings.warn(
- "np.mafromtxt is a deprecated alias of np.genfromtxt, "
- "prefer the latter.",
- DeprecationWarning, stacklevel=2)
- return genfromtxt(fname, **kwargs)
-
-
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi
index 508357927..4841e9e71 100644
--- a/numpy/lib/npyio.pyi
+++ b/numpy/lib/npyio.pyi
@@ -1,104 +1,265 @@
-from typing import Mapping, List, Any
+import os
+import sys
+import zipfile
+import types
+from typing import (
+ Literal as L,
+ Any,
+ Mapping,
+ TypeVar,
+ Generic,
+ List,
+ Type,
+ Iterator,
+ Union,
+ IO,
+ overload,
+ Sequence,
+ Callable,
+ Pattern,
+ Protocol,
+ Iterable,
+)
from numpy import (
DataSource as DataSource,
+ ndarray,
+ recarray,
+ dtype,
+ generic,
+ float64,
+ void,
)
+from numpy.ma.mrecords import MaskedRecords
+from numpy.typing import ArrayLike, DTypeLike, NDArray, _SupportsDType
+
from numpy.core.multiarray import (
packbits as packbits,
unpackbits as unpackbits,
)
+_T = TypeVar("_T")
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
+_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
+
+_DTypeLike = Union[
+ Type[_SCT],
+ dtype[_SCT],
+ _SupportsDType[dtype[_SCT]],
+]
+
+class _SupportsGetItem(Protocol[_T_contra, _T_co]):
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
+
+class _SupportsRead(Protocol[_CharType_co]):
+ def read(self) -> _CharType_co: ...
+
+class _SupportsReadSeek(Protocol[_CharType_co]):
+ def read(self, n: int, /) -> _CharType_co: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
+class _SupportsWrite(Protocol[_CharType_contra]):
+ def write(self, s: _CharType_contra, /) -> object: ...
+
__all__: List[str]
-def loads(*args, **kwargs): ...
-
-class BagObj:
- def __init__(self, obj): ...
- def __getattribute__(self, key): ...
- def __dir__(self): ...
-
-def zipfile_factory(file, *args, **kwargs): ...
-
-class NpzFile(Mapping[Any, Any]):
- zip: Any
- fid: Any
- files: Any
- allow_pickle: Any
- pickle_kwargs: Any
- f: Any
- def __init__(self, fid, own_fid=..., allow_pickle=..., pickle_kwargs=...): ...
- def __enter__(self): ...
- def __exit__(self, exc_type, exc_value, traceback): ...
- def close(self): ...
- def __del__(self): ...
- def __iter__(self): ...
- def __len__(self): ...
- def __getitem__(self, key): ...
- def iteritems(self): ...
- def iterkeys(self): ...
-
-def load(file, mmap_mode=..., allow_pickle=..., fix_imports=..., encoding=...): ...
-def save(file, arr, allow_pickle=..., fix_imports=...): ...
-def savez(file, *args, **kwds): ...
-def savez_compressed(file, *args, **kwds): ...
+class BagObj(Generic[_T_co]):
+ def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
+ def __getattribute__(self, key: str) -> _T_co: ...
+ def __dir__(self) -> List[str]: ...
+
+class NpzFile(Mapping[str, NDArray[Any]]):
+ zip: zipfile.ZipFile
+ fid: None | IO[str]
+ files: List[str]
+ allow_pickle: bool
+ pickle_kwargs: None | Mapping[str, Any]
+ # Represent `f` as a mutable property so we can access the type of `self`
+ @property
+ def f(self: _T) -> BagObj[_T]: ...
+ @f.setter
+ def f(self: _T, value: BagObj[_T]) -> None: ...
+ def __init__(
+ self,
+ fid: IO[str],
+ own_fid: bool = ...,
+ allow_pickle: bool = ...,
+ pickle_kwargs: None | Mapping[str, Any] = ...,
+ ) -> None: ...
+ def __enter__(self: _T) -> _T: ...
+ def __exit__(
+ self,
+ exc_type: None | Type[BaseException],
+ exc_value: None | BaseException,
+ traceback: None | types.TracebackType,
+ /,
+ ) -> None: ...
+ def close(self) -> None: ...
+ def __del__(self) -> None: ...
+ def __iter__(self) -> Iterator[str]: ...
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: str) -> NDArray[Any]: ...
+
+# NOTE: Returns a `NpzFile` if file is a zip file;
+# returns an `ndarray`/`memmap` otherwise
+def load(
+ file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
+ mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
+ allow_pickle: bool = ...,
+ fix_imports: bool = ...,
+ encoding: L["ASCII", "latin1", "bytes"] = ...,
+) -> Any: ...
+
+def save(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ arr: ArrayLike,
+ allow_pickle: bool = ...,
+ fix_imports: bool = ...,
+) -> None: ...
+
+def savez(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ *args: ArrayLike,
+ **kwds: ArrayLike,
+) -> None: ...
+
+def savez_compressed(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ *args: ArrayLike,
+ **kwds: ArrayLike,
+) -> None: ...
+
+# File-like objects only have to implement `__iter__` and,
+# optionally, `encoding`
+@overload
def loadtxt(
- fname,
- dtype=...,
- comments=...,
- delimiter=...,
- converters=...,
- skiprows=...,
- usecols=...,
- unpack=...,
- ndmin=...,
- encoding=...,
- max_rows=...,
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: None = ...,
+ comments: str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
*,
- like=...,
-): ...
+ like: None | ArrayLike = ...
+) -> NDArray[float64]: ...
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: _DTypeLike[_SCT],
+ comments: str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[_SCT]: ...
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: DTypeLike,
+ comments: str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ like: None | ArrayLike = ...
+) -> NDArray[Any]: ...
+
def savetxt(
- fname,
- X,
- fmt=...,
- delimiter=...,
- newline=...,
- header=...,
- footer=...,
- comments=...,
- encoding=...,
-): ...
-def fromregex(file, regexp, dtype, encoding=...): ...
+ fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
+ X: ArrayLike,
+ fmt: str | Sequence[str] = ...,
+ delimiter: str = ...,
+ newline: str = ...,
+ header: str = ...,
+ footer: str = ...,
+ comments: str = ...,
+ encoding: None | str = ...,
+) -> None: ...
+
+@overload
+def fromregex(
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+ regexp: str | bytes | Pattern[Any],
+ dtype: _DTypeLike[_SCT],
+ encoding: None | str = ...
+) -> NDArray[_SCT]: ...
+@overload
+def fromregex(
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+ regexp: str | bytes | Pattern[Any],
+ dtype: DTypeLike,
+ encoding: None | str = ...
+) -> NDArray[Any]: ...
+
+# TODO: Sort out arguments
+@overload
def genfromtxt(
- fname,
- dtype=...,
- comments=...,
- delimiter=...,
- skip_header=...,
- skip_footer=...,
- converters=...,
- missing_values=...,
- filling_values=...,
- usecols=...,
- names=...,
- excludelist=...,
- deletechars=...,
- replace_space=...,
- autostrip=...,
- case_sensitive=...,
- defaultfmt=...,
- unpack=...,
- usemask=...,
- loose=...,
- invalid_raise=...,
- max_rows=...,
- encoding=...,
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: None = ...,
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[float64]: ...
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: _DTypeLike[_SCT],
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: DTypeLike,
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[Any]: ...
+
+@overload
+def recfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[False] = ...,
+ **kwargs: Any,
+) -> recarray[Any, dtype[void]]: ...
+@overload
+def recfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[True],
+ **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
+
+@overload
+def recfromcsv(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[False] = ...,
+ **kwargs: Any,
+) -> recarray[Any, dtype[void]]: ...
+@overload
+def recfromcsv(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
*,
- like=...,
-): ...
-def recfromtxt(fname, **kwargs): ...
-def recfromcsv(fname, **kwargs): ...
-
-# NOTE: Deprecated
-# def ndfromtxt(fname, **kwargs): ...
-# def mafromtxt(fname, **kwargs): ...
+ usemask: L[True],
+ **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 23021cafa..c40e50a57 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -152,9 +152,8 @@ def poly(seq_of_zeros):
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
- for k in range(len(seq_of_zeros)):
- a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
- mode='full')
+ for zero in seq_of_zeros:
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
@@ -770,8 +769,8 @@ def polyval(p, x):
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
- for i in range(len(p)):
- y = y * x + p[i]
+ for pv in p:
+ y = y * x + pv
return y
@@ -1273,14 +1272,14 @@ class poly1d:
s = s[:-5]
return s
- for k in range(len(coeffs)):
- if not iscomplex(coeffs[k]):
- coefstr = fmt_float(real(coeffs[k]))
- elif real(coeffs[k]) == 0:
- coefstr = '%sj' % fmt_float(imag(coeffs[k]))
+ for k, coeff in enumerate(coeffs):
+ if not iscomplex(coeff):
+ coefstr = fmt_float(real(coeff))
+ elif real(coeff) == 0:
+ coefstr = '%sj' % fmt_float(imag(coeff))
else:
- coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
- fmt_float(imag(coeffs[k])))
+ coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
+ fmt_float(imag(coeff)))
power = (N-k)
if power == 0:
diff --git a/numpy/lib/shape_base.pyi b/numpy/lib/shape_base.pyi
index cfb3040b7..1598dc36c 100644
--- a/numpy/lib/shape_base.pyi
+++ b/numpy/lib/shape_base.pyi
@@ -1,5 +1,4 @@
-from typing import List, TypeVar, Callable, Sequence, Any, overload, Tuple
-from typing_extensions import SupportsIndex, Protocol
+from typing import List, TypeVar, Callable, Sequence, Any, overload, Tuple, SupportsIndex, Protocol
from numpy import (
generic,
@@ -39,15 +38,17 @@ _ArrayLike = _NestedSequence[_SupportsDType[dtype[_SCT]]]
class _ArrayWrap(Protocol):
def __call__(
self,
- __array: NDArray[Any],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: NDArray[Any],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> Any: ...
class _ArrayPrepare(Protocol):
def __call__(
self,
- __array: NDArray[Any],
- __context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ array: NDArray[Any],
+ context: None | Tuple[ufunc, Tuple[Any, ...], int] = ...,
+ /,
) -> Any: ...
class _SupportsArrayWrap(Protocol):
diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi
index d2e744b5a..bafc46e9c 100644
--- a/numpy/lib/stride_tricks.pyi
+++ b/numpy/lib/stride_tricks.pyi
@@ -1,16 +1,81 @@
-from typing import Any, List
+from typing import Any, List, Dict, Iterable, TypeVar, overload, SupportsIndex
-from numpy.typing import _ShapeLike, _Shape
+from numpy import dtype, generic
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ _ShapeLike,
+ _Shape,
+ _NestedSequence,
+ _SupportsArray,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
__all__: List[str]
class DummyArray:
- __array_interface__: Any
- base: Any
- def __init__(self, interface, base=...): ...
+ __array_interface__: Dict[str, Any]
+ base: None | NDArray[Any]
+ def __init__(
+ self,
+ interface: Dict[str, Any],
+ base: None | NDArray[Any] = ...,
+ ) -> None: ...
+
+@overload
+def as_strided(
+ x: _ArrayLike[_SCT],
+ shape: None | Iterable[int] = ...,
+ strides: None | Iterable[int] = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def as_strided(
+ x: ArrayLike,
+ shape: None | Iterable[int] = ...,
+ strides: None | Iterable[int] = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def sliding_window_view(
+ x: _ArrayLike[_SCT],
+ window_shape: int | Iterable[int],
+ axis: None | SupportsIndex = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def sliding_window_view(
+ x: ArrayLike,
+ window_shape: int | Iterable[int],
+ axis: None | SupportsIndex = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def broadcast_to(
+ array: _ArrayLike[_SCT],
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def broadcast_to(
+ array: ArrayLike,
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[Any]: ...
-def as_strided(x, shape=..., strides=..., subok=..., writeable=...): ...
-def sliding_window_view(x, window_shape, axis=..., *, subok=..., writeable=...): ...
-def broadcast_to(array, shape, subok=...): ...
def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...
-def broadcast_arrays(*args, subok=...): ...
+
+def broadcast_arrays(
+ *args: ArrayLike,
+ subok: bool = ...,
+) -> List[NDArray[Any]]: ...
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 1ed7815d9..2738d41c4 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -102,10 +102,10 @@ class TestDataSourceOpen:
def test_InvalidHTTP(self):
url = invalid_httpurl()
- assert_raises(IOError, self.ds.open, url)
+ assert_raises(OSError, self.ds.open, url)
try:
self.ds.open(url)
- except IOError as e:
+ except OSError as e:
# Regression test for bug fixed in r4342.
assert_(e.errno is None)
@@ -120,7 +120,7 @@ class TestDataSourceOpen:
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
- assert_raises(IOError, self.ds.open, invalid_file)
+ assert_raises(OSError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index e1b615223..829691b1c 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1528,6 +1528,21 @@ class TestVectorize:
([('x',)], [('y',), ()]))
assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
+ # Tests to check if whitespaces are ignored
+ assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'),
+ ([('x', 'y')], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'),
+ ([('x',), ('y',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '),
+ ([('x',)], [('y',)]))
+ assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'),
+ ([('x',)], [('y',), ()]))
+ assert_equal(nfb._parse_gufunc_signature(
+ '( ), ( a, b,c ) ,( d) -> (d , e)'),
+ ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
with assert_raises(ValueError):
nfb._parse_gufunc_signature('(x)(y)->()')
with assert_raises(ValueError):
@@ -2757,11 +2772,6 @@ class TestInterp:
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
-def compare_results(res, desired):
- for i in range(len(desired)):
- assert_array_equal(res[i], desired[i])
-
-
class TestPercentile:
def test_basic(self):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index d97ad76df..11f2b7d4d 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1229,9 +1229,11 @@ class Testfromregex:
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
- def test_record_unicode(self):
+ @pytest.mark.parametrize("path_type", [str, Path])
+ def test_record_unicode(self, path_type):
utf8 = b'\xcf\x96'
- with temppath() as path:
+ with temppath() as str_path:
+ path = path_type(str_path)
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
@@ -2503,28 +2505,6 @@ class TestPathUsage:
data = np.genfromtxt(path)
assert_array_equal(a, data)
- def test_ndfromtxt(self):
- # Test outputting a standard ndarray
- with temppath(suffix='.txt') as path:
- path = Path(path)
- with path.open('w') as f:
- f.write(u'1 2\n3 4')
-
- control = np.array([[1, 2], [3, 4]], dtype=int)
- test = np.genfromtxt(path, dtype=int)
- assert_array_equal(test, control)
-
- def test_mafromtxt(self):
- # From `test_fancy_dtype_alt` above
- with temppath(suffix='.txt') as path:
- path = Path(path)
- with path.open('w') as f:
- f.write(u'1,2,3.0\n4,5,6.0\n')
-
- test = np.genfromtxt(path, delimiter=',', usemask=True)
- control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
- assert_equal(test, control)
-
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index fb7ba7874..a148e53da 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -392,7 +392,7 @@ class TestArraySplit:
assert_(a.dtype.type is res[-1].dtype.type)
# Same thing for manual splits:
- res = array_split(a, [0, 1, 2], axis=0)
+ res = array_split(a, [0, 1], axis=0)
tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
np.array([np.arange(10)])]
compare_results(res, tgt)
@@ -713,5 +713,9 @@ class TestMayShareMemory:
# Utility
def compare_results(res, desired):
    """Compare two equal-length lists of arrays elementwise."""
    # zip silently truncates the longer iterable, so the lengths must be
    # checked up front (strict zip is PEP 618 / Python 3.10).
    if len(res) != len(desired):
        raise ValueError("Iterables have different lengths")
    for actual, expected in zip(res, desired):
        assert_array_equal(actual, expected)
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 8a877ae69..72c91836f 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -11,6 +11,10 @@ from io import StringIO
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+@pytest.mark.skipif(
+ sys.version_info == (3, 10, 0, "candidate", 1),
+ reason="Broken as of bpo-44524",
+)
def test_lookfor():
out = StringIO()
utils.lookfor('eigenvalue', module='numpy', output=out,
@@ -160,7 +164,7 @@ def test_info_method_heading():
class WithPublicMethods:
def first_method():
pass
-
+
def _has_method_heading(cls):
out = StringIO()
utils.info(cls, output=out)
diff --git a/numpy/lib/type_check.pyi b/numpy/lib/type_check.pyi
index fbe325858..5eb0e62d2 100644
--- a/numpy/lib/type_check.pyi
+++ b/numpy/lib/type_check.pyi
@@ -1,5 +1,5 @@
-import sys
from typing import (
+ Literal as L,
Any,
Container,
Iterable,
@@ -7,6 +7,7 @@ from typing import (
overload,
Type,
TypeVar,
+ Protocol,
)
from numpy import (
@@ -32,11 +33,6 @@ from numpy.typing import (
_DTypeLikeComplex,
)
-if sys.version_info >= (3, 8):
- from typing import Protocol, Literal as L
-else:
- from typing_extensions import Protocol, Literal as L
-
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index b1a916d4a..1f2cb66fa 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -351,8 +351,7 @@ def who(vardict=None):
maxshape = 0
maxbyte = 0
totalbytes = 0
- for k in range(len(sta)):
- val = sta[k]
+ for val in sta:
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
@@ -369,8 +368,7 @@ def who(vardict=None):
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
- for k in range(len(sta)):
- val = sta[k]
+ for val in sta:
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
diff --git a/numpy/lib/utils.pyi b/numpy/lib/utils.pyi
index 0518655c6..f0a8797ad 100644
--- a/numpy/lib/utils.pyi
+++ b/numpy/lib/utils.pyi
@@ -1,4 +1,3 @@
-import sys
from ast import AST
from typing import (
Any,
@@ -11,6 +10,7 @@ from typing import (
Tuple,
TypeVar,
Union,
+ Protocol,
)
from numpy import ndarray, generic
@@ -21,17 +21,12 @@ from numpy.core.numerictypes import (
issubsctype as issubsctype,
)
-if sys.version_info >= (3, 8):
- from typing import Protocol
-else:
- from typing_extensions import Protocol
-
_T_contra = TypeVar("_T_contra", contravariant=True)
_FuncType = TypeVar("_FuncType", bound=Callable[..., Any])
# A file-like object opened in `w` mode
class _SupportsWrite(Protocol[_T_contra]):
- def write(self, __s: _T_contra) -> Any: ...
+ def write(self, s: _T_contra, /) -> Any: ...
__all__: List[str]
@@ -60,7 +55,8 @@ def deprecate(
) -> _Deprecate: ...
@overload
def deprecate(
- __func: _FuncType,
+ func: _FuncType,
+ /,
old_name: Optional[str] = ...,
new_name: Optional[str] = ...,
message: Optional[str] = ...,
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index d2150919f..b2ac383a2 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3374,8 +3374,12 @@ class MaskedArray(ndarray):
_mask[indx] = mval
elif not self._hardmask:
# Set the data, then the mask
- _data[indx] = dval
- _mask[indx] = mval
+ if (isinstance(indx, masked_array) and
+ not isinstance(value, masked_array)):
+ _data[indx.data] = dval
+ else:
+ _data[indx] = dval
+ _mask[indx] = mval
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
_data[indx] = dval
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 6814931b0..10b1b209c 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -658,8 +658,8 @@ def openfile(fname):
# Try to open the file and guess its type
try:
f = open(fname)
- except IOError as e:
- raise IOError(f"No such file: '{fname}'") from e
+ except FileNotFoundError as e:
+ raise FileNotFoundError(f"No such file: '{fname}'") from e
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index ab003b94e..2b3034f9c 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -697,6 +697,22 @@ class TestMa:
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
+ def test_assignment_by_condition(self):
+ # Test for gh-18951
+ a = array([1, 2, 3, 4], mask=[1, 0, 1, 0])
+ c = a >= 3
+ a[c] = 5
+ assert_(a[2] is masked)
+
+ def test_assignment_by_condition_2(self):
+ # gh-19721
+ a = masked_array([0, 1], mask=[False, False])
+ b = masked_array([0, 1], mask=[True, True])
+ mask = a < 1
+ b[mask] = a[mask]
+ expected_mask = [False, True]
+ assert_equal(b.mask, expected_mask)
+
class TestUfuncs:
def setup(self):
diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi
index 14dc55131..64b683d7c 100644
--- a/numpy/random/_generator.pyi
+++ b/numpy/random/_generator.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar
+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, TypeVar, Literal
from numpy import (
bool_,
@@ -44,11 +43,6 @@ from numpy.typing import (
_UIntCodes,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
_DTypeLikeFloat32 = Union[
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index e2430d139..60b6bfc72 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -561,7 +561,7 @@ cdef class Generator:
raise TypeError('Unsupported dtype %r for integers' % _dtype)
- if size is None and dtype in (bool, int, np.compat.long):
+ if size is None and dtype in (bool, int):
if np.array(ret).shape == ():
return dtype(ret)
return ret
diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi
index 1b8bacdae..820f27392 100644
--- a/numpy/random/_mt19937.pyi
+++ b/numpy/random/_mt19937.pyi
@@ -1,15 +1,9 @@
-import sys
-from typing import Any, Union
+from typing import Any, Union, TypedDict
from numpy import dtype, ndarray, uint32
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _MT19937Internal(TypedDict):
key: ndarray[Any, dtype[uint32]]
pos: int
diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi
index 25e2fdde6..4881a987e 100644
--- a/numpy/random/_pcg64.pyi
+++ b/numpy/random/_pcg64.pyi
@@ -1,14 +1,8 @@
-import sys
-from typing import Union
+from typing import Union, TypedDict
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _PCG64Internal(TypedDict):
state: int
inc: int
diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi
index f6a5b9b9b..dd1c5e6e9 100644
--- a/numpy/random/_philox.pyi
+++ b/numpy/random/_philox.pyi
@@ -1,15 +1,9 @@
-import sys
-from typing import Any, Union
+from typing import Any, Union, TypedDict
from numpy import dtype, ndarray, uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _PhiloxInternal(TypedDict):
counter: ndarray[Any, dtype[uint64]]
key: ndarray[Any, dtype[uint64]]
diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi
index 72a271c92..94d11a210 100644
--- a/numpy/random/_sfc64.pyi
+++ b/numpy/random/_sfc64.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import Any, Union
+from typing import Any, Union, TypedDict
from numpy import dtype as dtype
from numpy import ndarray as ndarray
@@ -7,11 +6,6 @@ from numpy import uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy.typing import _ArrayLikeInt_co
-if sys.version_info >= (3, 8):
- from typing import TypedDict
-else:
- from typing_extensions import TypedDict
-
class _SFC64Internal(TypedDict):
state: ndarray[Any, dtype[uint64]]
diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi
index 5b68dde6c..fa2f1ab12 100644
--- a/numpy/random/bit_generator.pyi
+++ b/numpy/random/bit_generator.pyi
@@ -1,5 +1,4 @@
import abc
-import sys
from threading import Lock
from typing import (
Any,
@@ -16,16 +15,12 @@ from typing import (
TypeVar,
Union,
overload,
+ Literal,
)
from numpy import dtype, ndarray, uint32, uint64
from numpy.typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_T = TypeVar("_T")
_DTypeLikeUint32 = Union[
diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi
index 3137b0a95..cbe87a299 100644
--- a/numpy/random/mtrand.pyi
+++ b/numpy/random/mtrand.pyi
@@ -1,5 +1,4 @@
-import sys
-from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload
+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union, overload, Literal
from numpy import (
bool_,
@@ -44,11 +43,6 @@ from numpy.typing import (
_UIntCodes,
)
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
_DTypeLikeFloat32 = Union[
dtype[float32],
_SupportsDType[dtype[float32]],
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 4f5862faa..c9d8ee8e3 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -763,7 +763,7 @@ cdef class RandomState:
else:
raise TypeError('Unsupported dtype %r for randint' % _dtype)
- if size is None and dtype in (bool, int, np.compat.long):
+ if size is None and dtype in (bool, int):
if np.array(ret).shape == ():
return dtype(ret)
return ret
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 9f6dcdc6b..88d2792a6 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -32,11 +32,11 @@ class TestRegression:
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
- freq = np.sum(rvsn == 1) / float(N)
+ freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
- freq = np.sum(rvsn == 2) / float(N)
+ freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index 0bf361e5e..595fb5fd3 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -43,11 +43,11 @@ class TestRegression:
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
- freq = np.sum(rvsn == 1) / float(N)
+ freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
- freq = np.sum(rvsn == 2) / float(N)
+ freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index 54d5a3efb..8bf419875 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -39,11 +39,11 @@ class TestRegression:
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
- freq = np.sum(rvsn == 1) / float(N)
+ freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
- freq = np.sum(rvsn == 2) / float(N)
+ freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
diff --git a/numpy/setup.py b/numpy/setup.py
index cbf633504..a0ca99919 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -4,6 +4,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy', parent_package, top_path)
+ config.add_subpackage('array_api')
config.add_subpackage('compat')
config.add_subpackage('core')
config.add_subpackage('distutils')
diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi
index 29915309f..26ce52e40 100644
--- a/numpy/testing/_private/utils.pyi
+++ b/numpy/testing/_private/utils.pyi
@@ -6,6 +6,7 @@ import warnings
import unittest
import contextlib
from typing import (
+ Literal as L,
Any,
AnyStr,
Callable,
@@ -23,6 +24,8 @@ from typing import (
type_check_only,
TypeVar,
Union,
+ Final,
+ SupportsIndex,
)
from numpy import generic, dtype, number, object_, bool_, _FloatValue
@@ -40,11 +43,6 @@ from unittest.case import (
SkipTest as SkipTest,
)
-if sys.version_info >= (3, 8):
- from typing import Final, SupportsIndex, Literal as L
-else:
- from typing_extensions import Final, SupportsIndex, Literal as L
-
_T = TypeVar("_T")
_ET = TypeVar("_ET", bound=BaseException)
_FT = TypeVar("_FT", bound=Callable[..., Any])
@@ -261,8 +259,9 @@ def raises(*args: Type[BaseException]) -> Callable[[_FT], _FT]: ...
@overload
def assert_raises( # type: ignore
- __expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
- __callable: Callable[..., Any],
+ expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
+ callable: Callable[..., Any],
+ /,
*args: Any,
**kwargs: Any,
) -> None: ...
@@ -275,9 +274,10 @@ def assert_raises(
@overload
def assert_raises_regex(
- __expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
- __expected_regex: str | bytes | Pattern[Any],
- __callable: Callable[..., Any],
+ expected_exception: Type[BaseException] | Tuple[Type[BaseException], ...],
+ expected_regex: str | bytes | Pattern[Any],
+ callable: Callable[..., Any],
+ /,
*args: Any,
**kwargs: Any,
) -> None: ...
@@ -341,8 +341,9 @@ def assert_warns(
) -> contextlib._GeneratorContextManager[None]: ...
@overload
def assert_warns(
- __warning_class: Type[Warning],
- __func: Callable[..., _T],
+ warning_class: Type[Warning],
+ func: Callable[..., _T],
+ /,
*args: Any,
**kwargs: Any,
) -> _T: ...
@@ -351,7 +352,8 @@ def assert_warns(
def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
@overload
def assert_no_warnings(
- __func: Callable[..., _T],
+ func: Callable[..., _T],
+ /,
*args: Any,
**kwargs: Any,
) -> _T: ...
@@ -388,7 +390,8 @@ def temppath(
def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
@overload
def assert_no_gc_cycles(
- __func: Callable[..., Any],
+ func: Callable[..., Any],
+ /,
*args: Any,
**kwargs: Any,
) -> None: ...
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 6e4a8dee0..5b1578500 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -45,8 +45,6 @@ def test_numpy_namespace():
'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
'get_include': 'numpy.lib.utils.get_include',
- 'mafromtxt': 'numpy.lib.npyio.mafromtxt',
- 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt',
'recfromcsv': 'numpy.lib.npyio.recfromcsv',
'recfromtxt': 'numpy.lib.npyio.recfromtxt',
'safe_eval': 'numpy.lib.utils.safe_eval',
@@ -54,22 +52,8 @@ def test_numpy_namespace():
'show_config': 'numpy.__config__.show',
'who': 'numpy.lib.utils.who',
}
- if sys.version_info < (3, 7):
- # These built-in types are re-exported by numpy.
- builtins = {
- 'bool': 'builtins.bool',
- 'complex': 'builtins.complex',
- 'float': 'builtins.float',
- 'int': 'builtins.int',
- 'long': 'builtins.int',
- 'object': 'builtins.object',
- 'str': 'builtins.str',
- 'unicode': 'builtins.str',
- }
- allowlist = dict(undocumented, **builtins)
- else:
- # after 3.7, we override dir to not show these members
- allowlist = undocumented
+ # We override dir to not show these members
+ allowlist = undocumented
bad_results = check_dir(np)
# pytest gives better error messages with the builtin assert than with
# assert_equal
@@ -137,6 +121,7 @@ def test_NPY_NO_EXPORT():
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
PUBLIC_MODULES = ['numpy.' + s for s in [
+ "array_api",
"ctypeslib",
"distutils",
"distutils.cpuinfo",
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index d731f00ef..d60ddb5bb 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -5,13 +5,6 @@ Typing (:mod:`numpy.typing`)
.. versionadded:: 1.20
-.. warning::
-
- Some of the types in this module rely on features only present in
- the standard library in Python 3.8 and greater. If you want to use
- these types in earlier versions of Python, you should install the
- typing-extensions_ package.
-
Large parts of the NumPy API have PEP-484-style type annotations. In
addition a number of type aliases are available to users, most prominently
the two below:
@@ -143,24 +136,7 @@ API
# NOTE: The API section will be appended with additional entries
# further down in this file
-from typing import TYPE_CHECKING, List, Any
-
-if TYPE_CHECKING:
- # typing_extensions is always available when type-checking
- from typing_extensions import Literal as L
- _HAS_TYPING_EXTENSIONS: L[True]
-else:
- try:
- import typing_extensions
- except ImportError:
- _HAS_TYPING_EXTENSIONS = False
- else:
- _HAS_TYPING_EXTENSIONS = True
-
-if TYPE_CHECKING:
- from typing_extensions import final
-else:
- def final(f): return f
+from typing import TYPE_CHECKING, List, Any, final
if not TYPE_CHECKING:
__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py
index c562f3c1f..6ea0eb662 100644
--- a/numpy/typing/_array_like.py
+++ b/numpy/typing/_array_like.py
@@ -1,7 +1,6 @@
from __future__ import annotations
-import sys
-from typing import Any, Sequence, TYPE_CHECKING, Union, TypeVar, Generic
+from typing import Any, Sequence, Protocol, Union, TypeVar
from numpy import (
ndarray,
dtype,
@@ -19,28 +18,19 @@ from numpy import (
str_,
bytes_,
)
-from . import _HAS_TYPING_EXTENSIONS
-
-if sys.version_info >= (3, 8):
- from typing import Protocol
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Protocol
_T = TypeVar("_T")
_ScalarType = TypeVar("_ScalarType", bound=generic)
_DType = TypeVar("_DType", bound="dtype[Any]")
_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- # The `_SupportsArray` protocol only cares about the default dtype
- # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
- # array.
- # Concrete implementations of the protocol are responsible for adding
- # any and all remaining overloads
- class _SupportsArray(Protocol[_DType_co]):
- def __array__(self) -> ndarray[Any, _DType_co]: ...
-else:
- class _SupportsArray(Generic[_DType_co]): ...
+# The `_SupportsArray` protocol only cares about the default dtype
+# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
+# array.
+# Concrete implementations of the protocol are responsible for adding
+# any and all remaining overloads
+class _SupportsArray(Protocol[_DType_co]):
+ def __array__(self) -> ndarray[Any, _DType_co]: ...
# TODO: Wait for support for recursive types
_NestedSequence = Union[
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py
index 8f911da3b..44ad5c291 100644
--- a/numpy/typing/_callable.py
+++ b/numpy/typing/_callable.py
@@ -10,7 +10,6 @@ See the `Mypy documentation`_ on protocols for more details.
from __future__ import annotations
-import sys
from typing import (
Union,
TypeVar,
@@ -18,7 +17,7 @@ from typing import (
Any,
Tuple,
NoReturn,
- TYPE_CHECKING,
+ Protocol,
)
from numpy import (
@@ -45,312 +44,282 @@ from ._scalars import (
_FloatLike_co,
_NumberLike_co,
)
-from . import NBitBase, _HAS_TYPING_EXTENSIONS
+from . import NBitBase
from ._generic_alias import NDArray
-if sys.version_info >= (3, 8):
- from typing import Protocol
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Protocol
-
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- _T1 = TypeVar("_T1")
- _T2 = TypeVar("_T2")
- _2Tuple = Tuple[_T1, _T1]
-
- _NBit1 = TypeVar("_NBit1", bound=NBitBase)
- _NBit2 = TypeVar("_NBit2", bound=NBitBase)
-
- _IntType = TypeVar("_IntType", bound=integer)
- _FloatType = TypeVar("_FloatType", bound=floating)
- _NumberType = TypeVar("_NumberType", bound=number)
- _NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
- _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
-
- class _BoolOp(Protocol[_GenericType_co]):
- @overload
- def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: float) -> float64: ...
- @overload
- def __call__(self, __other: complex) -> complex128: ...
- @overload
- def __call__(self, __other: _NumberType) -> _NumberType: ...
-
- class _BoolBitOp(Protocol[_GenericType_co]):
- @overload
- def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: _IntType) -> _IntType: ...
-
- class _BoolSub(Protocol):
- # Note that `__other: bool_` is absent here
- @overload
- def __call__(self, __other: bool) -> NoReturn: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: float) -> float64: ...
- @overload
- def __call__(self, __other: complex) -> complex128: ...
- @overload
- def __call__(self, __other: _NumberType) -> _NumberType: ...
-
- class _BoolTrueDiv(Protocol):
- @overload
- def __call__(self, __other: float | _IntLike_co) -> float64: ...
- @overload
- def __call__(self, __other: complex) -> complex128: ...
- @overload
- def __call__(self, __other: _NumberType) -> _NumberType: ...
-
- class _BoolMod(Protocol):
- @overload
- def __call__(self, __other: _BoolLike_co) -> int8: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> int_: ...
- @overload
- def __call__(self, __other: float) -> float64: ...
- @overload
- def __call__(self, __other: _IntType) -> _IntType: ...
- @overload
- def __call__(self, __other: _FloatType) -> _FloatType: ...
-
- class _BoolDivMod(Protocol):
- @overload
- def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ...
- @overload # platform dependent
- def __call__(self, __other: int) -> _2Tuple[int_]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(self, __other: _IntType) -> _2Tuple[_IntType]: ...
- @overload
- def __call__(self, __other: _FloatType) -> _2Tuple[_FloatType]: ...
-
- class _TD64Div(Protocol[_NumberType_co]):
- @overload
- def __call__(self, __other: timedelta64) -> _NumberType_co: ...
- @overload
- def __call__(self, __other: _BoolLike_co) -> NoReturn: ...
- @overload
- def __call__(self, __other: _FloatLike_co) -> timedelta64: ...
-
- class _IntTrueDiv(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> floating[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(self, __other: integer[_NBit2]) -> floating[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntOp(Protocol[_NBit1]):
- # NOTE: `uint64 + signedinteger -> float64`
- @overload
- def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
- @overload
- def __call__(
- self, __other: int | signedinteger[Any]
- ) -> Any: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> unsignedinteger[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntBitOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[Any]: ...
- @overload
- def __call__(self, __other: signedinteger[Any]) -> signedinteger[Any]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> unsignedinteger[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
- @overload
- def __call__(
- self, __other: int | signedinteger[Any]
- ) -> Any: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> unsignedinteger[_NBit1 | _NBit2]: ...
-
- class _UnsignedIntDivMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
- @overload
- def __call__(
- self, __other: int | signedinteger[Any]
- ) -> _2Tuple[Any]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(
- self, __other: unsignedinteger[_NBit2]
- ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
-
- class _SignedIntOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> signedinteger[_NBit1 | _NBit2]: ...
-
- class _SignedIntBitOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> signedinteger[_NBit1 | _NBit2]: ...
-
- class _SignedIntMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> signedinteger[_NBit1 | _NBit2]: ...
-
- class _SignedIntDivMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
- @overload
- def __call__(self, __other: int) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(
- self, __other: signedinteger[_NBit2]
- ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
-
- class _FloatOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> floating[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: integer[_NBit2] | floating[_NBit2]
- ) -> floating[_NBit1 | _NBit2]: ...
-
- class _FloatMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> floating[_NBit1]: ...
- @overload
- def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
- @overload
- def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self, __other: integer[_NBit2] | floating[_NBit2]
- ) -> floating[_NBit1 | _NBit2]: ...
-
- class _FloatDivMod(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ...
- @overload
- def __call__(self, __other: int) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
- @overload
- def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
- @overload
- def __call__(
- self, __other: integer[_NBit2] | floating[_NBit2]
- ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
-
- class _ComplexOp(Protocol[_NBit1]):
- @overload
- def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ...
- @overload
- def __call__(self, __other: int) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
- @overload
- def __call__(
- self, __other: complex
- ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
- @overload
- def __call__(
- self,
- __other: Union[
- integer[_NBit2],
- floating[_NBit2],
- complexfloating[_NBit2, _NBit2],
- ]
- ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
-
- class _NumberOp(Protocol):
- def __call__(self, __other: _NumberLike_co) -> Any: ...
-
- class _ComparisonOp(Protocol[_T1, _T2]):
- @overload
- def __call__(self, __other: _T1) -> bool_: ...
- @overload
- def __call__(self, __other: _T2) -> NDArray[bool_]: ...
-
-else:
- _BoolOp = Any
- _BoolBitOp = Any
- _BoolSub = Any
- _BoolTrueDiv = Any
- _BoolMod = Any
- _BoolDivMod = Any
- _TD64Div = Any
- _IntTrueDiv = Any
- _UnsignedIntOp = Any
- _UnsignedIntBitOp = Any
- _UnsignedIntMod = Any
- _UnsignedIntDivMod = Any
- _SignedIntOp = Any
- _SignedIntBitOp = Any
- _SignedIntMod = Any
- _SignedIntDivMod = Any
- _FloatOp = Any
- _FloatMod = Any
- _FloatDivMod = Any
- _ComplexOp = Any
- _NumberOp = Any
- _ComparisonOp = Any
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_2Tuple = Tuple[_T1, _T1]
+
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+_IntType = TypeVar("_IntType", bound=integer)
+_FloatType = TypeVar("_FloatType", bound=floating)
+_NumberType = TypeVar("_NumberType", bound=number)
+_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
+_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
+
+class _BoolOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolBitOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _IntType: ...
+
+class _BoolSub(Protocol):
+ # Note that `other: bool_` is absent here
+ @overload
+ def __call__(self, other: bool, /) -> NoReturn: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolTrueDiv(Protocol):
+ @overload
+ def __call__(self, other: float | _IntLike_co, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolMod(Protocol):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> int8: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _IntType: ...
+ @overload
+ def __call__(self, other: _FloatType, /) -> _FloatType: ...
+
+class _BoolDivMod(Protocol):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> _2Tuple[int_]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ...
+ @overload
+ def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ...
+
+class _TD64Div(Protocol[_NumberType_co]):
+ @overload
+ def __call__(self, other: timedelta64, /) -> _NumberType_co: ...
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> NoReturn: ...
+ @overload
+ def __call__(self, other: _FloatLike_co, /) -> timedelta64: ...
+
+class _IntTrueDiv(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntOp(Protocol[_NBit1]):
+ # NOTE: `uint64 + signedinteger -> float64`
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> Any: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntBitOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[Any]: ...
+ @overload
+ def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> Any: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
+
+class _SignedIntOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntBitOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
+ @overload
+ def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
+
+class _FloatOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> floating[_NBit1 | _NBit2]: ...
+
+class _FloatMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> floating[_NBit1 | _NBit2]: ...
+
+class _FloatDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ...
+ @overload
+ def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
+
+class _ComplexOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self,
+ other: Union[
+ integer[_NBit2],
+ floating[_NBit2],
+ complexfloating[_NBit2, _NBit2],
+ ], /,
+ ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
+
+class _NumberOp(Protocol):
+ def __call__(self, other: _NumberLike_co, /) -> Any: ...
+
+class _ComparisonOp(Protocol[_T1, _T2]):
+ @overload
+ def __call__(self, other: _T1, /) -> bool_: ...
+ @overload
+ def __call__(self, other: _T2, /) -> NDArray[bool_]: ...
diff --git a/numpy/typing/_char_codes.py b/numpy/typing/_char_codes.py
index 22ee168e9..139471084 100644
--- a/numpy/typing/_char_codes.py
+++ b/numpy/typing/_char_codes.py
@@ -1,171 +1,111 @@
-import sys
-from typing import Any, TYPE_CHECKING
-
-from . import _HAS_TYPING_EXTENSIONS
-
-if sys.version_info >= (3, 8):
- from typing import Literal
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Literal
-
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- _BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
-
- _UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
- _UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
- _UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
- _UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
-
- _Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
- _Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
- _Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
- _Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
-
- _Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
- _Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
- _Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
-
- _Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
- _Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
-
- _ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
- _ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
- _IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
- _IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
- _IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
- _LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
-
- _UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
- _UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
- _UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
- _UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
- _UIntCodes = Literal["uint", "L", "=L", "<L", ">L"]
- _ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
-
- _HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
- _SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
- _DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
- _LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
-
- _CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
- _CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
- _CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
-
- _StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
- _BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
- _VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
- _ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
-
- _DT64Codes = Literal[
- "datetime64", "=datetime64", "<datetime64", ">datetime64",
- "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
- "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
- "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
- "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
- "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
- "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
- "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
- "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
- "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
- "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
- "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
- "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
- "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
- "M", "=M", "<M", ">M",
- "M8", "=M8", "<M8", ">M8",
- "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
- "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
- "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
- "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
- "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
- "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
- "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
- "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
- "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
- "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
- "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
- "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
- "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
- ]
- _TD64Codes = Literal[
- "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
- "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
- "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
- "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
- "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
- "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
- "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
- "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
- "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
- "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
- "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
- "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
- "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
- "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
- "m", "=m", "<m", ">m",
- "m8", "=m8", "<m8", ">m8",
- "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
- "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
- "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
- "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
- "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
- "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
- "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
- "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
- "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
- "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
- "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
- "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
- "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
- ]
-
-else:
- _BoolCodes = Any
-
- _UInt8Codes = Any
- _UInt16Codes = Any
- _UInt32Codes = Any
- _UInt64Codes = Any
-
- _Int8Codes = Any
- _Int16Codes = Any
- _Int32Codes = Any
- _Int64Codes = Any
-
- _Float16Codes = Any
- _Float32Codes = Any
- _Float64Codes = Any
-
- _Complex64Codes = Any
- _Complex128Codes = Any
-
- _ByteCodes = Any
- _ShortCodes = Any
- _IntCCodes = Any
- _IntPCodes = Any
- _IntCodes = Any
- _LongLongCodes = Any
-
- _UByteCodes = Any
- _UShortCodes = Any
- _UIntCCodes = Any
- _UIntPCodes = Any
- _UIntCodes = Any
- _ULongLongCodes = Any
-
- _HalfCodes = Any
- _SingleCodes = Any
- _DoubleCodes = Any
- _LongDoubleCodes = Any
-
- _CSingleCodes = Any
- _CDoubleCodes = Any
- _CLongDoubleCodes = Any
-
- _StrCodes = Any
- _BytesCodes = Any
- _VoidCodes = Any
- _ObjectCodes = Any
-
- _DT64Codes = Any
- _TD64Codes = Any
+from typing import Literal
+
+_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
+
+_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
+_UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
+_UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
+_UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
+
+_Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
+_Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
+_Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
+_Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
+
+_Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
+_Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
+_Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
+
+_Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
+_Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
+
+_ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
+_ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
+_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
+_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
+_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
+
+_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
+_UIntCodes = Literal["uint", "L", "=L", "<L", ">L"]
+_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
+
+_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
+
+_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
+
+_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
+
+_DT64Codes = Literal[
+ "datetime64", "=datetime64", "<datetime64", ">datetime64",
+ "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
+ "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
+ "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
+ "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
+ "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
+ "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
+ "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
+ "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
+ "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
+ "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
+ "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
+ "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
+ "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
+ "M", "=M", "<M", ">M",
+ "M8", "=M8", "<M8", ">M8",
+ "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+ "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+ "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+ "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+ "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+ "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+ "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+ "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+ "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+ "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+ "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+ "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+ "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]
+_TD64Codes = Literal[
+ "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
+ "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
+ "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
+ "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
+ "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
+ "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
+ "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
+ "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
+ "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
+ "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
+ "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
+ "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
+ "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
+ "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
+ "m", "=m", "<m", ">m",
+ "m8", "=m8", "<m8", ">m8",
+ "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+ "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+ "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+ "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+ "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+ "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+ "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+ "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+ "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+ "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+ "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+ "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+ "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py
index b2ce3adb4..0955f5b18 100644
--- a/numpy/typing/_dtype_like.py
+++ b/numpy/typing/_dtype_like.py
@@ -1,19 +1,10 @@
-import sys
-from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, TYPE_CHECKING
+from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, Protocol, TypedDict
import numpy as np
-from . import _HAS_TYPING_EXTENSIONS
from ._shape import _ShapeLike
from ._generic_alias import _DType as DType
-if sys.version_info >= (3, 8):
- from typing import Protocol, TypedDict
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import Protocol, TypedDict
-else:
- from ._generic_alias import _GenericAlias as GenericAlias
-
from ._char_codes import (
_BoolCodes,
_UInt8Codes,
@@ -59,30 +50,22 @@ from ._char_codes import (
_DTypeLikeNested = Any # TODO: wait for support for recursive types
_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
-if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
- # Mandatory keys
- class _DTypeDictBase(TypedDict):
- names: Sequence[str]
- formats: Sequence[_DTypeLikeNested]
-
- # Mandatory + optional keys
- class _DTypeDict(_DTypeDictBase, total=False):
- offsets: Sequence[int]
- titles: Sequence[Any] # Only `str` elements are usable as indexing aliases, but all objects are legal
- itemsize: int
- aligned: bool
-
- # A protocol for anything with the dtype attribute
- class _SupportsDType(Protocol[_DType_co]):
- @property
- def dtype(self) -> _DType_co: ...
-
-else:
- _DTypeDict = Any
-
- class _SupportsDType: ...
- _SupportsDType = GenericAlias(_SupportsDType, _DType_co)
-
+# Mandatory keys
+class _DTypeDictBase(TypedDict):
+ names: Sequence[str]
+ formats: Sequence[_DTypeLikeNested]
+
+# Mandatory + optional keys
+class _DTypeDict(_DTypeDictBase, total=False):
+ offsets: Sequence[int]
+ titles: Sequence[Any] # Only `str` elements are usable as indexing aliases, but all objects are legal
+ itemsize: int
+ aligned: bool
+
+# A protocol for anything with the dtype attribute
+class _SupportsDType(Protocol[_DType_co]):
+ @property
+ def dtype(self) -> _DType_co: ...
# Would create a dtype[np.void]
_VoidDTypeLike = Union[
diff --git a/numpy/typing/_shape.py b/numpy/typing/_shape.py
index 75698f3d3..c28859b19 100644
--- a/numpy/typing/_shape.py
+++ b/numpy/typing/_shape.py
@@ -1,14 +1,4 @@
-import sys
-from typing import Sequence, Tuple, Union, Any
-
-from . import _HAS_TYPING_EXTENSIONS
-
-if sys.version_info >= (3, 8):
- from typing import SupportsIndex
-elif _HAS_TYPING_EXTENSIONS:
- from typing_extensions import SupportsIndex
-else:
- SupportsIndex = Any
+from typing import Sequence, Tuple, Union, SupportsIndex
_Shape = Tuple[int, ...]
diff --git a/numpy/typing/_ufunc.pyi b/numpy/typing/_ufunc.pyi
index be1e654c2..1be3500c1 100644
--- a/numpy/typing/_ufunc.pyi
+++ b/numpy/typing/_ufunc.pyi
@@ -14,6 +14,8 @@ from typing import (
overload,
Tuple,
TypeVar,
+ Literal,
+ SupportsIndex,
)
from numpy import ufunc, _CastingKind, _OrderKACF
@@ -24,8 +26,6 @@ from ._scalars import _ScalarLike_co
from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
from ._dtype_like import DTypeLike
-from typing_extensions import Literal, SupportsIndex
-
_T = TypeVar("_T")
_2Tuple = Tuple[_T, _T]
_3Tuple = Tuple[_T, _T, _T]
@@ -105,8 +105,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
def at(
self,
- __a: NDArray[Any],
- __indices: _ArrayLikeInt_co,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ /,
) -> None: ...
class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
@@ -158,9 +159,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
def at(
self,
- __a: NDArray[Any],
- __indices: _ArrayLikeInt_co,
- __b: ArrayLike,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ b: ArrayLike,
+ /,
) -> None: ...
def reduce(
@@ -195,9 +197,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
@overload
def outer(
self,
- __A: _ScalarLike_co,
- __B: _ScalarLike_co,
- *,
+ A: _ScalarLike_co,
+ B: _ScalarLike_co,
+ /, *,
out: None = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
@@ -210,9 +212,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]):
@overload
def outer( # type: ignore[misc]
self,
- __A: ArrayLike,
- __B: ArrayLike,
- *,
+ A: ArrayLike,
+ B: ArrayLike,
+ /, *,
out: None | NDArray[Any] | Tuple[NDArray[Any]] = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py
index 7b9309329..59e724f22 100644
--- a/numpy/typing/tests/data/fail/modules.py
+++ b/numpy/typing/tests/data/fail/modules.py
@@ -12,7 +12,6 @@ np.math # E: Module has no attribute
# Public sub-modules that are not imported to their parent module by default;
# e.g. one must first execute `import numpy.lib.recfunctions`
np.lib.recfunctions # E: Module has no attribute
-np.ma.mrecords # E: Module has no attribute
np.__NUMPY_SETUP__ # E: Module has no attribute
np.__deprecated_attrs__ # E: Module has no attribute
diff --git a/numpy/typing/tests/data/fail/npyio.py b/numpy/typing/tests/data/fail/npyio.py
new file mode 100644
index 000000000..c91b4c9cb
--- /dev/null
+++ b/numpy/typing/tests/data/fail/npyio.py
@@ -0,0 +1,30 @@
+import pathlib
+from typing import IO
+
+import numpy.typing as npt
+import numpy as np
+
+str_path: str
+bytes_path: bytes
+pathlib_path: pathlib.Path
+str_file: IO[str]
+AR_i8: npt.NDArray[np.int64]
+
+np.load(str_file) # E: incompatible type
+
+np.save(bytes_path, AR_i8) # E: incompatible type
+np.save(str_file, AR_i8) # E: incompatible type
+
+np.savez(bytes_path, AR_i8) # E: incompatible type
+np.savez(str_file, AR_i8) # E: incompatible type
+
+np.savez_compressed(bytes_path, AR_i8) # E: incompatible type
+np.savez_compressed(str_file, AR_i8) # E: incompatible type
+
+np.loadtxt(bytes_path) # E: incompatible type
+
+np.fromregex(bytes_path, ".", np.int64) # E: No overload variant
+
+np.recfromtxt(bytes_path) # E: incompatible type
+
+np.recfromcsv(bytes_path) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py
index 099418e67..94fe3f71e 100644
--- a/numpy/typing/tests/data/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.py
@@ -87,7 +87,6 @@ round(c8) # E: No overload variant
c8.__getnewargs__() # E: Invalid self argument
f2.__getnewargs__() # E: Invalid self argument
-f2.is_integer() # E: Invalid self argument
f2.hex() # E: Invalid self argument
np.float16.fromhex("0x0.0p+0") # E: Invalid self argument
f2.__trunc__() # E: Invalid self argument
diff --git a/numpy/typing/tests/data/fail/stride_tricks.py b/numpy/typing/tests/data/fail/stride_tricks.py
new file mode 100644
index 000000000..f2bfba743
--- /dev/null
+++ b/numpy/typing/tests/data/fail/stride_tricks.py
@@ -0,0 +1,9 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+
+np.lib.stride_tricks.as_strided(AR_f8, shape=8) # E: No overload variant
+np.lib.stride_tricks.as_strided(AR_f8, strides=8) # E: No overload variant
+
+np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # E: No overload variant
diff --git a/numpy/typing/tests/data/reveal/arraypad.py b/numpy/typing/tests/data/reveal/arraypad.py
index ba5577ee0..03c03fb4e 100644
--- a/numpy/typing/tests/data/reveal/arraypad.py
+++ b/numpy/typing/tests/data/reveal/arraypad.py
@@ -1,5 +1,4 @@
-from typing import List, Any, Mapping, Tuple
-from typing_extensions import SupportsIndex
+from typing import List, Any, Mapping, Tuple, SupportsIndex
import numpy as np
import numpy.typing as npt
diff --git a/numpy/typing/tests/data/reveal/npyio.py b/numpy/typing/tests/data/reveal/npyio.py
new file mode 100644
index 000000000..d66201dd3
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/npyio.py
@@ -0,0 +1,91 @@
+import re
+import pathlib
+from typing import IO, List
+
+import numpy.typing as npt
+import numpy as np
+
+str_path: str
+pathlib_path: pathlib.Path
+str_file: IO[str]
+bytes_file: IO[bytes]
+
+bag_obj: np.lib.npyio.BagObj[int]
+npz_file: np.lib.npyio.NpzFile
+
+AR_i8: npt.NDArray[np.int64]
+AR_LIKE_f8: List[float]
+
+class BytesWriter:
+ def write(self, data: bytes) -> None: ...
+
+class BytesReader:
+ def read(self, n: int = ...) -> bytes: ...
+ def seek(self, offset: int, whence: int = ...) -> int: ...
+
+bytes_writer: BytesWriter
+bytes_reader: BytesReader
+
+reveal_type(bag_obj.a) # E: int
+reveal_type(bag_obj.b) # E: int
+
+reveal_type(npz_file.zip) # E: zipfile.ZipFile
+reveal_type(npz_file.fid) # E: Union[None, typing.IO[builtins.str]]
+reveal_type(npz_file.files) # E: list[builtins.str]
+reveal_type(npz_file.allow_pickle) # E: bool
+reveal_type(npz_file.pickle_kwargs) # E: Union[None, typing.Mapping[builtins.str, Any]]
+reveal_type(npz_file.f) # E: numpy.lib.npyio.BagObj[numpy.lib.npyio.NpzFile]
+reveal_type(npz_file["test"]) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(len(npz_file)) # E: int
+with npz_file as f:
+ reveal_type(f) # E: numpy.lib.npyio.NpzFile
+
+reveal_type(np.load(bytes_file)) # E: Any
+reveal_type(np.load(pathlib_path, allow_pickle=True)) # E: Any
+reveal_type(np.load(str_path, encoding="bytes")) # E: Any
+reveal_type(np.load(bytes_reader)) # E: Any
+
+reveal_type(np.save(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.save(pathlib_path, AR_i8, allow_pickle=True)) # E: None
+reveal_type(np.save(str_path, AR_LIKE_f8)) # E: None
+reveal_type(np.save(bytes_writer, AR_LIKE_f8)) # E: None
+
+reveal_type(np.savez(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None
+reveal_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None
+reveal_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None
+
+reveal_type(np.savez_compressed(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None
+reveal_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None
+reveal_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None
+
+reveal_type(np.loadtxt(bytes_file)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(pathlib_path, dtype=np.str_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.loadtxt(str_path, dtype=str, skiprows=2)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.loadtxt(str_file, comments="test")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(str_path, delimiter="\n")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(str_path, ndmin=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.loadtxt(["1", "2", "3"])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.fromregex(bytes_file, "test", np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.fromregex(str_file, b"test", dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8")) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.fromregex(pathlib_path, "test", np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.fromregex(bytes_reader, "test", np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.genfromtxt(bytes_file)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(pathlib_path, dtype=np.str_)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.genfromtxt(str_path, dtype=str, skiprows=2)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.genfromtxt(str_file, comments="test")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(str_path, delimiter="\n")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(str_path, ndmin=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.genfromtxt(["1", "2", "3"], ndmin=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.recfromtxt(bytes_file)) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromtxt(pathlib_path, usemask=True)) # E: numpy.ma.mrecords.MaskedRecords[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromtxt(["1", "2", "3"])) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+
+reveal_type(np.recfromcsv(bytes_file)) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromcsv(pathlib_path, usemask=True)) # E: numpy.ma.mrecords.MaskedRecords[Any, numpy.dtype[numpy.void]]
+reveal_type(np.recfromcsv(["1", "2", "3"])) # E: numpy.recarray[Any, numpy.dtype[numpy.void]]
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index c36813004..e83d579e9 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -156,3 +156,5 @@ reveal_type(round(f8, 3)) # E: {float64}
if sys.version_info >= (3, 9):
reveal_type(f8.__ceil__()) # E: int
reveal_type(f8.__floor__()) # E: int
+
+reveal_type(i8.is_integer()) # E: Literal[True]
diff --git a/numpy/typing/tests/data/reveal/stride_tricks.py b/numpy/typing/tests/data/reveal/stride_tricks.py
new file mode 100644
index 000000000..152d9cea6
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/stride_tricks.py
@@ -0,0 +1,28 @@
+from typing import List, Dict, Any
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_LIKE_f: List[float]
+interface_dict: Dict[str, Any]
+
+reveal_type(np.lib.stride_tricks.DummyArray(interface_dict)) # E: numpy.lib.stride_tricks.DummyArray
+
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_LIKE_f)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5))) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.broadcast_to(AR_f8, 5)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.broadcast_to(AR_LIKE_f, (1, 5))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.broadcast_to(AR_f8, [4, 6], subok=True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+
+reveal_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2))) # E: tuple[builtins.int]
+reveal_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7))) # E: tuple[builtins.int]
+
+reveal_type(np.broadcast_arrays(AR_f8, AR_f8)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.broadcast_arrays(AR_f8, AR_LIKE_f)) # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py
index e82b08ac2..151b06bed 100644
--- a/numpy/typing/tests/test_runtime.py
+++ b/numpy/typing/tests/test_runtime.py
@@ -3,18 +3,12 @@
from __future__ import annotations
import sys
-from typing import get_type_hints, Union, Tuple, NamedTuple
+from typing import get_type_hints, Union, Tuple, NamedTuple, get_args, get_origin
import pytest
import numpy as np
import numpy.typing as npt
-try:
- from typing_extensions import get_args, get_origin
- SKIP = False
-except ImportError:
- SKIP = True
-
class TypeTup(NamedTuple):
typ: type
@@ -36,7 +30,6 @@ TYPES = {
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
-@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
def test_get_args(name: type, tup: TypeTup) -> None:
"""Test `typing.get_args`."""
typ, ref = tup.typ, tup.args
@@ -45,7 +38,6 @@ def test_get_args(name: type, tup: TypeTup) -> None:
@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
-@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
def test_get_origin(name: type, tup: TypeTup) -> None:
"""Test `typing.get_origin`."""
typ, ref = tup.typ, tup.origin
diff --git a/numpy/typing/tests/test_typing_extensions.py b/numpy/typing/tests/test_typing_extensions.py
deleted file mode 100644
index f59f222fb..000000000
--- a/numpy/typing/tests/test_typing_extensions.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Tests for the optional typing-extensions dependency."""
-
-import sys
-import textwrap
-import subprocess
-
-CODE = textwrap.dedent(r"""
- import sys
- import importlib
-
- assert "typing_extensions" not in sys.modules
- assert "numpy.typing" not in sys.modules
-
- # Importing `typing_extensions` will now raise an `ImportError`
- sys.modules["typing_extensions"] = None
- assert importlib.import_module("numpy.typing")
-""")
-
-
-def test_no_typing_extensions() -> None:
- """Import `numpy.typing` in the absence of typing-extensions.
-
- Notes
- -----
- Ideally, we'd just run the normal typing tests in an environment where
- typing-extensions is not installed, but unfortunatelly this is currently
- impossible as it is an indirect hard dependency of pytest.
-
- """
- p = subprocess.run([sys.executable, '-c', CODE], capture_output=True)
- if p.returncode:
- raise AssertionError(
- f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
- )
-
diff --git a/pyproject.toml b/pyproject.toml
index 14f275e97..941c8fa8c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ requires = [
"packaging==20.5; platform_machine=='arm64'", # macos M1
"setuptools<49.2.0",
"wheel==0.36.2",
- "Cython>=0.29.21,<3.0", # Note: keep in sync with tools/cythonize.py
+ "Cython>=0.29.24,<3.0", # Note: keep in sync with tools/cythonize.py
]
diff --git a/runtests.py b/runtests.py
index 452ccbc64..8ce9a639c 100755
--- a/runtests.py
+++ b/runtests.py
@@ -474,9 +474,24 @@ def build_project(args):
'--single-version-externally-managed',
'--record=' + dst_dir + 'tmp_install_log.txt']
- from distutils.sysconfig import get_python_lib
- site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
- site_dir_noarch = get_python_lib(prefix=dst_dir, plat_specific=False)
+ py_v_s = sysconfig.get_config_var('py_version_short')
+ platlibdir = getattr(sys, 'platlibdir', '') # Python3.9+
+ site_dir_template = os.path.normpath(sysconfig.get_path(
+ 'platlib', expand=False
+ ))
+ site_dir = site_dir_template.format(platbase=dst_dir,
+ py_version_short=py_v_s,
+ platlibdir=platlibdir,
+ base=dst_dir,
+ )
+ noarch_template = os.path.normpath(sysconfig.get_path(
+ 'purelib', expand=False
+ ))
+ site_dir_noarch = noarch_template.format(base=dst_dir,
+ py_version_short=py_v_s,
+ platlibdir=platlibdir,
+ )
+
# easy_install won't install to a path that Python by default cannot see
# and isn't on the PYTHONPATH. Plus, it has to exist.
if not os.path.exists(site_dir):
@@ -609,7 +624,7 @@ def asv_substitute_config(in_config, out_config, **custom_vars):
hash_line = wfd.readline().split('hash:')
if len(hash_line) > 1 and int(hash_line[1]) == vars_hash:
return True
- except IOError:
+ except OSError:
pass
custom_vars = {f'{{{k}}}':v for k, v in custom_vars.items()}
diff --git a/setup.py b/setup.py
index 826610466..245c23676 100755
--- a/setup.py
+++ b/setup.py
@@ -30,8 +30,9 @@ import re
# Python supported version checks. Keep right after stdlib imports to ensure we
# get a sensible error for older Python versions
+# NOTE: the check below is intentionally left at (3, 7) because LGTM still runs 3.7; bump it to (3, 8) for the 1.22 release — the error message already states the real 3.8 requirement.
if sys.version_info[:2] < (3, 7):
- raise RuntimeError("Python version >= 3.7 required.")
+ raise RuntimeError("Python version >= 3.8 required.")
import versioneer
@@ -90,9 +91,9 @@ License :: OSI Approved :: BSD License
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
@@ -405,7 +406,7 @@ def setup_package():
test_suite='pytest',
version=versioneer.get_version(),
cmdclass=cmdclass,
- python_requires='>=3.7',
+ python_requires='>=3.8',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds
diff --git a/test_requirements.txt b/test_requirements.txt
index 9f2ee2001..ee9bc9a84 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,7 +1,7 @@
cython==0.29.24
-wheel<0.36.3
+wheel<0.37.1
setuptools<49.2.0
-hypothesis==6.14.5
+hypothesis==6.17.3
pytest==6.2.4
pytz==2021.1
pytest-cov==2.12.1
@@ -10,6 +10,4 @@ pickle5; python_version == '3.7' and platform_python_implementation != 'PyPy'
cffi
# For testing types. Notes on the restrictions:
# - Mypy relies on C API features not present in PyPy
-# - There is no point in installing typing_extensions without mypy
mypy==0.910; platform_python_implementation != "PyPy"
-typing_extensions==3.10.0.0; platform_python_implementation != "PyPy"
diff --git a/tools/cythonize.py b/tools/cythonize.py
index 06cf54c9a..c4c25ae26 100755
--- a/tools/cythonize.py
+++ b/tools/cythonize.py
@@ -40,12 +40,6 @@ HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'numpy'
VENDOR = 'NumPy'
-# WindowsError is not defined on unix systems
-try:
- WindowsError
-except NameError:
- WindowsError = None
-
#
# Rules
#
@@ -72,7 +66,8 @@ def process_pyx(fromfile, tofile):
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with that in pyproject.toml
- required_version = LooseVersion('0.29.21')
+    # 0.29.24 is the first Cython release with Python 3.10 support
+ required_version = LooseVersion('0.29.24')
if LooseVersion(cython_version) < required_version:
cython_path = Cython.__file__
diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py
index 43a6003f4..29bf96fe2 100755
--- a/tools/swig/test/testFarray.py
+++ b/tools/swig/test/testFarray.py
@@ -28,7 +28,7 @@ class FarrayTestCase(unittest.TestCase):
def testConstructor1(self):
"Test Farray size constructor"
- self.failUnless(isinstance(self.array, Farray.Farray))
+ self.assertTrue(isinstance(self.array, Farray.Farray))
def testConstructor2(self):
"Test Farray copy constructor"
@@ -36,7 +36,7 @@ class FarrayTestCase(unittest.TestCase):
for j in range(self.ncols):
self.array[i, j] = i + j
arrayCopy = Farray.Farray(self.array)
- self.failUnless(arrayCopy == self.array)
+ self.assertTrue(arrayCopy == self.array)
def testConstructorBad1(self):
"Test Farray size constructor, negative nrows"
@@ -48,15 +48,15 @@ class FarrayTestCase(unittest.TestCase):
def testNrows(self):
"Test Farray nrows method"
- self.failUnless(self.array.nrows() == self.nrows)
+ self.assertTrue(self.array.nrows() == self.nrows)
def testNcols(self):
"Test Farray ncols method"
- self.failUnless(self.array.ncols() == self.ncols)
+ self.assertTrue(self.array.ncols() == self.ncols)
def testLen(self):
"Test Farray __len__ method"
- self.failUnless(len(self.array) == self.nrows*self.ncols)
+ self.assertTrue(len(self.array) == self.nrows*self.ncols)
def testSetGet(self):
"Test Farray __setitem__, __getitem__ methods"
@@ -67,7 +67,7 @@ class FarrayTestCase(unittest.TestCase):
self.array[i, j] = i*j
for i in range(m):
for j in range(n):
- self.failUnless(self.array[i, j] == i*j)
+ self.assertTrue(self.array[i, j] == i*j)
def testSetBad1(self):
"Test Farray __setitem__ method, negative row"
@@ -113,7 +113,7 @@ class FarrayTestCase(unittest.TestCase):
for i in range(self.nrows):
for j in range(self.ncols):
self.array[i, j] = i+j
- self.failUnless(self.array.asString() == result)
+ self.assertTrue(self.array.asString() == result)
def testStr(self):
"Test Farray __str__ method"
@@ -127,7 +127,7 @@ class FarrayTestCase(unittest.TestCase):
for i in range(self.nrows):
for j in range(self.ncols):
self.array[i, j] = i-j
- self.failUnless(str(self.array) == result)
+ self.assertTrue(str(self.array) == result)
def testView(self):
"Test Farray view method"
@@ -135,11 +135,11 @@ class FarrayTestCase(unittest.TestCase):
for j in range(self.ncols):
self.array[i, j] = i+j
a = self.array.view()
- self.failUnless(isinstance(a, np.ndarray))
- self.failUnless(a.flags.f_contiguous)
+ self.assertTrue(isinstance(a, np.ndarray))
+ self.assertTrue(a.flags.f_contiguous)
for i in range(self.nrows):
for j in range(self.ncols):
- self.failUnless(a[i, j] == i+j)
+ self.assertTrue(a[i, j] == i+j)
######################################################################