summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--BENTO_BUILD.txt22
-rw-r--r--bento.info92
-rw-r--r--bscript121
-rw-r--r--doc/release/1.11.0-notes.rst21
-rw-r--r--doc/source/dev/development_environment.rst15
-rw-r--r--doc/source/reference/arrays.dtypes.rst2
-rw-r--r--doc/source/reference/arrays.interface.rst2
-rw-r--r--numpy/add_newdocs.py4
-rw-r--r--numpy/bento.info22
-rw-r--r--numpy/core/bento.info41
-rw-r--r--numpy/core/bscript555
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c94
-rw-r--r--numpy/core/src/multiarray/mapping.c126
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/number.c13
-rw-r--r--numpy/core/src/private/npy_extint128.h2
-rw-r--r--numpy/core/tests/test_deprecations.py231
-rw-r--r--numpy/core/tests/test_indexing.py243
-rw-r--r--numpy/doc/basics.py39
-rw-r--r--numpy/fft/bento.info6
-rw-r--r--numpy/fft/bscript7
-rw-r--r--numpy/lib/function_base.py50
-rw-r--r--numpy/lib/tests/test_function_base.py43
-rw-r--r--numpy/linalg/bento.info21
-rw-r--r--numpy/linalg/bscript26
-rw-r--r--numpy/random/bento.info9
-rw-r--r--numpy/random/bscript38
-rw-r--r--numpy/random/mtrand/mtrand.pyx2
-rw-r--r--numpy/setup.py1
-rw-r--r--tools/swig/numpy.i2
30 files changed, 384 insertions, 1468 deletions
diff --git a/BENTO_BUILD.txt b/BENTO_BUILD.txt
deleted file mode 100644
index ac11b0441..000000000
--- a/BENTO_BUILD.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-No-frill version:
-
- * Clone bento::
-
- git clone git://github.com/cournape/Bento.git bento-git
-
- * Bootstrap bento::
-
- cd bento-git && python bootstrap.py
-
- * Download waf >= 1.6.5 from a release (or from git)::
-
- git clone https://code.google.com/p/waf/
-
- * Build numpy with bento:
-
- export WAFDIR=ROOT_OF_WAF_CHECKOUT # WAFDIR should be such as $WAFDIR/waflib exists
- $BENTO_ROOT/bentomaker build -j 4 # 4 threads in parallel
- # or with progress bar
- $BENTO_ROOT/bentomaker build -p
- # or with verbose output
- $BENTO_ROOT/bentomaker build -v
diff --git a/bento.info b/bento.info
deleted file mode 100644
index d2adcc394..000000000
--- a/bento.info
+++ /dev/null
@@ -1,92 +0,0 @@
-Name: numpy
-Version: 1.10.0
-Summary: NumPy: array processing for numbers, strings, records, and objects.
-Url: http://www.numpy.org
-DownloadUrl: http://sourceforge.net/project/showfiles.php?group_id=1369&package_id=175103
-Description:
- NumPy is a general-purpose array-processing package designed to
- efficiently manipulate large multi-dimensional arrays of arbitrary
- records without sacrificing too much speed for small multi-dimensional
- arrays. NumPy is built on the Numeric code base and adds features
- introduced by numarray as well as an extended C-API and the ability to
- create arrays of arbitrary type which also makes NumPy suitable for
- interfacing with general-purpose data-base applications.
-
- There are also basic facilities for discrete fourier transform,
- basic linear algebra and random number generation.
-Author: Travis E. Oliphant, et.al.
-AuthorEmail: oliphant@enthought.com
-Maintainer: NumPy Developers
-MaintainerEmail: numpy-discussion@scipy.org
-License: BSD
-Platforms: Windows,Linux,Solaris,Mac OS-X,Unix
-Classifiers:
- Development Status :: 5 - Production/Stable,
- Intended Audience :: Science/Research,
- Intended Audience :: Developers,
- License :: OSI Approved,
- Programming Language :: C,
- Programming Language :: Python,
- Programming Language :: Python :: 2,
- Programming Language :: Python :: 2.6,
- Programming Language :: Python :: 2.7,
- Programming Language :: Python :: 3,
- Programming Language :: Python :: 3.2,
- Programming Language :: Python :: 3.3,
- Programming Language :: Python :: 3.4,
- Programming Language :: Python :: 3.5,
- Programming Language :: Python :: Implementation :: CPython,
- Topic :: Software Development,
- Topic :: Scientific/Engineering,
- Operating System :: Microsoft :: Windows,
- Operating System :: POSIX,
- Operating System :: Unix,
- Operating System :: MacOS
-
-ExtraSourceFiles:
- setup.py,
- numpy/core/include/numpy/*.h,
- numpy/core/include/numpy/*.in,
- numpy/core/*.ini.in,
- numpy/core/src/npymath/*.h,
- numpy/core/src/multiarray/*.c,
- numpy/core/src/multiarray/*.c.src,
- numpy/core/src/multiarray/*.h,
- numpy/core/src/private/*.h,
- numpy/core/src/private/*.h.src,
- numpy/core/src/umath/*.src,
- numpy/core/src/umath/*.h,
- numpy/core/src/umath/*.c,
- numpy/fft/*.h,
- numpy/random/mtrand/*.h
-
-DataFiles: tests
- TargetDir: $sitedir/numpy
- SourceDir: numpy
- Files:
- **/tests/*.py,
- core/tests/data/*.fits,
- core/tests/data/*.pkl,
- f2py/tests/src/array_from_pyobj/*.c,
- f2py/src/test/*.c,
- f2py/src/test/*.f,
- f2py/src/test/*.f90
-
-DataFiles: f2py-data
- TargetDir: $sitedir
- Files:
- numpy/f2py/src/fortranobject.*
-
-DataFiles: numpy-includes
- TargetDir: $sitedir
- Files:
- numpy/core/include/numpy/*.h
-
-HookFile: bscript
-MetaTemplateFiles: numpy/version.py.in, numpy/__config__.py.in
-Recurse: numpy
-UseBackends: Waf
-
-Library:
- Packages:
- numpy
diff --git a/bscript b/bscript
deleted file mode 100644
index e5d1a89c5..000000000
--- a/bscript
+++ /dev/null
@@ -1,121 +0,0 @@
-"""
-See BENTO_BUILD.txt.
-
-Caveats:
-
- - no automatic detection for BLAS/LAPACK/etc... You need to set it up
- manually for now (except on Mac OS X and Debian/Ubuntu). The upside is
- that it is extremely easy to do so
- - bento is still in flux, and some things may changes between releases.
-"""
-
-import os
-import sys
-import subprocess
-
-# Ugly but necessary hack: import numpy here so that wscript in sub directories
-# will see this numpy and not an already installed one
-import __builtin__
-__builtin__.__NUMPY_SETUP__ = True
-
-import waflib
-
-from numpy.distutils.conv_template \
- import \
- process_str as process_c_str
-
-from bento.commands import hooks
-from bento.utils.utils \
- import \
- cmd_is_runnable
-from bento.backends.waf_backend \
- import \
- WAF_TOOLDIR
-from bento.backends.waf_tools \
- import \
- blas_lapack
-
-sys.path.insert(0, os.getcwd())
-try:
- _SETUP_PY = __import__("setup")
-finally:
- sys.path.pop(0)
-
-def compute_git_revision(top_node):
- git_repo_node = top_node.find_node(".git")
- if git_repo_node and cmd_is_runnable(["git", "--version"]):
- s = subprocess.Popen(["git", "rev-parse", "HEAD"],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=top_node.abspath())
- out = s.communicate()[0]
- return out.decode().strip()
- else:
- return ""
-
-def _register_metadata(context):
- git_revision = compute_git_revision(context.top_node)
- full_version = context.pkg.version
- if not _SETUP_PY.ISRELEASED:
- full_version += '.dev-' + git_revision[:7]
-
- context.register_metadata("git_revision", git_revision)
- context.register_metadata("is_released", _SETUP_PY.ISRELEASED)
- context.register_metadata("full_version", full_version)
-
-def _generate_cython():
- print("Cythonizing sources")
- cwd = os.path.abspath(os.path.dirname(__file__))
- p = subprocess.call([sys.executable,
- os.path.join(cwd, 'tools', 'cythonize.py'),
- 'numpy'],
- cwd=cwd)
- if p != 0:
- raise RuntimeError("Running cythonize failed!")
-
-@hooks.post_configure
-def post_configure(context):
- conf = context.waf_context
- if conf.env["CC_NAME"] == "gcc":
- conf.env.CFLAGS_PYEXT.append("-Wfatal-errors")
-
- conf.load("arch", tooldir=[WAF_TOOLDIR])
- if sys.platform == "darwin":
- conf.env["MACOSX_DEPLOYMENT_TARGET"] = "10.4"
- conf.check_cc_default_arch()
- archs = [conf.env.DEFAULT_CC_ARCH]
- conf.env.ARCH = archs
-
- blas_lapack.check_blas_lapack(context)
-
- _generate_cython()
-
-@hooks.pre_build
-def pre_build(context):
- _register_metadata(context)
-
-@hooks.pre_sdist
-def pre_sdist(context):
- _register_metadata(context)
-
-@hooks.options
-def options(global_context):
- blas_lapack.add_options(global_context)
-
-
-class CTemplateTask(waflib.Task.Task):
- color = 'BLUE'
- before = ['c']
- def run(self):
- s = self.inputs[0]
- cnt = s.read()
- writestr = process_c_str(cnt)
- o = self.outputs[0]
- o.write(writestr)
-
-@waflib.TaskGen.extension(".src")
-def c_template(self, node):
- outs = []
- outs.append(node.change_ext(""))
-
- tsk = self.create_task('CTemplateTask', node, outs)
- if "c" in self.features and not node.name[-6:] == (".h.src"):
- self.source.append(outs[0])
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index 3ca77eea5..9afe6e866 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -10,6 +10,9 @@ Highlights
Dropped Support:
+* Building with Bento is no longer supported and the associated files have
+ been removed.
+
Future Changes:
@@ -17,6 +20,20 @@ Future Changes:
Compatibility notes
===================
+Deprecated to error
+~~~~~~~~~~~~~~~~~~~
+
+* Indexing with floats raises IndexError,
+ e.g., a[0, 0.0].
+* Indexing with non-integer array_like raises IndexError,
+ e.g., a['1', '2']
+* Indexing with multiple ellipsis raises IndexError,
+ e.g., a[..., ...].
+* Indexing with boolean where integer expected raises IndexError,
+ e.g., a[False:True:True].
+* Non-integers used as index values raise TypeError,
+ e.g., in reshape, take, and specifying reduce axis.
+
New Features
============
@@ -36,6 +53,10 @@ has an option to spend more effort to reduce false positives.
Improvements
============
+*np.gradient* now supports an ``axis`` argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``axis`` parameter was added to *np.gradient* for consistency.
+It allows to specify over which axes the gradient is calculated.
Changes
=======
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 6221353ce..b09728e03 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -82,19 +82,12 @@ source tree is to use::
$ python setup.py install --prefix /some/owned/folder
$ export PYTHONPATH=/some/owned/folder/lib/python3.4/site-packages
-Besides ``numpy.distutils``, NumPy supports building with `Bento`_.
-This provides (among other things) faster builds and a build log that's much
-more readable than the ``distutils`` one. Note that support is still fairly
-experimental, partly due to Bento relying on `Waf`_ which tends to have
-non-backwards-compatible API changes. Working versions of Bento and Waf are
-run on TravisCI, see ``tools/travis-test.sh``.
-
Using virtualenvs
-----------------
A frequently asked question is "How do I set up a development version of NumPy
-in parallel to a released version that I use to do my job/research?".
+in parallel to a released version that I use to do my job/research?".
One simple way to achieve this is to install the released version in
site-packages, by using a binary installer or pip for example, and set
@@ -190,15 +183,13 @@ For example to see where in the Python code you are, use ``py-list``. For more
details, see `DebuggingWithGdb`_.
Instead of plain ``gdb`` you can of course use your favourite
-alternative debugger; run it on the python binary with arguments
+alternative debugger; run it on the python binary with arguments
``runtests.py -g --python mytest.py``.
Building NumPy with a Python built with debug support (on Linux distributions
-typically packaged as ``python-dbg``) is highly recommended.
-
+typically packaged as ``python-dbg``) is highly recommended.
-.. _Bento: http://cournape.github.io/Bento/
.. _DebuggingWithGdb: https://wiki.python.org/moin/DebuggingWithGdb
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index 8f14118d6..2db32e30d 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -230,6 +230,8 @@ Array-protocol type strings (see :ref:`arrays.interface`)
``'u'`` unsigned integer
``'f'`` floating-point
``'c'`` complex-floating point
+ ``'m'`` timedelta
+ ``'M'`` datetime
``'O'`` (Python) objects
``'S'``, ``'a'`` (byte-)string
``'U'`` Unicode
diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst
index f707c382e..db640b2d1 100644
--- a/doc/source/reference/arrays.interface.rst
+++ b/doc/source/reference/arrays.interface.rst
@@ -88,6 +88,8 @@ This approach to the interface consists of the object having an
``u`` Unsigned integer
``f`` Floating point
``c`` Complex floating point
+ ``m`` Timedelta
+ ``M`` Datetime
``O`` Object (i.e. the memory contains a pointer to :c:type:`PyObject`)
``S`` String (fixed-length sequence of char)
``U`` Unicode (fixed-length sequence of :c:type:`Py_UNICODE`)
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 607e28a28..ce5ef6d09 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -6223,7 +6223,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
- A character code (one of 'biufcOSUV') identifying the general kind of data.
+ A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
@@ -6231,6 +6231,8 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
u unsigned integer
f floating-point
c complex floating-point
+ m timedelta
+ M datetime
O object
S (byte-)string
U Unicode
diff --git a/numpy/bento.info b/numpy/bento.info
deleted file mode 100644
index 52b257543..000000000
--- a/numpy/bento.info
+++ /dev/null
@@ -1,22 +0,0 @@
-Recurse:
- core, fft, linalg, random
-
-Library:
- Packages:
- _build_utils,
- compat,
- core,
- core.code_generators,
- distutils,
- distutils.command,
- distutils.fcompiler,
- doc,
- f2py,
- fft,
- lib,
- linalg,
- ma,
- matrixlib,
- polynomial,
- random,
- testing
diff --git a/numpy/core/bento.info b/numpy/core/bento.info
deleted file mode 100644
index 0c335e08a..000000000
--- a/numpy/core/bento.info
+++ /dev/null
@@ -1,41 +0,0 @@
-HookFile: bscript
-
-Library:
- CompiledLibrary: lib/npymath
- Sources:
- src/npymath/_signbit.c,
- src/npymath/ieee754.c.src,
- src/npymath/npy_math.c.src,
- src/npymath/npy_math_complex.c.src,
- src/npymath/halffloat.c
- CompiledLibrary: npysort
- Sources:
- src/private/npy_partition.h.src,
- src/private/npy_binsearch.h.src,
- src/npysort/quicksort.c.src,
- src/npysort/mergesort.c.src,
- src/npysort/heapsort.c.src,
- src/npysort/selection.c.src,
- src/npysort/binsearch.c.src
- Extension: multiarray
- Sources:
- src/multiarray/multiarraymodule_onefile.c
- Extension: multiarray_tests
- Sources:
- src/multiarray/multiarray_tests.c.src,
- src/private/mem_overlap.c
- Extension: umath
- Sources:
- src/umath/umathmodule_onefile.c
- Extension: umath_tests
- Sources:
- src/umath/umath_tests.c.src
- Extension: test_rational
- Sources:
- src/umath/test_rational.c.src
- Extension: struct_ufunc_test
- Sources:
- src/umath/struct_ufunc_test.c.src
- Extension: operand_flag_tests
- Sources:
- src/umath/operand_flag_tests.c.src
diff --git a/numpy/core/bscript b/numpy/core/bscript
deleted file mode 100644
index 944c2996f..000000000
--- a/numpy/core/bscript
+++ /dev/null
@@ -1,555 +0,0 @@
-import os
-import sys
-
-from bento.commands import hooks
-
-import waflib
-import waflib.Errors
-from waflib.Task \
- import \
- Task
-waflib.Logs.verbose = 1
-
-# Importing this adds new checkers to waf configure context - I don't like this
-# way of working, should find a more explicit way to attach new functions to
-# context.
-import numpy._build_utils.waf
-
-from numpy._build_utils.apple_accelerate \
- import \
- get_sgemv_fix
-
-from code_generators.numpy_api \
- import \
- multiarray_api, ufunc_api
-from code_generators import generate_numpy_api, generate_ufunc_api, \
- generate_umath
-
-from setup_common \
- import \
- OPTIONAL_STDFUNCS_MAYBE, OPTIONAL_STDFUNCS, C99_FUNCS_EXTENDED, \
- C99_FUNCS_SINGLE, C99_COMPLEX_TYPES, C99_COMPLEX_FUNCS, \
- MANDATORY_FUNCS, C_ABI_VERSION, C_API_VERSION
-
-def make_relpath(f):
- return os.path.relpath(f, os.path.abspath(os.path.dirname(__file__)))
-
-ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
-NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
-
-NUMPYCONFIG_SYM = []
-
-# FIXME
-if ENABLE_SEPARATE_COMPILATION:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1'))
-else:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', ''))
-
-if NPY_RELAXED_STRIDES_CHECKING:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', '#define NPY_RELAXED_STRIDES_CHECKING 1'))
-else:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', ''))
-
-NUMPYCONFIG_SYM.append(('VISIBILITY_HIDDEN', '__attribute__((visibility("hidden")))'))
-
-NUMPYCONFIG_SYM.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
-NUMPYCONFIG_SYM.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
-
-global PYTHON_HAS_UNICODE_WIDE
-
-def is_npy_no_signal():
- """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
- header."""
- return sys.platform == 'win32'
-
-def define_no_smp():
- """Returns True if we should define NPY_NOSMP, False otherwise."""
- #--------------------------------
- # Checking SMP and thread options
- #--------------------------------
- # Perhaps a fancier check is in order here.
- # so that threads are only enabled if there
- # are actually multiple CPUS? -- but
- # threaded code can be nice even on a single
- # CPU so that long-calculating code doesn't
- # block.
- return 'NPY_NOSMP' in os.environ
-
-def write_numpy_config(conf):
- subst_dict = {}
- for key, value in NUMPYCONFIG_SYM:
- subst_dict["@%s@" % key] = str(value)
- node = conf.path.find_node("include/numpy/_numpyconfig.h.in")
- cnt = node.read()
- for k, v in subst_dict.items():
- cnt = cnt.replace(k, v)
- assert node is not None
- onode = conf.bldnode.make_node(node.path_from(conf.srcnode)).change_ext("")
- onode.write(cnt)
-
-def write_numpy_inifiles(conf):
- subst_dict = dict([("@sep@", os.path.sep), ("@pkgname@", "numpy.core")])
- for inifile in ["mlib.ini.in", "npymath.ini.in"]:
- node = conf.path.find_node(inifile)
- cnt = node.read()
- for k, v in subst_dict.items():
- cnt = cnt.replace(k, v)
- assert node is not None
- outfile = os.path.join("lib", "npy-pkg-config", inifile)
- onode = conf.bldnode.make_node(node.path_from(conf.srcnode).replace(
- inifile, outfile)).change_ext("")
- onode.write(cnt)
-
-def type_checks(conf):
- header_name = "Python.h"
- features = "c pyext"
- for c_type in ("int", "long", "short"):
- macro_name = "SIZEOF_%s" % numpy._build_utils.waf.sanitize_string(c_type)
- conf.check_declaration(macro_name, header_name=header_name,
- features=features)
- NUMPYCONFIG_SYM.append((macro_name, macro_name))
-
- for c_type, e_size in (("float", 4), ("double", 8), ("long double", [12, 16])):
- macro_name = "SIZEOF_%s" % numpy._build_utils.waf.sanitize_string(c_type)
- size = conf.check_type_size(c_type, header_name=header_name,
- features=features, expected_sizes=e_size)
- NUMPYCONFIG_SYM.append((macro_name, str(size)))
-
- macro_name = "SIZEOF_COMPLEX_%s" % numpy._build_utils.waf.sanitize_string(c_type)
- complex_def = "struct {%s __x; %s __y;}" % (c_type, c_type)
- size = conf.check_type_size(complex_def, header_name=header_name,
- features=features, expected_sizes=2*size)
- NUMPYCONFIG_SYM.append((macro_name, str(size)))
-
- if sys.platform != 'darwin':
- conf.check_ldouble_representation()
-
- size = conf.check_type_size("Py_intptr_t", header_name=header_name,
- expected_sizes=[4, 8], features=features)
- NUMPYCONFIG_SYM.append(('SIZEOF_%s' % numpy._build_utils.waf.sanitize_string("Py_intptr_t"),
- '%d' % size))
-
- size = conf.check_type_size("off_t", header_name=header_name,
- expected_sizes=[4, 8], features=features)
- NUMPYCONFIG_SYM.append(('SIZEOF_%s' % numpy._build_utils.waf.sanitize_string("off_t"),
- '%d' % size))
-
- # We check declaration AND type because that's how distutils does it.
- try:
- conf.check_declaration("PY_LONG_LONG", header_name=header_name,
- features=features)
- size = conf.check_type_size("PY_LONG_LONG", header_name=header_name,
- features=features, expected_sizes=[4, 8])
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_LONGLONG",
- "#define NPY_SIZEOF_LONGLONG %d" % size))
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_PY_LONG_LONG",
- "#define NPY_SIZEOF_PY_LONG_LONG %d" % size))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_LONGLONG", ""))
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_PY_LONG_LONG", ""))
-
- conf.check_declaration("CHAR_BIT", header_name=header_name, features=features)
-
- # Check whether we need our own wide character support
- global PYTHON_HAS_UNICODE_WIDE
- try:
- conf.check_declaration('Py_UNICODE_WIDE', header_name=header_name, features=features)
- PYTHON_HAS_UNICODE_WIDE = False
- except waflib.Errors.ConfigurationError:
- PYTHON_HAS_UNICODE_WIDE = True
-
- try:
- conf.check_declaration('PyOS_ascii_strtod', header_name=header_name, features=features)
- except waflib.Errors.ConfigurationError:
- try:
- conf.check_func('strtod')
- conf.define('PyOS_ascii_strtod', 'strtod')
- except waflib.Errors.ConfigurationError:
- pass
-
-def signal_smp_checks(conf):
- if is_npy_no_signal():
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_NO_SIGNAL", "#define NPY_NO_SIGNAL\n"))
- conf.define("__NPY_PRIVATE_NO_SIGNAL", 1)
- else:
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_NO_SIGNAL", ""))
-
- if define_no_smp():
- NUMPYCONFIG_SYM.append(("NPY_NO_SMP", 1))
- else:
- NUMPYCONFIG_SYM.append(("NPY_NO_SMP", 0))
-
-def check_math_runtime(conf):
- header_name = "Python.h math.h"
- features = "c cprogram pyext"
-
- mlibs = [None, "m", "cpml"]
- mathlib = os.environ.get('MATHLIB')
- if mathlib:
- mlibs.insert(0, mathlib)
-
- mlib = None
- for lib in mlibs:
- try:
- if lib is None:
- kw = {}
- else:
- kw = {"lib": lib}
- st = conf.check_functions_at_once(["exp"], uselib_store="M", **kw)
- mlib = lib or []
- break
- except waflib.Errors.ConfigurationError:
- pass
- if mlib is None:
- raise waflib.Errors.ConfigurationError("No math lib found !")
-
- # XXX: this is ugly: mathlib has nothing to do in a public header file
- NUMPYCONFIG_SYM.append(('MATHLIB', ','.join(mlib)))
-
- # FIXME: look more into those additional mandatory functions
- MANDATORY_FUNCS.extend(["pow"])
- conf.check_functions_at_once(MANDATORY_FUNCS, use="M")
-
- #mfuncs = ('expl', 'expf', 'log1p', 'expm1', 'asinh', 'atanhf', 'atanhl',
- # 'rint', 'trunc')
- #conf.check_functions_at_once(mfuncs, use="M")
-
- header_name = "Python.h math.h"
- # XXX: with MSVC compiler, one needs to have cprogram defined. Find out why.
- features = "c pyext cprogram"
- for f in OPTIONAL_STDFUNCS_MAYBE:
- try:
- conf.check_declaration("HAVE_%s" % numpy._build_utils.waf.sanitize_string(f),
- header_name=header_name,
- features=features)
- OPTIONAL_STDFUNCS.remove(f)
- except waflib.Errors.ConfigurationError:
- pass
-
- conf.check_functions_at_once(OPTIONAL_STDFUNCS,
- features=features, mandatory=False, use="M")
- conf.check_functions_at_once(C99_FUNCS_SINGLE,
- features=features, mandatory=False, use="M")
- conf.check_functions_at_once(C99_FUNCS_EXTENDED,
- features=features, mandatory=False, use="M")
- # TODO: add OPTIONAL_HEADERS, OPTIONAL_INTRINSICS and
- # OPTIONAL_GCC_ATTRIBUTES (see setup.py and gh-3766). These are
- # performance optimizations for GCC.
-
- for f in ["isnan", "isinf", "signbit", "isfinite"]:
- try:
- conf.check_declaration("HAVE_DECL_%s" % f.upper(), header_name=header_name,
- features=features)
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
- '#define NPY_HAVE_DECL_%s' % f.upper()))
- except waflib.Errors.ConfigurationError:
- try:
- conf.check_declaration(f, header_name=header_name, features=features)
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
- '#define NPY_HAVE_DECL_%s' % f.upper()))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), ''))
-
-def check_complex(conf):
- if conf.check_header("complex.h", mandatory=False):
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_COMPLEX',
- '#define NPY_USE_C99_COMPLEX 1'))
- for t in C99_COMPLEX_TYPES:
- try:
- conf.check_type(t, header_name='complex.h')
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t),
- '#define NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t)))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t), ''))
-
- for prec in ["", "f", "l"]:
- flist = [f + prec for f in C99_COMPLEX_FUNCS]
- conf.check_functions_at_once(flist, use="M")
- else:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_COMPLEX', ''))
- for t in C99_COMPLEX_TYPES:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t), ''))
-
-def check_win32_specifics(conf):
- from numpy.distutils.misc_util import get_build_architecture
- arch = get_build_architecture()
-
- # On win32, force long double format string to be 'g', not
- # 'Lg', since the MS runtime does not support long double whose
- # size is > sizeof(double)
- if arch == "Intel" or arch == "AMD64":
- conf.define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1)
-
-@hooks.post_configure
-def post_configure(context):
- conf = context.waf_context
-
- try:
- conf.check_header("endian.h")
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_ENDIAN_H',
- '#define NPY_HAVE_ENDIAN_H 1'))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_ENDIAN_H', ''))
-
- try:
- conf.check_declaration('PRIdPTR', header_name='inttypes.h')
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_FORMATS', '#define NPY_USE_C99_FORMATS 1'))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_FORMATS', ''))
-
- type_checks(conf)
- signal_smp_checks(conf)
- check_math_runtime(conf)
- numpy._build_utils.waf.check_inline(conf)
- check_complex(conf)
- check_win32_specifics(conf)
-
- if ENABLE_SEPARATE_COMPILATION:
- conf.define("ENABLE_SEPARATE_COMPILATION", 1)
-
- conf.env["CONFIG_HEADER_TEMPLATE"] = """\
-%(content)s
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif"""
- conf.write_config_header("config.h")
-
- write_numpy_config(conf)
-
- write_numpy_inifiles(conf)
-
- conf.env.INCLUDES = [".", "include", "include/numpy"]
-
- # FIXME: Should be handled in bento context
- conf.store()
-
-class numpy_api_generator(Task):
- vars = ["API_TUPLE"]
- color = "BLUE"
- before = ["c"]
- def run(self):
- targets = [o.path_from(self.generator.bld.srcnode) for o in self.outputs]
- generate_numpy_api.do_generate_api(targets, self.env.API_TUPLE)
- return 0
-
-class ufunc_api_generator(Task):
- vars = ["API_TUPLE"]
- color = "BLUE"
- before = ["c"]
- def run(self):
- targets = [o.path_from(self.generator.bld.srcnode) for o in self.outputs]
- generate_ufunc_api.do_generate_api(targets, self.env.API_TUPLE)
- return 0
-
-@waflib.TaskGen.feature("numpy_api_gen")
-def process_multiarray_api_generator(self):
- tsk = self.create_task("numpy_api_generator")
- if hasattr(self, "api_tuple"):
- tsk.env.API_TUPLE = self.api_tuple
- else:
- if not "API_TUPLE" in tsk.env:
- tsk.env.API_TUPLE = ()
- header = "__%s.h" % self.pattern
- source = "__%s.c" % self.pattern
- txt = self.pattern + ".txt"
- files = [header, source, txt]
- tsk.set_outputs([self.path.find_or_declare(f) for f in files])
-
- self.bld.register_outputs("numpy_gen_headers", "multiarray",
- [output for output in tsk.outputs if output.suffix() == ".h"],
- target_dir="$sitedir/numpy/core/include/numpy")
-
- return tsk
-
-@waflib.TaskGen.feature("ufunc_api_gen")
-def process_api_ufunc_generator(self):
- tsk = self.create_task("ufunc_api_generator")
- if hasattr(self, "api_tuple"):
- tsk.env.API_TUPLE = self.api_tuple
- else:
- if not "API_TUPLE" in tsk.env:
- tsk.env.API_TUPLE = ()
- header = "__%s.h" % self.pattern
- source = "__%s.c" % self.pattern
- txt = self.pattern + ".txt"
- files = [header, source, txt]
- tsk.set_outputs([self.path.find_or_declare(f) for f in files])
-
- headers = [output for output in tsk.outputs if output.suffix() == ".h"]
- self.bld.register_outputs("numpy_gen_headers", "ufunc", headers,
- target_dir="$sitedir/numpy/core/include/numpy")
- return tsk
-
-class umath_generator(Task):
- vars = ["API_TUPLE"]
- color = "BLUE"
- before = ["c"]
- ext_in = ".in"
- def run(self):
- if len(self.outputs) > 1:
- raise ValueError("Only one target (the .c file) is expected in the umath generator task")
- code = generate_umath.make_code(generate_umath.defdict, generate_umath.__file__)
- self.outputs[0].write(code)
- return 0
-
-@waflib.TaskGen.feature("umath_gen")
-def process_umath_generator(self):
- tsk = self.create_task("umath_generator")
- source = "__%s.c" % self.pattern
- tsk.set_outputs(self.path.find_or_declare(source))
- return tsk
-
-from os.path import join as pjoin
-@hooks.pre_build
-def pre_build(context):
- bld = context.waf_context
-
- context.register_category("numpy_gen_inifiles")
- inifile_mlib = context.local_node.declare(os.path.join(
- "lib", "npy-pkg-config", "mlib.ini"))
- inifile_npymath = context.local_node.declare(os.path.join(
- "lib", "npy-pkg-config", "npymath.ini"))
- context.register_outputs("numpy_gen_inifiles", "numpyconfig",
- [inifile_mlib, inifile_npymath])
-
- context.register_category("numpy_gen_headers")
-
- numpyconfig_h = context.local_node.declare(os.path.join("include", "numpy", "_numpyconfig.h"))
- context.register_outputs("numpy_gen_headers", "numpyconfig", [numpyconfig_h])
-
- context.tweak_library("lib/npymath", includes=["src/private", "src/npymath", "include"])
-
- context.tweak_library("npysort",
- includes=[".", "src/private", "src/npysort"],
- use="npymath")
-
- def builder_multiarray(extension):
- bld(name="multiarray_api",
- features="numpy_api_gen",
- api_tuple=multiarray_api,
- pattern="multiarray_api")
-
- multiarray_templates = ["src/multiarray/scalartypes.c.src",
- "src/multiarray/arraytypes.c.src",
- "src/multiarray/nditer_templ.c.src",
- "src/multiarray/lowlevel_strided_loops.c.src",
- "src/private/templ_common.h.src",
- "src/multiarray/einsum.c.src"]
- bld(target="multiarray_templates", source=multiarray_templates)
- if ENABLE_SEPARATE_COMPILATION:
- sources = [
- pjoin('src', 'multiarray', 'arrayobject.c'),
- pjoin('src', 'multiarray', 'alloc.c'),
- pjoin('src', 'multiarray', 'arraytypes.c.src'),
- pjoin('src', 'multiarray', 'array_assign.c'),
- pjoin('src', 'multiarray', 'array_assign_array.c'),
- pjoin('src', 'multiarray', 'array_assign_scalar.c'),
- pjoin('src', 'multiarray', 'buffer.c'),
- pjoin('src', 'multiarray', 'calculation.c'),
- pjoin('src', 'multiarray', 'common.c'),
- pjoin('src', 'multiarray', 'compiled_base.c'),
- pjoin('src', 'multiarray', 'conversion_utils.c'),
- pjoin('src', 'multiarray', 'convert.c'),
- pjoin('src', 'multiarray', 'convert_datatype.c'),
- pjoin('src', 'multiarray', 'ctors.c'),
- pjoin('src', 'multiarray', 'datetime.c'),
- pjoin('src', 'multiarray', 'datetime_busday.c'),
- pjoin('src', 'multiarray', 'datetime_busdaycal.c'),
- pjoin('src', 'multiarray', 'datetime_strings.c'),
- pjoin('src', 'multiarray', 'descriptor.c'),
- pjoin('src', 'multiarray', 'dtype_transfer.c'),
- pjoin('src', 'multiarray', 'einsum.c.src'),
- pjoin('src', 'multiarray', 'flagsobject.c'),
- pjoin('src', 'multiarray', 'getset.c'),
- pjoin('src', 'multiarray', 'hashdescr.c'),
- pjoin('src', 'multiarray', 'item_selection.c'),
- pjoin('src', 'multiarray', 'iterators.c'),
- pjoin('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
- pjoin('src', 'multiarray', 'mapping.c'),
- pjoin('src', 'multiarray', 'methods.c'),
- pjoin('src', 'multiarray', 'multiarraymodule.c'),
- pjoin('src', 'multiarray', 'nditer_templ.c.src'),
- pjoin('src', 'multiarray', 'nditer_api.c'),
- pjoin('src', 'multiarray', 'nditer_constr.c'),
- pjoin('src', 'multiarray', 'nditer_pywrap.c'),
- pjoin('src', 'multiarray', 'number.c'),
- pjoin('src', 'multiarray', 'numpymemoryview.c'),
- pjoin('src', 'multiarray', 'numpyos.c'),
- pjoin('src', 'multiarray', 'refcount.c'),
- pjoin('src', 'multiarray', 'scalarapi.c'),
- pjoin('src', 'multiarray', 'scalartypes.c.src'),
- pjoin('src', 'multiarray', 'sequence.c'),
- pjoin('src', 'multiarray', 'shape.c'),
- pjoin('src', 'multiarray', 'ucsnarrow.c'),
- pjoin('src', 'multiarray', 'usertypes.c'),
- pjoin('src', 'multiarray', 'vdot.c'),
- pjoin('src', 'private', 'templ_common.h.src'),
- pjoin('src', 'private', 'mem_overlap.c'),
- ]
-
- if bld.env.HAS_CBLAS:
- sources.extend([pjoin('src', 'multiarray', 'cblasfuncs.c'),
- pjoin('src', 'multiarray', 'python_xerbla.c'),
- ])
- if "Accelerate" in bld.env.FRAMEWORK_CBLAS:
- sources.extend([make_relpath(get_sgemv_fix()[0])])
- else:
- sources = extension.sources
-
- use = 'npysort npymath'
- defines = ['_FILE_OFFSET_BITS=64',
- '_LARGEFILE_SOURCE=1',
- '_LARGEFILE64_SOURCE=1']
-
- if bld.env.HAS_CBLAS:
- use += ' CBLAS'
- defines.append('HAVE_CBLAS')
-
- includes = ["src/multiarray", "src/private"]
- return context.default_builder(extension,
- includes=includes,
- source=sources,
- use=use,
- defines=defines
- )
- context.register_builder("multiarray", builder_multiarray)
-
- def build_ufunc(extension):
- bld(features="ufunc_api_gen",
- api_tuple=ufunc_api,
- pattern="ufunc_api",
- name="ufunc_api")
-
- ufunc_templates = [
- "src/umath/scalarmath.c.src",
- "src/umath/loops.h.src",
- "src/umath/loops.c.src",
- "src/umath/funcs.inc.src",
- "src/umath/simd.inc.src"]
- bld(target="ufunc_templates", source=ufunc_templates)
-
- bld(features="umath_gen",
- pattern="umath_generated",
- name="umath_gen")
-
- includes = ["src/umath", "src/multiarray", "src/private"]
- if ENABLE_SEPARATE_COMPILATION:
- sources = [
- pjoin("src", "umath", "scalarmath.c.src"),
- pjoin("src", "umath", "loops.h.src"),
- pjoin("src", "umath", "loops.c.src"),
- pjoin('src', 'umath', 'reduction.c'),
- pjoin('src', 'umath', 'ufunc_object.c'),
- pjoin('src', 'umath', 'ufunc_type_resolution.c'),
- pjoin("src", "umath", "umathmodule.c"),
- ]
- else:
- sources = extension.sources
- return context.default_builder(extension,
- includes=includes,
- source=sources,
- use="npymath")
- context.register_builder("umath", build_ufunc)
-
- context.tweak_extension("multiarray_tests", use="npymath", includes=["src/private"])
- context.tweak_extension("umath_tests", use="npymath", includes=["src/private"])
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 3b9a10da5..88064c1d6 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -777,20 +777,16 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
#endif
PyObject *obj, *err;
- if (!o) {
+ /*
+ * Be a bit stricter and not allow bools.
+ * np.bool_ is also disallowed as Boolean arrays do not currently
+ * support index.
+ */
+ if (!o || PyBool_Check(o) || PyArray_IsScalar(o, Bool)) {
PyErr_SetString(PyExc_TypeError, msg);
return -1;
}
- /* Be a bit stricter and not allow bools, np.bool_ is handled later */
- if (PyBool_Check(o)) {
- /* 2013-04-13, 1.8 */
- if (DEPRECATE("using a boolean instead of an integer"
- " will result in an error in the future") < 0) {
- return -1;
- }
- }
-
/*
* Since it is the usual case, first check if o is an integer. This is
* an exact check, since otherwise __index__ is used.
@@ -816,84 +812,22 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
return (npy_intp)long_value;
}
- /* Disallow numpy.bool_. Boolean arrays do not currently support index. */
- if (PyArray_IsScalar(o, Bool)) {
- /* 2013-06-09, 1.8 */
- if (DEPRECATE("using a boolean instead of an integer"
- " will result in an error in the future") < 0) {
- return -1;
- }
- }
-
/*
* The most general case. PyNumber_Index(o) covers everything
* including arrays. In principle it may be possible to replace
* the whole function by PyIndex_AsSSize_t after deprecation.
*/
obj = PyNumber_Index(o);
- if (obj) {
+ if (obj == NULL) {
+ return -1;
+ }
#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
- long_value = PyLong_AsLongLong(obj);
+ long_value = PyLong_AsLongLong(obj);
#else
- long_value = PyLong_AsLong(obj);
+ long_value = PyLong_AsLong(obj);
#endif
- Py_DECREF(obj);
- goto finish;
- }
- else {
- /*
- * Set the TypeError like PyNumber_Index(o) would after trying
- * the general case.
- */
- PyErr_Clear();
- }
+ Py_DECREF(obj);
- /*
- * For backward compatibility check the number C-Api number protcol
- * This should be removed up the finish label after deprecation.
- */
- if (Py_TYPE(o)->tp_as_number != NULL &&
- Py_TYPE(o)->tp_as_number->nb_int != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_int(o);
- if (obj == NULL) {
- return -1;
- }
- #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
- long_value = PyLong_AsLongLong(obj);
- #else
- long_value = PyLong_AsLong(obj);
- #endif
- Py_DECREF(obj);
- }
-#if !defined(NPY_PY3K)
- else if (Py_TYPE(o)->tp_as_number != NULL &&
- Py_TYPE(o)->tp_as_number->nb_long != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_long(o);
- if (obj == NULL) {
- return -1;
- }
- #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
- long_value = PyLong_AsLongLong(obj);
- #else
- long_value = PyLong_AsLong(obj);
- #endif
- Py_DECREF(obj);
- }
-#endif
- else {
- PyErr_SetString(PyExc_TypeError, msg);
- return -1;
- }
- /* Give a deprecation warning, unless there was already an error */
- if (!error_converting(long_value)) {
- /* 2013-04-13, 1.8 */
- if (DEPRECATE("using a non-integer number instead of an integer"
- " will result in an error in the future") < 0) {
- return -1;
- }
- }
-
- finish:
if (error_converting(long_value)) {
err = PyErr_Occurred();
/* Only replace TypeError's here, which are the normal errors. */
@@ -902,9 +836,9 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
}
return -1;
}
-
goto overflow_check; /* silence unused warning */
- overflow_check:
+
+overflow_check:
#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
#if (NPY_SIZEOF_LONGLONG > NPY_SIZEOF_INTP)
if ((long_value < NPY_MIN_INTP) || (long_value > NPY_MAX_INTP)) {
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index b6e831498..42a12db14 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -285,35 +285,13 @@ prepare_index(PyArrayObject *self, PyObject *index,
/* Index is an ellipsis (`...`) */
if (obj == Py_Ellipsis) {
- /*
- * If there is more then one Ellipsis, it is replaced. Deprecated,
- * since it is hard to imagine anyone using two Ellipsis and
- * actually planning on all but the first being automatically
- * replaced with a slice.
- */
+ /* At most one ellipsis in an index */
if (index_type & HAS_ELLIPSIS) {
- /* 2013-04-14, 1.8 */
- if (DEPRECATE(
- "an index can only have a single Ellipsis (`...`); "
- "replace all but one with slices (`:`).") < 0) {
- goto failed_building_indices;
- }
- index_type |= HAS_SLICE;
-
- indices[curr_idx].type = HAS_SLICE;
- indices[curr_idx].object = PySlice_New(NULL, NULL, NULL);
-
- if (indices[curr_idx].object == NULL) {
- goto failed_building_indices;
- }
-
- used_ndim += 1;
- new_ndim += 1;
- curr_idx += 1;
- continue;
+ PyErr_Format(PyExc_IndexError,
+ "an index can only have a single ellipsis ('...')");
+ goto failed_building_indices;
}
index_type |= HAS_ELLIPSIS;
-
indices[curr_idx].type = HAS_ELLIPSIS;
indices[curr_idx].object = NULL;
/* number of slices it is worth, won't update if it is 0: */
@@ -415,102 +393,8 @@ prepare_index(PyArrayObject *self, PyObject *index,
goto failed_building_indices;
}
}
- /*
- * Special case to allow 0-d boolean indexing with
- * scalars. Should be removed after boolean-array
- * like as integer-array like deprecation.
- * (does not cover ufunc.at, because it does not use the
- * boolean special case, but that should not matter...)
- * Since all but strictly boolean indices are invalid,
- * there is no need for any further conversion tries.
- */
- else if (PyArray_NDIM(self) == 0) {
- arr = tmp_arr;
- }
else {
- /*
- * These Checks can be removed after deprecation, since
- * they should then be either correct already or error out
- * later just like a normal array.
- */
- if (PyArray_ISBOOL(tmp_arr)) {
- /* 2013-04-14, 1.8 */
- if (DEPRECATE_FUTUREWARNING(
- "in the future, boolean array-likes will be "
- "handled as a boolean array index") < 0) {
- Py_DECREF(tmp_arr);
- goto failed_building_indices;
- }
- if (PyArray_NDIM(tmp_arr) == 0) {
- /*
- * Need to raise an error here, since the
- * DeprecationWarning before was not triggered.
- * TODO: A `False` triggers a Deprecation *not* a
- * a FutureWarning.
- */
- PyErr_SetString(PyExc_IndexError,
- "in the future, 0-d boolean arrays will be "
- "interpreted as a valid boolean index");
- Py_DECREF(tmp_arr);
- goto failed_building_indices;
- }
- else {
- arr = tmp_arr;
- }
- }
- /*
- * Note: Down the road, the integers will be cast to intp.
- * The user has to make sure they can be safely cast.
- * If not, we might index wrong instead of an giving
- * an error.
- */
- else if (!PyArray_ISINTEGER(tmp_arr)) {
- if (PyArray_NDIM(tmp_arr) == 0) {
- /* match integer deprecation warning */
- /* 2013-09-25, 1.8 */
- if (DEPRECATE(
- "using a non-integer number instead of an "
- "integer will result in an error in the "
- "future") < 0) {
-
- /* The error message raised in the future */
- PyErr_SetString(PyExc_IndexError,
- "only integers, slices (`:`), ellipsis (`...`), "
- "numpy.newaxis (`None`) and integer or boolean "
- "arrays are valid indices");
- Py_DECREF((PyObject *)tmp_arr);
- goto failed_building_indices;
- }
- }
- else {
- /* 2013-09-25, 1.8 */
- if (DEPRECATE(
- "non integer (and non boolean) array-likes "
- "will not be accepted as indices in the "
- "future") < 0) {
-
- /* Error message to be raised in the future */
- PyErr_SetString(PyExc_IndexError,
- "non integer (and non boolean) array-likes will "
- "not be accepted as indices in the future");
- Py_DECREF((PyObject *)tmp_arr);
- goto failed_building_indices;
- }
- }
- }
-
- arr = (PyArrayObject *)PyArray_FromArray(tmp_arr,
- PyArray_DescrFromType(NPY_INTP),
- NPY_ARRAY_FORCECAST);
-
- if (arr == NULL) {
- /* Since this will be removed, handle this later */
- PyErr_Clear();
- arr = tmp_arr;
- }
- else {
- Py_DECREF((PyObject *)tmp_arr);
- }
+ arr = tmp_arr;
}
}
else {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index e72c355dc..0cf6a7cbb 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2119,8 +2119,6 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
}
fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
if (fp == NULL) {
- PyErr_SetString(PyExc_IOError,
- "first argument must be an open file");
Py_DECREF(file);
return NULL;
}
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 953a84eef..fec015a30 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -1025,18 +1025,11 @@ _array_copy_nice(PyArrayObject *self)
static PyObject *
array_index(PyArrayObject *v)
{
- if (!PyArray_ISINTEGER(v) || PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only integer arrays with " \
- "one element can be converted to an index");
+ if (!PyArray_ISINTEGER(v) || PyArray_NDIM(v) != 0) {
+ PyErr_SetString(PyExc_TypeError,
+ "only integer scalar arrays can be converted to a scalar index");
return NULL;
}
- if (PyArray_NDIM(v) != 0) {
- /* 2013-04-20, 1.8 */
- if (DEPRECATE("converting an array with ndim > 0 to an index"
- " will result in an error in the future") < 0) {
- return NULL;
- }
- }
return PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
}
diff --git a/numpy/core/src/private/npy_extint128.h b/numpy/core/src/private/npy_extint128.h
index 6a35e736f..a887ff317 100644
--- a/numpy/core/src/private/npy_extint128.h
+++ b/numpy/core/src/private/npy_extint128.h
@@ -3,7 +3,7 @@
typedef struct {
- char sign;
+ signed char sign;
npy_uint64 lo, hi;
} npy_extint128_t;
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8ec0f5e7f..3e76409c5 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -115,237 +115,6 @@ class _DeprecationTestCase(object):
exceptions=tuple(), args=args, kwargs=kwargs)
-class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase):
- """
- These test that ``DeprecationWarning`` is given when you try to use
- non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
- and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
-
- After deprecation, changes need to be done inside conversion_utils.c
- in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter.
- In iterators.c the function slice_GetIndices could be removed in favor
- of its python equivalent and in mapping.c the function _tuple_of_integers
- can be simplified (if ``np.array([1]).__index__()`` is also deprecated).
-
- As for the deprecation time-frame: via Ralf Gommers,
-
- "Hard to put that as a version number, since we don't know if the
- version after 1.8 will be 6 months or 2 years after. I'd say 2
- years is reasonable."
-
- I interpret this to mean 2 years after the 1.8 release. Possibly
- giving a PendingDeprecationWarning before that (which is visible
- by default)
-
- """
- message = "using a non-integer number instead of an integer " \
- "will result in an error in the future"
-
- def test_indexing(self):
- a = np.array([[[5]]])
-
- def assert_deprecated(*args, **kwargs):
- self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
-
- assert_deprecated(lambda: a[0.0])
- assert_deprecated(lambda: a[0, 0.0])
- assert_deprecated(lambda: a[0.0, 0])
- assert_deprecated(lambda: a[0.0,:])
- assert_deprecated(lambda: a[:, 0.0])
- assert_deprecated(lambda: a[:, 0.0,:])
- assert_deprecated(lambda: a[0.0,:,:])
- assert_deprecated(lambda: a[0, 0, 0.0])
- assert_deprecated(lambda: a[0.0, 0, 0])
- assert_deprecated(lambda: a[0, 0.0, 0])
- assert_deprecated(lambda: a[-1.4])
- assert_deprecated(lambda: a[0, -1.4])
- assert_deprecated(lambda: a[-1.4, 0])
- assert_deprecated(lambda: a[-1.4,:])
- assert_deprecated(lambda: a[:, -1.4])
- assert_deprecated(lambda: a[:, -1.4,:])
- assert_deprecated(lambda: a[-1.4,:,:])
- assert_deprecated(lambda: a[0, 0, -1.4])
- assert_deprecated(lambda: a[-1.4, 0, 0])
- assert_deprecated(lambda: a[0, -1.4, 0])
-
- # Test that the slice parameter deprecation warning doesn't mask
- # the scalar index warning.
- assert_deprecated(lambda: a[0.0:, 0.0], num=2)
- assert_deprecated(lambda: a[0.0:, 0.0,:], num=2)
-
- def test_valid_indexing(self):
- a = np.array([[[5]]])
- assert_not_deprecated = self.assert_not_deprecated
-
- assert_not_deprecated(lambda: a[np.array([0])])
- assert_not_deprecated(lambda: a[[0, 0]])
- assert_not_deprecated(lambda: a[:, [0, 0]])
- assert_not_deprecated(lambda: a[:, 0,:])
- assert_not_deprecated(lambda: a[:,:,:])
-
- def test_slicing(self):
- a = np.array([[5]])
-
- def assert_deprecated(*args, **kwargs):
- self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
-
- # start as float.
- assert_deprecated(lambda: a[0.0:])
- assert_deprecated(lambda: a[0:, 0.0:2])
- assert_deprecated(lambda: a[0.0::2, :0])
- assert_deprecated(lambda: a[0.0:1:2,:])
- assert_deprecated(lambda: a[:, 0.0:])
- # stop as float.
- assert_deprecated(lambda: a[:0.0])
- assert_deprecated(lambda: a[:0, 1:2.0])
- assert_deprecated(lambda: a[:0.0:2, :0])
- assert_deprecated(lambda: a[:0.0,:])
- assert_deprecated(lambda: a[:, 0:4.0:2])
- # step as float.
- assert_deprecated(lambda: a[::1.0])
- assert_deprecated(lambda: a[0:, :2:2.0])
- assert_deprecated(lambda: a[1::4.0, :0])
- assert_deprecated(lambda: a[::5.0,:])
- assert_deprecated(lambda: a[:, 0:4:2.0])
- # mixed.
- assert_deprecated(lambda: a[1.0:2:2.0], num=2)
- assert_deprecated(lambda: a[1.0::2.0], num=2)
- assert_deprecated(lambda: a[0:, :2.0:2.0], num=2)
- assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2)
- assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3)
- assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3)
- # should still get the DeprecationWarning if step = 0.
- assert_deprecated(lambda: a[::0.0], function_fails=True)
-
- def test_valid_slicing(self):
- a = np.array([[[5]]])
- assert_not_deprecated = self.assert_not_deprecated
-
- assert_not_deprecated(lambda: a[::])
- assert_not_deprecated(lambda: a[0:])
- assert_not_deprecated(lambda: a[:2])
- assert_not_deprecated(lambda: a[0:2])
- assert_not_deprecated(lambda: a[::2])
- assert_not_deprecated(lambda: a[1::2])
- assert_not_deprecated(lambda: a[:2:2])
- assert_not_deprecated(lambda: a[1:2:2])
-
- def test_non_integer_argument_deprecations(self):
- a = np.array([[5]])
-
- self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2)
- self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1)))
- self.assert_deprecated(np.take, args=(a, [0], 1.))
- self.assert_deprecated(np.take, args=(a, [0], np.float64(1.)))
-
- def test_non_integer_sequence_multiplication(self):
- # Numpy scalar sequence multiply should not work with non-integers
- def mult(a, b):
- return a * b
-
- self.assert_deprecated(mult, args=([1], np.float_(3)))
- self.assert_not_deprecated(mult, args=([1], np.int_(3)))
-
- def test_reduce_axis_float_index(self):
- d = np.zeros((3,3,3))
- self.assert_deprecated(np.min, args=(d, 0.5))
- self.assert_deprecated(np.min, num=1, args=(d, (0.5, 1)))
- self.assert_deprecated(np.min, num=1, args=(d, (1, 2.2)))
- self.assert_deprecated(np.min, num=2, args=(d, (.2, 1.2)))
-
-
-class TestBooleanArgumentDeprecation(_DeprecationTestCase):
- """This tests that using a boolean as integer argument/indexing is
- deprecated.
-
- This should be kept in sync with TestFloatNonIntegerArgumentDeprecation
- and like it is handled in PyArray_PyIntAsIntp.
- """
- message = "using a boolean instead of an integer " \
- "will result in an error in the future"
-
- def test_bool_as_int_argument(self):
- a = np.array([[[1]]])
-
- self.assert_deprecated(np.reshape, args=(a, (True, -1)))
- self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1)))
- # Note that operator.index(np.array(True)) does not work, a boolean
- # array is thus also deprecated, but not with the same message:
- assert_raises(TypeError, operator.index, np.array(True))
- self.assert_deprecated(np.take, args=(a, [0], False))
- self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3)
- self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError)
- self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError)
-
-
-class TestArrayToIndexDeprecation(_DeprecationTestCase):
- """This tests that creating an an index from an array is deprecated
- if the array is not 0d.
-
- This can probably be deprecated somewhat faster then the integer
- deprecations. The deprecation period started with NumPy 1.8.
- For deprecation this needs changing of array_index in number.c
- """
- message = "converting an array with ndim \> 0 to an index will result " \
- "in an error in the future"
-
- def test_array_to_index_deprecation(self):
- # This drops into the non-integer deprecation, which is ignored here,
- # so no exception is expected. The raising is effectively tested above.
- a = np.array([[[1]]])
-
- self.assert_deprecated(operator.index, args=(np.array([1]),))
- self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=())
- self.assert_deprecated(np.take, args=(a, [0], a), exceptions=())
- # Check slicing. Normal indexing checks arrays specifically.
- self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3)
-
-class TestNonIntegerArrayLike(_DeprecationTestCase):
- """Tests that array likes, i.e. lists give a deprecation warning
- when they cannot be safely cast to an integer.
- """
- message = "non integer \(and non boolean\) array-likes will not be " \
- "accepted as indices in the future"
-
- def test_basic(self):
- a = np.arange(10)
- self.assert_deprecated(a.__getitem__, args=([0.5, 1.5],),
- exceptions=IndexError)
- self.assert_deprecated(a.__getitem__, args=((['1', '2'],),),
- exceptions=IndexError)
-
- self.assert_not_deprecated(a.__getitem__, ([],))
-
- def test_boolean_futurewarning(self):
- a = np.arange(10)
- with warnings.catch_warnings():
- warnings.filterwarnings('always')
- assert_warns(FutureWarning, a.__getitem__, [True])
- # Unfortunatly, the deprecation warning takes precedence:
- #assert_warns(FutureWarning, a.__getitem__, True)
-
- with warnings.catch_warnings():
- warnings.filterwarnings('error')
- assert_raises(FutureWarning, a.__getitem__, [True])
- #assert_raises(FutureWarning, a.__getitem__, True)
-
-
-class TestMultipleEllipsisDeprecation(_DeprecationTestCase):
- message = "an index can only have a single Ellipsis \(`...`\); replace " \
- "all but one with slices \(`:`\)."
-
- def test_basic(self):
- a = np.arange(10)
- self.assert_deprecated(a.__getitem__, args=((Ellipsis, Ellipsis),))
-
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- # Just check that this works:
- b = a[...,...]
- assert_array_equal(a, b)
- assert_raises(IndexError, a.__getitem__, ((Ellipsis, ) * 3,))
-
-
class TestBooleanUnaryMinusDeprecation(_DeprecationTestCase):
"""Test deprecation of unary boolean `-`. While + and * are well
defined, unary - is not and even a corrected form seems to have
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 4bc937e0b..38280d05e 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import warnings
import functools
+import operator
import numpy as np
from numpy.core.multiarray_tests import array_indexing
@@ -21,6 +22,69 @@ except ImportError:
class TestIndexing(TestCase):
+ def test_index_no_floats(self):
+ a = np.array([[[5]]])
+
+ assert_raises(IndexError, lambda: a[0.0])
+ assert_raises(IndexError, lambda: a[0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0])
+ assert_raises(IndexError, lambda: a[0.0,:])
+ assert_raises(IndexError, lambda: a[:, 0.0])
+ assert_raises(IndexError, lambda: a[:, 0.0,:])
+ assert_raises(IndexError, lambda: a[0.0,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0, 0])
+ assert_raises(IndexError, lambda: a[0, 0.0, 0])
+ assert_raises(IndexError, lambda: a[-1.4])
+ assert_raises(IndexError, lambda: a[0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0])
+ assert_raises(IndexError, lambda: a[-1.4,:])
+ assert_raises(IndexError, lambda: a[:, -1.4])
+ assert_raises(IndexError, lambda: a[:, -1.4,:])
+ assert_raises(IndexError, lambda: a[-1.4,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0, 0])
+ assert_raises(IndexError, lambda: a[0, -1.4, 0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
+
+ def test_slicing_no_floats(self):
+ a = np.array([[5]])
+
+ # start as float.
+ assert_raises(IndexError, lambda: a[0.0:])
+ assert_raises(IndexError, lambda: a[0:, 0.0:2])
+ assert_raises(IndexError, lambda: a[0.0::2, :0])
+ assert_raises(IndexError, lambda: a[0.0:1:2,:])
+ assert_raises(IndexError, lambda: a[:, 0.0:])
+ # stop as float.
+ assert_raises(IndexError, lambda: a[:0.0])
+ assert_raises(IndexError, lambda: a[:0, 1:2.0])
+ assert_raises(IndexError, lambda: a[:0.0:2, :0])
+ assert_raises(IndexError, lambda: a[:0.0,:])
+ assert_raises(IndexError, lambda: a[:, 0:4.0:2])
+ # step as float.
+ assert_raises(IndexError, lambda: a[::1.0])
+ assert_raises(IndexError, lambda: a[0:, :2:2.0])
+ assert_raises(IndexError, lambda: a[1::4.0, :0])
+ assert_raises(IndexError, lambda: a[::5.0,:])
+ assert_raises(IndexError, lambda: a[:, 0:4:2.0])
+ # mixed.
+ assert_raises(IndexError, lambda: a[1.0:2:2.0])
+ assert_raises(IndexError, lambda: a[1.0::2.0])
+ assert_raises(IndexError, lambda: a[0:, :2.0:2.0])
+ assert_raises(IndexError, lambda: a[1.0:1:4.0, :0])
+ assert_raises(IndexError, lambda: a[1.0:5.0:5.0,:])
+ assert_raises(IndexError, lambda: a[:, 0.4:4.0:2.0])
+ # should still get the DeprecationWarning if step = 0.
+ assert_raises(IndexError, lambda: a[::0.0])
+
+ def test_index_no_array_to_index(self):
+ # No non-scalar arrays.
+ a = np.array([[[1]]])
+
+ assert_raises(IndexError, lambda: a[a:a:a])
+
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
@@ -35,19 +99,9 @@ class TestIndexing(TestCase):
a = np.array(0)
assert_(isinstance(a[()], np.int_))
- # Regression, it needs to fall through integer and fancy indexing
- # cases, so need the with statement to ignore the non-integer error.
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- a = np.array([1.])
- assert_(isinstance(a[0.], np.float_))
-
- a = np.array([np.array(1)], dtype=object)
- assert_(isinstance(a[0.], np.ndarray))
-
def test_same_kind_index_casting(self):
- # Indexes should be cast with same-kind and not safe, even if
- # that is somewhat unsafe. So test various different code paths.
+ # Indexes should be cast with same-kind and not safe, even if that
+ # is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
@@ -85,7 +139,8 @@ class TestIndexing(TestCase):
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[...], a)
- assert_(a[...].base is a) # `a[...]` was `a` in numpy <1.9.)
+ # `a[...]` was `a` in numpy <1.9.
+ assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
@@ -645,7 +700,8 @@ class TestMultiIndexingAutomated(TestCase):
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
- self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
+ self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
+ 'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
@@ -719,16 +775,18 @@ class TestMultiIndexingAutomated(TestCase):
indx = np.array(indx, dtype=np.intp)
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
- raise IndexError('arrays used as indices must be of integer (or boolean) type')
+ raise IndexError('arrays used as indices must be of '
+ 'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
- # we can't take more dimensions then we have, not even for 0-d arrays.
- # since a[()] makes sense, but not a[(),]. We will raise an error
- # later on, unless a broadcasting error occurs first.
+ # we can't take more dimensions then we have, not even for 0-d
+ # arrays. since a[()] makes sense, but not a[(),]. We will
+ # raise an error later on, unless a broadcasting error occurs
+ # first.
raise IndexError
if ndim == 0 and None not in in_indices:
@@ -736,7 +794,8 @@ class TestMultiIndexingAutomated(TestCase):
return arr.copy(), no_copy
if ellipsis_pos is not None:
- in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None, None)] * (arr.ndim - ndim)
+ in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
+ (arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
@@ -779,21 +838,23 @@ class TestMultiIndexingAutomated(TestCase):
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
- # The index is a scalar. This used to be two fold, but if fancy
- # indexing was active, the check was done later, possibly
- # after broadcasting it away (1.7. or earlier). Now it is always
- # done.
+ # The index is a scalar. This used to be two fold, but if
+ # fancy indexing was active, the check was done later,
+ # possibly after broadcasting it away (1.7. or earlier).
+ # Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
- if len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos:
+ if (len(indices) > 0 and
+ indices[-1][0] == 'f' and
+ ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
- # NOTE: A 0-d array triggers this as well, while
- # one may expect it to not trigger it, since a scalar
- # would not be considered fancy indexing.
+ # NOTE: A 0-d array triggers this as well, while one may
+ # expect it to not trigger it, since a scalar would not be
+ # considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
@@ -854,13 +915,15 @@ class TestMultiIndexingAutomated(TestCase):
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
- mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise')
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='raise')
except:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
- mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap')
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='wrap')
else:
# Maybe never happens...
raise ValueError
@@ -962,9 +1025,12 @@ class TestMultiIndexingAutomated(TestCase):
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,] also no broadcasting
# is done at all
- self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),))
- self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
- self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool),))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
@@ -1003,6 +1069,119 @@ class TestMultiIndexingAutomated(TestCase):
for index in self.complex_indices:
self._check_single_index(a, index)
+class TestFloatNonIntegerArgument(TestCase):
+ """
+ These test that ``TypeError`` is raised when you try to use
+ non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
+ and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
+
+ """
+ def test_valid_indexing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[np.array([0])]
+ a[[0, 0]]
+ a[:, [0, 0]]
+ a[:, 0,:]
+ a[:,:,:]
+
+ def test_valid_slicing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[::]
+ a[0:]
+ a[:2]
+ a[0:2]
+ a[::2]
+ a[1::2]
+ a[:2:2]
+ a[1:2:2]
+
+ def test_non_integer_argument_errors(self):
+ a = np.array([[5]])
+
+ assert_raises(TypeError, np.reshape, a, (1., 1., -1))
+ assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+ assert_raises(TypeError, np.take, a, [0], 1.)
+ assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+
+ def test_non_integer_sequence_multiplication(self):
+ # Numpy scalar sequence multiply should not work with non-integers
+ def mult(a, b):
+ return a * b
+
+ assert_raises(TypeError, mult, [1], np.float_(3))
+ # following should be OK
+ mult([1], np.int_(3))
+
+ def test_reduce_axis_float_index(self):
+ d = np.zeros((3,3,3))
+ assert_raises(TypeError, np.min, d, 0.5)
+ assert_raises(TypeError, np.min, d, (0.5, 1))
+ assert_raises(TypeError, np.min, d, (1, 2.2))
+ assert_raises(TypeError, np.min, d, (.2, 1.2))
+
+
+class TestBooleanArgumentErrors(TestCase):
+ """Using a boolean as integer argument/indexing is an error.
+
+ """
+ def test_bool_as_int_argument(self):
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, np.reshape, a, (True, -1))
+ assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
+ # Note that operator.index(np.array(True)) does not work, a boolean
+ # array is thus also deprecated, but not with the same message:
+ assert_raises(TypeError, operator.index, np.array(True))
+ assert_raises(TypeError, np.take, args=(a, [0], False))
+ assert_raises(IndexError, lambda: a[False:True:True])
+ assert_raises(IndexError, lambda: a[False, 0])
+ assert_raises(IndexError, lambda: a[False, 0, 0])
+
+
+class TestArrayToIndexDeprecation(TestCase):
+ """Creating an an index from array not 0-D is an error.
+
+ """
+ def test_array_to_index_error(self):
+ # so no exception is expected. The raising is effectively tested above.
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, operator.index, np.array([1]))
+ assert_raises(TypeError, np.reshape, a, (a, -1))
+ assert_raises(TypeError, np.take, a, [0], a)
+
+
+class TestNonIntegerArrayLike(TestCase):
+ """Tests that array_likes only valid if can safely cast to integer.
+
+ For instance, lists give IndexError when they cannot be safely cast to
+ an integer.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+
+ assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
+ assert_raises(IndexError, a.__getitem__, (['1', '2'],))
+
+ # The following is valid
+ a.__getitem__([])
+
+
+class TestMultipleEllipsisError(TestCase):
+ """An index can only have a single ellipsis.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+ assert_raises(IndexError, lambda: a[..., ...])
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
+
class TestCApiAccess(TestCase):
def test_getitem(self):
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 86a3984c2..745bff15a 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -142,5 +142,44 @@ identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
+Extended Precision
+==================
+
+Python's floating-point numbers are usually 64-bit floating-point numbers,
+nearly equivalent to ``np.float64``. In some unusual situations it may be
+useful to use floating-point numbers with more precision. Whether this
+is possible in numpy depends on the hardware and on the development
+environment: specifically, x86 machines provide hardware floating-point
+with 80-bit precision, and while most C compilers provide this as their
+``long double`` type, MSVC (standard for Windows builds) makes
+``long double`` identical to ``double`` (64 bits). Numpy makes the
+compiler's ``long double`` available as ``np.longdouble`` (and
+``np.clongdouble`` for the complex numbers). You can find out what your
+numpy provides with ``np.finfo(np.longdouble)``.
+
+Numpy does not provide a dtype with more precision than C
+``long double``s; in particular, the 128-bit IEEE quad precision
+data type (FORTRAN's ``REAL*16``) is not available.
+
+For efficient memory alignment, ``np.longdouble`` is usually stored
+padded with zero bits, either to 96 or 128 bits. Which is more efficient
+depends on hardware and development environment; typically on 32-bit
+systems they are padded to 96 bits, while on 64-bit systems they are
+typically padded to 128 bits. ``np.longdouble`` is padded to the system
+default; ``np.float96`` and ``np.float128`` are provided for users who
+want specific padding. In spite of the names, ``np.float96`` and
+``np.float128`` provide only as much precision as ``np.longdouble``,
+that is, 80 bits on most x86 machines and 64 bits in standard
+Windows builds.
+
+Be warned that even if ``np.longdouble`` offers more precision than
+python ``float``, it is easy to lose that extra precision, since
+python often forces values to pass through ``float``. For example,
+the ``%`` formatting operator requires its arguments to be converted
+to standard python types, and it is therefore impossible to preserve
+extended precision even if many decimal places are requested. It can
+be useful to test your code with the value
+``1 + np.finfo(np.longdouble).eps``.
+
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/fft/bento.info b/numpy/fft/bento.info
deleted file mode 100644
index 7627b319e..000000000
--- a/numpy/fft/bento.info
+++ /dev/null
@@ -1,6 +0,0 @@
-HookFile: bscript
-
-Library:
- Extension: fftpack_lite
- Sources:
- fftpack_litemodule.c, fftpack.c
diff --git a/numpy/fft/bscript b/numpy/fft/bscript
deleted file mode 100644
index ac1506496..000000000
--- a/numpy/fft/bscript
+++ /dev/null
@@ -1,7 +0,0 @@
-from bento.commands import hooks
-
-@hooks.pre_build
-def build(context):
- context.tweak_extension("fftpack_lite",
- includes=["../core/include", "../core/include/numpy",
- "../core", "../core/src/private"])
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 820168663..007ff42a4 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1122,19 +1122,28 @@ def gradient(f, *varargs, **kwargs):
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
- varargs : list of scalar, optional
+ varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
+ A single scalar specifies the sample distance for all dimensions.
+ If `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
+ axis : None or int or tuple of ints, optional
+ Gradient is calculated only along the given axis or axes.
+ The default (axis = None) is to calculate the gradient for all
+ the axes of the input array. axis may be negative, in which
+ case it counts from the last to the first axis.
+
+ .. versionadded:: 1.11.0
+
Returns
-------
gradient : list of ndarray
- Each element of `list` has the same shape as `f` giving the derivative
+ Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
@@ -1145,10 +1154,10 @@ def gradient(f, *varargs, **kwargs):
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
- For two dimensional arrays, the return will be two arrays ordered by
- axis. In this example the first array stands for the gradient in
+ For two dimensional arrays, the return will be two arrays ordered by
+ axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
-
+
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
@@ -1159,15 +1168,38 @@ def gradient(f, *varargs, **kwargs):
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
+
+ The axis keyword can be used to specify a subset of axes along which the gradient is calculated:
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
+ array([[ 2., 2., -1.],
+ [ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
+
+ axes = kwargs.pop('axis', None)
+ if axes is None:
+ axes = tuple(range(N))
+ # check axes to have correct type and no duplicate entries
+ if isinstance(axes, int):
+ axes = (axes,)
+ if not isinstance(axes, tuple):
+ raise TypeError("A tuple of integers or a single integer is required")
+
+ # normalize axis values:
+ axes = tuple(x + N if x < 0 else x for x in axes)
+ if max(axes) >= N or min(axes) < 0:
+ raise ValueError("'axis' entry is out of bounds")
+
+ if len(set(axes)) != len(axes):
+ raise ValueError("duplicate value in 'axis'")
+
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
- elif n == N:
+ elif n == len(axes):
dx = list(varargs)
else:
raise SyntaxError(
@@ -1211,7 +1243,7 @@ def gradient(f, *varargs, **kwargs):
else:
y = f
- for axis in range(N):
+ for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
@@ -1267,7 +1299,7 @@ def gradient(f, *varargs, **kwargs):
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
- out /= dx[axis]
+ out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
@@ -1276,7 +1308,7 @@ def gradient(f, *varargs, **kwargs):
slice3[axis] = slice(None)
slice4[axis] = slice(None)
- if N == 1:
+ if len(axes) == 1:
return outvals[0]
else:
return outvals
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index af9315d83..5e758fb89 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -601,6 +601,31 @@ class TestGradient(TestCase):
num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
+ def test_specific_axes(self):
+ # Testing that gradient can work on a given axis only
+ v = [[1, 1], [3, 4]]
+ x = np.array(v)
+ dx = [np.array([[2., 3.], [2., 3.]]),
+ np.array([[0., 0.], [1., 1.]])]
+ assert_array_equal(gradient(x, axis=0), dx[0])
+ assert_array_equal(gradient(x, axis=1), dx[1])
+ assert_array_equal(gradient(x, axis=-1), dx[1])
+ assert_array_equal(gradient(x, axis=(1,0)), [dx[1], dx[0]])
+
+ # test axis=None which means all axes
+ assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
+ # and is the same as no axis keyword given
+ assert_almost_equal(gradient(x, axis=None), gradient(x))
+
+ # test vararg order
+ assert_array_equal(gradient(x, 2, 3, axis=(1,0)), [dx[1]/2.0, dx[0]/3.0])
+ # test maximal number of varargs
+ assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)
+
+ assert_raises(ValueError, gradient, x, axis=3)
+ assert_raises(ValueError, gradient, x, axis=-3)
+ assert_raises(TypeError, gradient, x, axis=[1,])
+
class TestAngle(TestCase):
@@ -1888,19 +1913,25 @@ class TestBincount(TestCase):
def test_with_incorrect_minlength(self):
x = np.array([], dtype=int)
- assert_raises_regex(TypeError, "an integer is required",
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
- assert_raises_regex(ValueError, "must be positive",
+ assert_raises_regex(ValueError,
+ "must be positive",
lambda: np.bincount(x, minlength=-1))
- assert_raises_regex(ValueError, "must be positive",
+ assert_raises_regex(ValueError,
+ "must be positive",
lambda: np.bincount(x, minlength=0))
x = np.arange(5)
- assert_raises_regex(TypeError, "an integer is required",
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
- assert_raises_regex(ValueError, "minlength must be positive",
+ assert_raises_regex(ValueError,
+ "minlength must be positive",
lambda: np.bincount(x, minlength=-1))
- assert_raises_regex(ValueError, "minlength must be positive",
+ assert_raises_regex(ValueError,
+ "minlength must be positive",
lambda: np.bincount(x, minlength=0))
diff --git a/numpy/linalg/bento.info b/numpy/linalg/bento.info
deleted file mode 100644
index 52d036753..000000000
--- a/numpy/linalg/bento.info
+++ /dev/null
@@ -1,21 +0,0 @@
-HookFile: bscript
-
-Library:
- Extension: _umath_linalg
- Sources:
- umath_linalg.c.src,
- lapack_lite/blas_lite.c,
- lapack_lite/dlamch.c,
- lapack_lite/dlapack_lite.c,
- lapack_lite/f2c_lite.c,
- lapack_lite/python_xerbla.c,
- lapack_lite/zlapack_lite.c
- Extension: lapack_lite
- Sources:
- lapack_lite/blas_lite.c,
- lapack_lite/dlamch.c,
- lapack_lite/dlapack_lite.c,
- lapack_lite/f2c_lite.c,
- lapack_litemodule.c,
- lapack_lite/python_xerbla.c,
- lapack_lite/zlapack_lite.c
diff --git a/numpy/linalg/bscript b/numpy/linalg/bscript
deleted file mode 100644
index 70fdd9de3..000000000
--- a/numpy/linalg/bscript
+++ /dev/null
@@ -1,26 +0,0 @@
-from bento.commands.hooks \
- import \
- pre_build
-
-@pre_build
-def pbuild(context):
- bld = context.waf_context
-
- def build_lapack_lite(extension):
- kw = {}
- kw["use"] = "npymath"
- if bld.env.HAS_LAPACK:
- for s in ['python_xerbla.c', 'zlapack_lite.c', 'dlapack_lite.c',
- 'blas_lite.c', 'dlamch.c', 'f2c_lite.c']:
- extension.sources.pop(extension.sources.index('lapack_lite/' + s))
- kw["use"] = "npymath LAPACK"
-
- includes = ["../core/include", "../core/include/numpy", "../core",
- "../core/src/private"]
- return context.default_builder(extension,
- includes=includes,
- **kw)
-
- context.register_builder("lapack_lite", build_lapack_lite)
- context.register_builder("_umath_linalg", build_lapack_lite)
-
diff --git a/numpy/random/bento.info b/numpy/random/bento.info
deleted file mode 100644
index f51da0131..000000000
--- a/numpy/random/bento.info
+++ /dev/null
@@ -1,9 +0,0 @@
-HookFile: bscript
-
-Library:
- Extension: mtrand
- Sources:
- mtrand/mtrand.c,
- mtrand/randomkit.c,
- mtrand/initarray.c,
- mtrand/distributions.c
diff --git a/numpy/random/bscript b/numpy/random/bscript
deleted file mode 100644
index cecc65e33..000000000
--- a/numpy/random/bscript
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import sys
-
-from bento.commands import hooks
-import waflib
-
-@hooks.post_configure
-def configure(context):
- conf = context.waf_context
-
- conf.env.USE_WINCRYPT = False
- if conf.check_declaration("_WIN32", mandatory=False):
- conf.env.USE_WINCRYPT = True
-
- conf.env.NEEDS_MINGW32_WORKAROUND = False
- if sys.platform == "win32" and conf.check_declaration("__GNUC__", mandatory=False):
- conf.env.NEEDS_MINGW32_WORKAROUND = True
-
-@hooks.pre_build
-def build(context):
- bld = context.waf_context
-
- if bld.env.NEEDS_MINGW32_WORKAROUND:
- raise NotImplementedError("Check for mingw time workaround stuff")
-
- def builder(extension):
- includes = ["../core/include", "../core/include/numpy", "../core",
- "../core/src/private"]
- kw = {}
- # enable unix large file support on 32 bit systems
- # (64 bit off_t, lseek -> lseek64 etc.)
- kw['defines'] = ['_FILE_OFFSET_BITS=64',
- '_LARGEFILE_SOURCE=1',
- '_LARGEFILE64_SOURCE=1']
- if bld.env.USE_WINCRYPT:
- kw["lib"] = "ADVAPI32"
- return context.default_builder(extension, includes=includes, **kw)
- context.register_builder("mtrand", builder)
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 9861204d9..59f9dcd6b 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -3520,7 +3520,7 @@ cdef class RandomState:
.. math:: P(x;l, m, r) = \\begin{cases}
\\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
- \\frac{2(m-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
+ \\frac{2(r-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
0& \\text{otherwise}.
\\end{cases}
diff --git a/numpy/setup.py b/numpy/setup.py
index 58bb7dc93..4ccdaeea5 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -6,7 +6,6 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy', parent_package, top_path)
- # If you update this list, then also update the file numpy/bento.info
config.add_subpackage('compat')
config.add_subpackage('core')
config.add_subpackage('distutils')
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index b6a588c03..2ddc11de7 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -123,7 +123,7 @@
if (PyInstance_Check(py_obj)) return "instance" ;
%#endif
- return "unkown type";
+ return "unknown type";
}
/* Given a NumPy typecode, return a string describing the type.