summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/lib/_datasource.py138
-rw-r--r--numpy/lib/_iotools.py171
-rw-r--r--numpy/lib/_version.py1
-rw-r--r--numpy/lib/arraypad.py6
-rw-r--r--numpy/lib/arraysetops.py25
-rw-r--r--numpy/lib/arrayterator.py14
-rw-r--r--numpy/lib/financial.py68
-rw-r--r--numpy/lib/format.py102
-rw-r--r--numpy/lib/function_base.py41
-rw-r--r--numpy/lib/index_tricks.py80
-rw-r--r--numpy/lib/nanfunctions.py2
-rw-r--r--numpy/lib/npyio.py35
-rw-r--r--numpy/lib/polynomial.py67
-rw-r--r--numpy/lib/recfunctions.py137
-rw-r--r--numpy/lib/scimath.py16
-rw-r--r--numpy/lib/setup.py3
-rw-r--r--numpy/lib/shape_base.py47
-rw-r--r--numpy/lib/stride_tricks.py22
-rw-r--r--numpy/lib/tests/test__datasource.py11
-rw-r--r--numpy/lib/tests/test__iotools.py8
-rw-r--r--numpy/lib/tests/test_arraypad.py362
-rw-r--r--numpy/lib/tests/test_arraysetops.py14
-rw-r--r--numpy/lib/tests/test_financial.py6
-rw-r--r--numpy/lib/tests/test_format.py14
-rw-r--r--numpy/lib/tests/test_index_tricks.py94
-rw-r--r--numpy/lib/tests/test_nanfunctions.py24
-rw-r--r--numpy/lib/tests/test_polynomial.py13
-rw-r--r--numpy/lib/tests/test_recfunctions.py53
-rw-r--r--numpy/lib/tests/test_regression.py62
-rw-r--r--numpy/lib/tests/test_shape_base.py277
-rw-r--r--numpy/lib/tests/test_stride_tricks.py50
-rw-r--r--numpy/lib/tests/test_twodim_base.py70
-rw-r--r--numpy/lib/tests/test_type_check.py217
-rw-r--r--numpy/lib/tests/test_ufunclike.py5
-rw-r--r--numpy/lib/tests/test_utils.py6
-rw-r--r--numpy/lib/twodim_base.py16
-rw-r--r--numpy/lib/type_check.py62
-rw-r--r--numpy/lib/user_array.py143
-rw-r--r--numpy/lib/utils.py138
39 files changed, 1411 insertions, 1209 deletions
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 96d9af905..338c8b331 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -1,19 +1,21 @@
"""A file interface for handling local and remote data files.
-The goal of datasource is to abstract some of the file system operations when
-dealing with data files so the researcher doesn't have to know all the
+
+The goal of datasource is to abstract some of the file system operations
+when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
-It should work seemlessly with standard file IO operations and the os module.
+It should work seamlessly with standard file IO operations and the os
+module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
-DataSource files can also be compressed or uncompressed. Currently only gzip
-and bz2 are supported.
+DataSource files can also be compressed or uncompressed. Currently only
+gzip and bz2 are supported.
Example::
@@ -33,14 +35,13 @@ Example::
"""
from __future__ import division, absolute_import, print_function
-__docformat__ = "restructuredtext en"
-
import os
import sys
-from shutil import rmtree, copyfile, copyfileobj
+import shutil
_open = open
+
# Using a class instead of a module-level dictionary
# to reduce the inital 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
@@ -51,9 +52,9 @@ class _FileOpeners(object):
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
- supported file format. Attribute lookup is implemented in such a way that
- an instance of `_FileOpeners` itself can be indexed with the keys of that
- dictionary. Currently uncompressed files as well as files
+ supported file format. Attribute lookup is implemented in such a way
+ that an instance of `_FileOpeners` itself can be indexed with the keys
+ of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
@@ -69,9 +70,11 @@ class _FileOpeners(object):
True
"""
+
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
+
def _load(self):
if self._loaded:
return
@@ -105,6 +108,7 @@ class _FileOpeners(object):
"""
self._load()
return list(self._file_openers.keys())
+
def __getitem__(self, key):
self._load()
return self._file_openers[key]
@@ -115,8 +119,8 @@ def open(path, mode='r', destpath=os.curdir):
"""
Open `path` with `mode` and return the file object.
- If ``path`` is an URL, it will be downloaded, stored in the `DataSource`
- `destpath` directory and opened from there.
+ If ``path`` is an URL, it will be downloaded, stored in the
+ `DataSource` `destpath` directory and opened from there.
Parameters
----------
@@ -124,12 +128,12 @@ def open(path, mode='r', destpath=os.curdir):
Local file path or URL to open.
mode : str, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
- append. Available modes depend on the type of object specified by path.
- Default is 'r'.
+ append. Available modes depend on the type of object specified by
+ path. Default is 'r'.
destpath : str, optional
- Path to the directory where the source file gets downloaded to for use.
- If `destpath` is None, a temporary directory will be created. The
- default path is the current directory.
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
Returns
-------
@@ -154,15 +158,15 @@ class DataSource (object):
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
- also be compressed or uncompressed. DataSource hides some of the low-level
- details of downloading the file, allowing you to simply pass in a valid
- file path (or URL) and obtain a file object.
+ also be compressed or uncompressed. DataSource hides some of the
+ low-level details of downloading the file, allowing you to simply pass
+ in a valid file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
- Path to the directory where the source file gets downloaded to for use.
- If `destpath` is None, a temporary directory will be created.
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
@@ -202,17 +206,18 @@ class DataSource (object):
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
- import tempfile # deferring import to improve startup time
+ import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
def __del__(self):
# Remove temp directories
if self._istmpdest:
- rmtree(self._destpath)
+ shutil.rmtree(self._destpath)
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
+
"""
fname, ext = os.path.splitext(filename)
return ext in _file_openers.keys()
@@ -294,7 +299,7 @@ class DataSource (object):
openedurl = urlopen(path)
f = _open(upath, 'wb')
try:
- copyfileobj(openedurl, f)
+ shutil.copyfileobj(openedurl, f)
finally:
f.close()
openedurl.close()
@@ -307,13 +312,12 @@ class DataSource (object):
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
- If path is an URL, _findfile will cache a local copy and return
- the path to the cached file.
- If path is a local file, _findfile will return a path to that local
- file.
+ If path is an URL, _findfile will cache a local copy and return the
+ path to the cached file. If path is a local file, _findfile will
+ return a path to that local file.
- The search will include possible compressed versions of the file and
- return the first occurence found.
+ The search will include possible compressed versions of the file
+        and return the first occurrence found.
"""
@@ -392,7 +396,7 @@ class DataSource (object):
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).lstrip('..')
- drive, path = os.path.splitdrive(path) # for Windows
+ drive, path = os.path.splitdrive(path) # for Windows
return path
def exists(self, path):
@@ -404,7 +408,8 @@ class DataSource (object):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- - a remote URL that has not been downloaded, but is valid and accessible.
+ - a remote URL that has not been downloaded, but is valid and
+ accessible.
Parameters
----------
@@ -418,10 +423,10 @@ class DataSource (object):
Notes
-----
- When `path` is an URL, `exists` will return True if it's either stored
- locally in the `DataSource` directory, or is a valid remote URL.
- `DataSource` does not discriminate between the two, the file is accessible
- if it exists in either location.
+ When `path` is an URL, `exists` will return True if it's either
+ stored locally in the `DataSource` directory, or is a valid remote
+ URL. `DataSource` does not discriminate between the two, the file
+ is accessible if it exists in either location.
"""
# We import this here because importing urllib2 is slow and
@@ -457,17 +462,17 @@ class DataSource (object):
"""
Open and return file-like object.
- If `path` is an URL, it will be downloaded, stored in the `DataSource`
- directory and opened from there.
+ If `path` is an URL, it will be downloaded, stored in the
+ `DataSource` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
- Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
- append. Available modes depend on the type of object specified by
- `path`. Default is 'r'.
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
Returns
-------
@@ -500,12 +505,14 @@ class Repository (DataSource):
"""
Repository(baseurl, destpath='.')
- A data repository where multiple DataSource's share a base URL/directory.
+ A data repository where multiple DataSource's share a base
+ URL/directory.
- `Repository` extends `DataSource` by prepending a base URL (or directory)
- to all the files it handles. Use `Repository` when you will be working
- with multiple files from one base URL. Initialize `Repository` with the
- base URL, then refer to each file by its filename only.
+ `Repository` extends `DataSource` by prepending a base URL (or
+ directory) to all the files it handles. Use `Repository` when you will
+ be working with multiple files from one base URL. Initialize
+ `Repository` with the base URL, then refer to each file by its filename
+ only.
Parameters
----------
@@ -513,8 +520,8 @@ class Repository (DataSource):
Path to the local directory or remote location that contains the
data files.
destpath : str or None, optional
- Path to the directory where the source file gets downloaded to for use.
- If `destpath` is None, a temporary directory will be created.
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Examples
@@ -566,8 +573,9 @@ class Repository (DataSource):
Parameters
----------
path : str
- Can be a local file or a remote URL. This may, but does not have
- to, include the `baseurl` with which the `Repository` was initialized.
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
Returns
-------
@@ -592,8 +600,9 @@ class Repository (DataSource):
Parameters
----------
path : str
- Can be a local file or a remote URL. This may, but does not have
- to, include the `baseurl` with which the `Repository` was initialized.
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
Returns
-------
@@ -602,10 +611,10 @@ class Repository (DataSource):
Notes
-----
- When `path` is an URL, `exists` will return True if it's either stored
- locally in the `DataSource` directory, or is a valid remote URL.
- `DataSource` does not discriminate between the two, the file is accessible
- if it exists in either location.
+ When `path` is an URL, `exists` will return True if it's either
+ stored locally in the `DataSource` directory, or is a valid remote
+ URL. `DataSource` does not discriminate between the two, the file
+ is accessible if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
@@ -614,18 +623,19 @@ class Repository (DataSource):
"""
Open and return file-like object prepending Repository base URL.
- If `path` is an URL, it will be downloaded, stored in the DataSource
- directory and opened from there.
+ If `path` is an URL, it will be downloaded, stored in the
+ DataSource directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open. This may, but does not have to,
- include the `baseurl` with which the `Repository` was initialized.
+ include the `baseurl` with which the `Repository` was
+ initialized.
mode : {'r', 'w', 'a'}, optional
- Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
- append. Available modes depend on the type of object specified by
- `path`. Default is 'r'.
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
Returns
-------
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index aa39e25a1..1b1180893 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -8,10 +8,10 @@ __docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
-from numpy.compat import asbytes, bytes, asbytes_nested, long, basestring
+from numpy.compat import asbytes, bytes, asbytes_nested, basestring
if sys.version_info[0] >= 3:
- from builtins import bool, int, float, complex, object, str
+ from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
@@ -20,6 +20,7 @@ else:
if sys.version_info[0] >= 3:
def _bytes_to_complex(s):
return complex(s.decode('ascii'))
+
def _bytes_to_name(s):
return s.decode('ascii')
else:
@@ -148,10 +149,6 @@ def flatten_dtype(ndtype, flatten_base=False):
return types
-
-
-
-
class LineSplitter(object):
"""
Object to split a string at a given delimiter or at given places.
@@ -188,6 +185,7 @@ class LineSplitter(object):
"""
return lambda input: [_.strip() for _ in method(input)]
#
+
def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):
self.comments = comments
# Delimiter is a character
@@ -203,7 +201,8 @@ class LineSplitter(object):
delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
# Delimiter is a single integer
elif int(delimiter):
- (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))
+ (_handyman, delimiter) = (
+ self._fixedwidth_splitter, int(delimiter))
else:
(_handyman, delimiter) = (self._delimited_splitter, None)
self.delimiter = delimiter
@@ -212,6 +211,7 @@ class LineSplitter(object):
else:
self._handyman = _handyman
#
+
def _delimited_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
@@ -220,6 +220,7 @@ class LineSplitter(object):
return []
return line.split(self.delimiter)
#
+
def _fixedwidth_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
@@ -230,6 +231,7 @@ class LineSplitter(object):
slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
return [line[s] for s in slices]
#
+
def _variablewidth_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
@@ -238,32 +240,32 @@ class LineSplitter(object):
slices = self.delimiter
return [line[s] for s in slices]
#
+
def __call__(self, line):
return self._handyman(line)
-
class NameValidator(object):
"""
Object to validate a list of strings to use as field names.
The strings are stripped of any non alphanumeric character, and spaces
- are replaced by '_'. During instantiation, the user can define a list of
- names to exclude, as well as a list of invalid characters. Names in the
- exclusion list are appended a '_' character.
+ are replaced by '_'. During instantiation, the user can define a list
+ of names to exclude, as well as a list of invalid characters. Names in
+ the exclusion list are appended a '_' character.
- Once an instance has been created, it can be called with a list of names,
- and a list of valid names will be created.
- The `__call__` method accepts an optional keyword "default" that sets
- the default name in case of ambiguity. By default this is 'f', so
- that names will default to `f0`, `f1`, etc.
+ Once an instance has been created, it can be called with a list of
+ names, and a list of valid names will be created. The `__call__`
+ method accepts an optional keyword "default" that sets the default name
+ in case of ambiguity. By default this is 'f', so that names will
+ default to `f0`, `f1`, etc.
Parameters
----------
excludelist : sequence, optional
- A list of names to exclude. This list is appended to the default list
- ['return', 'file', 'print']. Excluded names are appended an underscore:
- for example, `file` becomes `file_` if supplied.
+ A list of names to exclude. This list is appended to the default
+ list ['return', 'file', 'print']. Excluded names are appended an
+ underscore: for example, `file` becomes `file_` if supplied.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
@@ -278,8 +280,8 @@ class NameValidator(object):
Notes
-----
- Calling an instance of `NameValidator` is the same as calling its method
- `validate`.
+ Calling an instance of `NameValidator` is the same as calling its
+ method `validate`.
Examples
--------
@@ -298,6 +300,7 @@ class NameValidator(object):
defaultexcludelist = ['return', 'file', 'print']
defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
#
+
def __init__(self, excludelist=None, deletechars=None,
case_sensitive=None, replace_space='_'):
# Process the exclusion list ..
@@ -326,18 +329,18 @@ class NameValidator(object):
def validate(self, names, defaultfmt="f%i", nbfields=None):
"""
- Validate a list of strings to use as field names for a structured array.
+ Validate a list of strings as field names for a structured array.
Parameters
----------
names : sequence of str
Strings to be validated.
defaultfmt : str, optional
- Default format string, used if validating a given string reduces its
- length to zero.
+ Default format string, used if validating a given string
+ reduces its length to zero.
nboutput : integer, optional
- Final number of validated names, used to expand or shrink the initial
- list of names.
+ Final number of validated names, used to expand or shrink the
+ initial list of names.
Returns
-------
@@ -346,8 +349,8 @@ class NameValidator(object):
Notes
-----
- A `NameValidator` instance can be called directly, which is the same as
- calling `validate`. For examples, see `NameValidator`.
+ A `NameValidator` instance can be called directly, which is the
+ same as calling `validate`. For examples, see `NameValidator`.
"""
# Initial checks ..............
@@ -394,11 +397,11 @@ class NameValidator(object):
seen[item] = cnt + 1
return tuple(validatednames)
#
+
def __call__(self, names, defaultfmt="f%i", nbfields=None):
return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
-
def str2bool(value):
"""
Tries to transform a string supposed to represent a boolean to a boolean.
@@ -462,22 +465,22 @@ class ConversionWarning(UserWarning):
pass
-
class StringConverter(object):
"""
- Factory class for function transforming a string into another object (int,
- float).
+ Factory class for function transforming a string into another object
+ (int, float).
After initialization, an instance can be called to transform a string
- into another object. If the string is recognized as representing a missing
- value, a default value is returned.
+ into another object. If the string is recognized as representing a
+ missing value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
- Default value to return when the input corresponds to a missing value.
+ Default value to return when the input corresponds to a missing
+ value.
type : type
Type of the output.
_status : int
@@ -494,14 +497,13 @@ class StringConverter(object):
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
- default value to `np.nan`.
- If a function, this function is used to convert a string to another
- object. In this case, it is recommended to give an associated default
- value as input.
+ default value to `np.nan`. If a function, this function is used to
+ convert a string to another object. In this case, it is recommended
+ to give an associated default value as input.
default : any, optional
- Value to return by default, that is, when the string to be converted
- is flagged as missing. If not given, `StringConverter` tries to supply
- a reasonable default value.
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given, `StringConverter`
+ tries to supply a reasonable default value.
missing_values : sequence of str, optional
Sequence of strings indicating a missing value.
locked : bool, optional
@@ -517,19 +519,23 @@ class StringConverter(object):
(nx.string_, bytes, asbytes('???'))]
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
#
+
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
#
+
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
#
- # This is a bit annoying. We want to return the "general" type in most cases
- # (ie. "string" rather than "S10"), but we want to return the specific type
- # for datetime64 (ie. "datetime64[us]" rather than "datetime64").
+ # This is a bit annoying. We want to return the "general" type in most
+ # cases (ie. "string" rather than "S10"), but we want to return the
+ # specific type for datetime64 (ie. "datetime64[us]" rather than
+ # "datetime64").
+
@classmethod
def _dtypeortype(cls, dtype):
"""Returns dtype for datetime64 and type of dtype otherwise."""
@@ -537,15 +543,17 @@ class StringConverter(object):
return dtype
return dtype.type
#
+
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
- Upgrade the mapper of a StringConverter by adding a new function and its
- corresponding default.
+ Upgrade the mapper of a StringConverter by adding a new function and
+ its corresponding default.
- The input function (or sequence of functions) and its associated default
- value (if any) is inserted in penultimate position of the mapper.
- The corresponding type is estimated from the dtype of the default value.
+ The input function (or sequence of functions) and its associated
+ default value (if any) is inserted in penultimate position of the
+ mapper. The corresponding type is estimated from the dtype of the
+ default value.
Parameters
----------
@@ -577,6 +585,7 @@ class StringConverter(object):
for (fct, dft) in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
#
+
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
locked=False):
# Convert unicode (for Py3)
@@ -600,12 +609,13 @@ class StringConverter(object):
except TypeError:
# dtype_or_func must be a function, then
if not hasattr(dtype_or_func, '__call__'):
- errmsg = "The input argument `dtype` is neither a function"\
- " or a dtype (got '%s' instead)"
+ errmsg = ("The input argument `dtype` is neither a"
+ " function nor a dtype (got '%s' instead)")
raise TypeError(errmsg % type(dtype_or_func))
# Set the function
self.func = dtype_or_func
- # If we don't have a default, try to guess it or set it to None
+ # If we don't have a default, try to guess it or set it to
+ # None
if default is None:
try:
default = self.func(asbytes('0'))
@@ -638,7 +648,7 @@ class StringConverter(object):
elif issubclass(dtype.type, np.int64):
self.func = np.int64
else:
- self.func = lambda x : int(float(x))
+ self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
self.missing_values = set([asbytes('')])
@@ -652,12 +662,14 @@ class StringConverter(object):
self._checked = False
self._initial_default = default
#
+
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
#
+
def _strict_call(self, value):
try:
return self.func(value)
@@ -668,18 +680,20 @@ class StringConverter(object):
return self.default
raise ValueError("Cannot convert string '%s'" % value)
#
+
def __call__(self, value):
return self._callingfunction(value)
#
+
def upgrade(self, value):
"""
- Try to find the best converter for a given string, and return the result.
+        Find the best converter for a given string, and return the result.
The supplied string `value` is converted by testing different
- converters in order. First the `func` method of the `StringConverter`
- instance is tried, if this fails other available converters are tried.
- The order in which these other converters are tried is determined by the
- `_status` attribute of the instance.
+ converters in order. First the `func` method of the
+ `StringConverter` instance is tried, if this fails other available
+ converters are tried. The order in which these other converters
+ are tried is determined by the `_status` attribute of the instance.
Parameters
----------
@@ -733,7 +747,9 @@ class StringConverter(object):
# Complains if we try to upgrade by the maximum
_status = self._status
if _status == _statusmax:
- raise ConverterError("Could not find a valid conversion function")
+ raise ConverterError(
+ "Could not find a valid conversion function"
+ )
elif _status < _statusmax - 1:
_status += 1
(self.type, self.func, default) = self._mapper[_status]
@@ -754,23 +770,24 @@ class StringConverter(object):
func : function
Conversion function.
default : any, optional
- Value to return by default, that is, when the string to be converted
- is flagged as missing. If not given, `StringConverter` tries to supply
- a reasonable default value.
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given,
+ `StringConverter` tries to supply a reasonable default value.
testing_value : str, optional
A string representing a standard input value of the converter.
- This string is used to help defining a reasonable default value.
+ This string is used to help defining a reasonable default
+ value.
missing_values : sequence of str, optional
Sequence of strings indicating a missing value.
locked : bool, optional
- Whether the StringConverter should be locked to prevent automatic
- upgrade or not. Default is False.
+ Whether the StringConverter should be locked to prevent
+ automatic upgrade or not. Default is False.
Notes
-----
- `update` takes the same parameters as the constructor of `StringConverter`,
- except that `func` does not accept a `dtype` whereas `dtype_or_func` in
- the constructor does.
+ `update` takes the same parameters as the constructor of
+ `StringConverter`, except that `func` does not accept a `dtype`
+ whereas `dtype_or_func` in the constructor does.
"""
self.func = func
@@ -796,7 +813,6 @@ class StringConverter(object):
self.missing_values = []
-
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
"""
Convenience function to create a `np.dtype` object.
@@ -807,17 +823,18 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
Parameters
----------
ndtype : var
- Definition of the dtype. Can be any string or dictionary
- recognized by the `np.dtype` function, or a sequence of types.
+ Definition of the dtype. Can be any string or dictionary recognized
+ by the `np.dtype` function, or a sequence of types.
names : str or sequence, optional
Sequence of strings to use as field names for a structured dtype.
- For convenience, `names` can be a string of a comma-separated list of
- names.
+ For convenience, `names` can be a string of a comma-separated list
+ of names.
defaultfmt : str, optional
Format string used to define missing names, such as ``"f%i"``
(default) or ``"fields_%02i"``.
validationargs : optional
- A series of optional arguments used to initialize a `NameValidator`.
+ A series of optional arguments used to initialize a
+ `NameValidator`.
Examples
--------
@@ -865,8 +882,8 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
elif (nbtypes > 0):
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
- if (ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and \
- (defaultfmt != "f%i"):
+ if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
+ (defaultfmt != "f%i")):
ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index 4eaabd0ff..54b9c1dc7 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -52,6 +52,7 @@ class NumpyVersion():
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
+
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 82f741df9..bbfdce794 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -8,6 +8,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
+
__all__ = ['pad']
@@ -243,7 +244,6 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=True).astype(np.float64)
-
# Appropriate slicing to extract n-dimensional edge along `axis`
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
@@ -1419,7 +1419,6 @@ def pad(array, pad_width, mode=None, **kwargs):
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis] - 1
- repeat = safe_pad
while ((pad_before > safe_pad) or (pad_after > safe_pad)):
offset = 0
pad_iter_b = min(safe_pad,
@@ -1443,7 +1442,6 @@ def pad(array, pad_width, mode=None, **kwargs):
# length, to keep the period of the reflections consistent.
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis]
- repeat = safe_pad
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
@@ -1462,7 +1460,6 @@ def pad(array, pad_width, mode=None, **kwargs):
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
safe_pad = newmat.shape[axis]
- repeat = safe_pad
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
@@ -1473,7 +1470,6 @@ def pad(array, pad_width, mode=None, **kwargs):
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
-
newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
return newmat
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 005703d16..2d98c35d2 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -26,11 +26,14 @@ To do: Optionally return indices analogously to unique for all functions.
"""
from __future__ import division, absolute_import, print_function
-__all__ = ['ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d',
- 'unique', 'in1d']
-
import numpy as np
-from numpy.lib.utils import deprecate
+
+
+__all__ = [
+ 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
+ 'in1d'
+ ]
+
def ediff1d(ary, to_end=None, to_begin=None):
"""
@@ -242,7 +245,7 @@ def intersect1d(ar1, ar2, assume_unique=False):
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
ar1 = unique(ar1)
ar2 = unique(ar2)
- aux = np.concatenate( (ar1, ar2) )
+ aux = np.concatenate((ar1, ar2))
aux.sort()
return aux[:-1][aux[1:] == aux[:-1]]
@@ -279,13 +282,13 @@ def setxor1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
- aux = np.concatenate( (ar1, ar2) )
+ aux = np.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
- flag = np.concatenate( ([True], aux[1:] != aux[:-1], [True] ) )
+ flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = flag[1:] == flag[:-1]
return aux[flag2]
@@ -368,7 +371,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
- ar = np.concatenate( (ar1, ar2) )
+ ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
@@ -378,8 +381,8 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
- flag = np.concatenate( (bool_ar, [invert]) )
- indx = order.argsort(kind='mergesort')[:len( ar1 )]
+ flag = np.concatenate((bool_ar, [invert]))
+ indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
@@ -414,7 +417,7 @@ def union1d(ar1, ar2):
array([-2, -1, 0, 1, 2])
"""
- return unique( np.concatenate( (ar1, ar2) ) )
+ return unique(np.concatenate((ar1, ar2)))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index c2cde574e..d9839feeb 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -9,7 +9,6 @@ a user-specified number of elements.
"""
from __future__ import division, absolute_import, print_function
-import sys
from operator import mul
from functools import reduce
@@ -105,7 +104,8 @@ class Arrayterator(object):
"""
# Fix index, handling ellipsis and incomplete slices.
- if not isinstance(index, tuple): index = (index,)
+ if not isinstance(index, tuple):
+ index = (index,)
fixed = []
length, dims = len(index), len(self.shape)
for slice_ in index:
@@ -181,7 +181,8 @@ class Arrayterator(object):
def __iter__(self):
# Skip arrays with degenerate dimensions
- if [dim for dim in self.shape if dim <= 0]: raise StopIteration
+ if [dim for dim in self.shape if dim <= 0]:
+ raise StopIteration
start = self.start[:]
stop = self.stop[:]
@@ -200,12 +201,13 @@ class Arrayterator(object):
# along higher dimensions, so we read only a single position
if count == 0:
stop[i] = start[i]+1
- elif count <= self.shape[i]: # limit along this dimension
+ elif count <= self.shape[i]:
+ # limit along this dimension
stop[i] = start[i] + count*step[i]
rundim = i
else:
- stop[i] = self.stop[i] # read everything along this
- # dimension
+ # read everything along this dimension
+ stop[i] = self.stop[i]
stop[i] = min(self.stop[i], stop[i])
count = count//self.shape[i]
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index c085a5d53..5b96e5b8e 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -119,7 +119,8 @@ def fv(rate, nper, pmt, pv, when='end'):
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pmt, pv, when)
zer = np.zeros(miter.shape)
- fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
+ fact = np.where(rate == zer, nper + zer,
+ (1 + rate*when)*(temp - 1)/rate + zer)
return -(pv*temp + pmt*fact)
def pmt(rate, nper, pv, fv=0, when='end'):
@@ -210,7 +211,8 @@ def pmt(rate, nper, pv, fv=0, when='end'):
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pv, fv, when)
zer = np.zeros(miter.shape)
- fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
+ fact = np.where(rate == zer, nper + zer,
+ (1 + rate*when)*(temp - 1)/rate + zer)
return -(fv + pv*temp) / fact
def nper(rate, pmt, pv, fv=0, when='end'):
@@ -279,7 +281,7 @@ def nper(rate, pmt, pv, fv=0, when='end'):
B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate)
miter = np.broadcast(rate, pmt, pv, fv, when)
zer = np.zeros(miter.shape)
- return np.where(rate==zer, A+zer, B+zer) + 0.0
+ return np.where(rate == zer, A + zer, B + zer) + 0.0
def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
"""
@@ -365,7 +367,8 @@ def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
"""
when = _convert_when(when)
- rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, pv, fv, when)
+ rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper,
+ pv, fv, when)
total_pmt = pmt(rate, nper, pv, fv, when)
ipmt = _rbl(rate, per, total_pmt, pv, when)*rate
try:
@@ -507,12 +510,16 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
return -(fv + pmt*fact)/temp
# Computed with Sage
-# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r)
+# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x -
+# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r +
+# p*((r + 1)^n - 1)*w/r)
def _g_div_gp(r, n, p, x, y, w):
t1 = (r+1)**n
t2 = (r+1)**(n-1)
- return (y + t1*x + p*(t1 - 1)*(r*w + 1)/r)/(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r)
+ return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) /
+ (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r +
+ p*(t1 - 1)*w/r))
# Use Newton's iteration until the change is less than 1e-6
# for all values or a maximum of 100 iterations is reached.
@@ -572,7 +579,7 @@ def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
while (iter < maxiter) and not close:
rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when)
diff = abs(rnp1-rn)
- close = np.all(diff<tol)
+ close = np.all(diff < tol)
iter += 1
rn = rnp1
if not close:
@@ -593,9 +600,9 @@ def irr(values):
----------
values : array_like, shape(N,)
Input cash flows per time period. By convention, net "deposits"
- are negative and net "withdrawals" are positive. Thus, for example,
- at least the first element of `values`, which represents the initial
- investment, will typically be negative.
+ are negative and net "withdrawals" are positive. Thus, for
+ example, at least the first element of `values`, which represents
+ the initial investment, will typically be negative.
Returns
-------
@@ -605,13 +612,13 @@ def irr(values):
Notes
-----
The IRR is perhaps best understood through an example (illustrated
- using np.irr in the Examples section below). Suppose one invests
- 100 units and then makes the following withdrawals at regular
- (fixed) intervals: 39, 59, 55, 20. Assuming the ending value is 0,
- one's 100 unit investment yields 173 units; however, due to the
- combination of compounding and the periodic withdrawals, the
- "average" rate of return is neither simply 0.73/4 nor (1.73)^0.25-1.
- Rather, it is the solution (for :math:`r`) of the equation:
+ using np.irr in the Examples section below). Suppose one invests 100
+ units and then makes the following withdrawals at regular (fixed)
+ intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100
+ unit investment yields 173 units; however, due to the combination of
+ compounding and the periodic withdrawals, the "average" rate of return
+ is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution
+ (for :math:`r`) of the equation:
.. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2}
+ \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0
@@ -663,18 +670,18 @@ def npv(rate, values):
The discount rate.
values : array_like, shape(M, )
The values of the time series of cash flows. The (fixed) time
- interval between cash flow "events" must be the same as that
- for which `rate` is given (i.e., if `rate` is per year, then
- precisely a year is understood to elapse between each cash flow
- event). By convention, investments or "deposits" are negative,
- income or "withdrawals" are positive; `values` must begin with
- the initial investment, thus `values[0]` will typically be
- negative.
+ interval between cash flow "events" must be the same as that for
+ which `rate` is given (i.e., if `rate` is per year, then precisely
+ a year is understood to elapse between each cash flow event). By
+ convention, investments or "deposits" are negative, income or
+ "withdrawals" are positive; `values` must begin with the initial
+ investment, thus `values[0]` will typically be negative.
Returns
-------
out : float
- The NPV of the input cash flow series `values` at the discount `rate`.
+ The NPV of the input cash flow series `values` at the discount
+ `rate`.
Notes
-----
@@ -705,8 +712,9 @@ def mirr(values, finance_rate, reinvest_rate):
Parameters
----------
values : array_like
- Cash flows (must contain at least one positive and one negative value)
- or nan is returned. The first value is considered a sunk cost at time zero.
+ Cash flows (must contain at least one positive and one negative
+ value) or nan is returned. The first value is considered a sunk
+ cost at time zero.
finance_rate : scalar
Interest rate paid on the cash flows
reinvest_rate : scalar
@@ -718,7 +726,6 @@ def mirr(values, finance_rate, reinvest_rate):
Modified internal rate of return
"""
-
values = np.asarray(values, dtype=np.double)
n = values.size
pos = values > 0
@@ -728,8 +735,3 @@ def mirr(values, finance_rate, reinvest_rate):
numer = np.abs(npv(reinvest_rate, values*pos))
denom = np.abs(npv(finance_rate, values*neg))
return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1
-
-if __name__ == '__main__':
- import doctest
- import numpy as np
- doctest.testmod(verbose=True)
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 6083312de..98743b6ad 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -150,7 +150,7 @@ else:
MAGIC_PREFIX = asbytes('\x93NUMPY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
-BUFFER_SIZE = 2 ** 18 #size of buffer for reading npz files in bytes
+BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
@@ -231,10 +231,10 @@ def dtype_to_descr(dtype):
"""
if dtype.names is not None:
- # This is a record array. The .descr is fine.
- # XXX: parts of the record array with an empty name, like padding bytes,
- # still get fiddled with. This needs to be fixed in the C implementation
- # of dtype().
+ # This is a record array. The .descr is fine. XXX: parts of the
+ # record array with an empty name, like padding bytes, still get
+ # fiddled with. This needs to be fixed in the C implementation of
+ # dtype().
return dtype.descr
else:
return dtype.str
@@ -293,9 +293,9 @@ def _write_array_header(fp, d, version=None):
header.append("}")
header = "".join(header)
# Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a 16-byte
- # boundary. Hopefully, some system, possibly memory-mapping, can take
- # advantage of our premature optimization.
+ # string, the header-length short and the header are aligned on a
+ # 16-byte boundary. Hopefully, some system, possibly memory-mapping,
+ # can take advantage of our premature optimization.
current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
topad = 16 - (current_header_len % 16)
header = asbytes(header + ' '*topad + '\n')
@@ -325,8 +325,8 @@ def write_array_header_1_0(fp, d):
----------
fp : filelike object
d : dict
- This has the appropriate entries for writing its string representation
- to the header of the file.
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
"""
_write_array_header(fp, d, (1, 0))
@@ -341,8 +341,8 @@ def write_array_header_2_0(fp, d):
----------
fp : filelike object
d : dict
- This has the appropriate entries for writing its string representation
- to the header of the file.
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
"""
_write_array_header(fp, d, (2, 0))
@@ -363,9 +363,9 @@ def read_array_header_1_0(fp):
shape : tuple of int
The shape of the array.
fortran_order : bool
- The array data will be written out directly if it is either C-contiguous
- or Fortran-contiguous. Otherwise, it will be made contiguous before
- writing it out.
+ The array data will be written out directly if it is either
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
+ contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
@@ -396,9 +396,9 @@ def read_array_header_2_0(fp):
shape : tuple of int
The shape of the array.
fortran_order : bool
- The array data will be written out directly if it is either C-contiguous
- or Fortran-contiguous. Otherwise, it will be made contiguous before
- writing it out.
+ The array data will be written out directly if it is either
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
+ contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
@@ -428,9 +428,9 @@ def _read_array_header(fp, version):
else:
raise ValueError("Invalid version %r" % version)
- # The header is a pretty-printed string representation of a literal Python
- # dictionary with trailing newlines padded to a 16-byte boundary. The keys
- # are strings.
+ # The header is a pretty-printed string representation of a literal
+ # Python dictionary with trailing newlines padded to a 16-byte
+ # boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
@@ -449,7 +449,7 @@ def _read_array_header(fp, version):
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
- not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
+ not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
msg = "shape is not valid: %r"
raise ValueError(msg % (d['shape'],))
if not isinstance(d['fortran_order'], bool):
@@ -474,13 +474,13 @@ def write_array(fp, array, version=None):
Parameters
----------
fp : file_like object
- An open, writable file object, or similar object with a ``.write()``
- method.
+ An open, writable file object, or similar object with a
+ ``.write()`` method.
array : ndarray
The array to write to disk.
version : (int, int) or None, optional
- The version number of the format. None means use the oldest supported
- version that is able to store the data. Default: None
+ The version number of the format. None means use the oldest
+ supported version that is able to store the data. Default: None
Raises
------
@@ -504,8 +504,9 @@ def write_array(fp, array, version=None):
buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
- # We contain Python objects so we cannot write out the data directly.
- # Instead, we will pickle it out with version 2 of the pickle protocol.
+ # We contain Python objects so we cannot write out the data
+ # directly. Instead, we will pickle it out with version 2 of the
+ # pickle protocol.
pickle.dump(array, fp, protocol=2)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
@@ -563,13 +564,13 @@ def read_array(fp):
# We can use the fast fromfile() function.
array = numpy.fromfile(fp, dtype=dtype, count=count)
else:
- # This is not a real file. We have to read it the memory-intensive
- # way.
- # crc32 module fails on reads greater than 2 ** 32 bytes, breaking
- # large reads from gzip streams. Chunk reads to BUFFER_SIZE bytes to
- # avoid issue and reduce memory overhead of the read. In
- # non-chunked case count < max_read_count, so only one read is
- # performed.
+ # This is not a real file. We have to read it the
+ # memory-intensive way.
+ # crc32 module fails on reads greater than 2 ** 32 bytes,
+ # breaking large reads from gzip streams. Chunk reads to
+ # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+ # of the read. In non-chunked case count < max_read_count, so
+ # only one read is performed.
max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
@@ -604,25 +605,24 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
- addition to the standard file modes, 'c' is also accepted to
- mean "copy on write." See `memmap` for the available mode strings.
+ addition to the standard file modes, 'c' is also accepted to mean
+ "copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
- mode, if not, `dtype` is ignored. The default value is None,
- which results in a data-type of `float64`.
+ mode, if not, `dtype` is ignored. The default value is None, which
+ results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
- C-contiguous (False, the default) if we are creating a new file
- in "write" mode.
+ C-contiguous (False, the default) if we are creating a new file in
+ "write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
- format used to create the file.
- None means use the oldest supported version that is able to store the
- data. Default: None
+ format used to create the file. None means use the oldest
+ supported version that is able to store the data. Default: None
Returns
-------
@@ -642,15 +642,15 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
"""
if not isinstance(filename, basestring):
- raise ValueError("Filename must be a string. Memmap cannot use" \
+ raise ValueError("Filename must be a string. Memmap cannot use"
" existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
# Check if we ought to create the file.
_check_version(version)
- # Ensure that the given dtype is an authentic dtype object rather than
- # just something that can be interpreted as a dtype object.
+ # Ensure that the given dtype is an authentic dtype object rather
+ # than just something that can be interpreted as a dtype object.
dtype = numpy.dtype(dtype)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
@@ -713,9 +713,9 @@ def _read_bytes(fp, size, error_template="ran out of data"):
"""
data = bytes()
while True:
- # io files (default in python3) return None or raise on would-block,
- # python2 file will truncate, probably nothing can be done about that.
- # note that regular files can't be non-blocking
+ # io files (default in python3) return None or raise on
+ # would-block, python2 file will truncate, probably nothing can be
+ # done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
@@ -725,6 +725,6 @@ def _read_bytes(fp, size, error_template="ran out of data"):
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
- raise ValueError(msg %(error_template, size, len(data)))
+ raise ValueError(msg % (error_template, size, len(data)))
else:
return data
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index f625bcb90..618a93bb9 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,15 +1,5 @@
from __future__ import division, absolute_import, print_function
-__docformat__ = "restructuredtext en"
-__all__ = [
- 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
- 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
- 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
- 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
- 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
- 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
- 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc']
-
import warnings
import sys
import collections
@@ -20,21 +10,21 @@ import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
- empty_like, ndarray, around, floor, ceil, take, ScalarType, dot, where,
- intp, integer, isscalar
+ empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
+ integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
- ravel, nonzero, choose, sort, partition, mean
+ ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
+from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
-from .utils import deprecate
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
@@ -43,6 +33,17 @@ if sys.version_info[0] < 3:
range = xrange
+__all__ = [
+ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
+ 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
+ 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
+ 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
+ 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
+ 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
+ ]
+
+
def iterable(y):
"""
Check whether or not an object can be iterated over.
@@ -371,8 +372,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
- # For the rightmost bin, we want values equal to the right
- # edge to be counted in the last bin, and not as an outlier.
+ # For the rightmost bin, we want values equal to the right edge to be
+ # counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
@@ -380,7 +381,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal))
+ on_edge = (around(sample[:, i], decimal) ==
+ around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
@@ -1626,6 +1628,7 @@ class vectorize(object):
further degrades performance.
"""
+
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
@@ -3379,7 +3382,7 @@ def meshgrid(*xi, **kwargs):
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
- if not indexing in ['xy', 'ij']:
+ if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
@@ -3440,7 +3443,7 @@ def delete(arr, obj, axis=None):
Notes
-----
Often it is preferable to use a boolean mask. For example:
-
+
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index f0066be81..98c6b291b 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,28 +1,30 @@
from __future__ import division, absolute_import, print_function
-__all__ = ['ravel_multi_index',
- 'unravel_index',
- 'mgrid',
- 'ogrid',
- 'r_', 'c_', 's_',
- 'index_exp', 'ix_',
- 'ndenumerate', 'ndindex',
- 'fill_diagonal', 'diag_indices', 'diag_indices_from']
-
import sys
+import math
+
import numpy.core.numeric as _nx
-from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod,
- arange )
+from numpy.core.numeric import (
+ asarray, ScalarType, array, alltrue, cumprod, arange
+ )
from numpy.core.numerictypes import find_common_type
-import math
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.lib._compiled_base import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
+
makemat = matrix.matrix
+
+__all__ = [
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
+ 'diag_indices', 'diag_indices_from'
+ ]
+
+
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
@@ -142,8 +144,10 @@ class nd_grid(object):
[4]]), array([[0, 1, 2, 3, 4]])]
"""
+
def __init__(self, sparse=False):
self.sparse = sparse
+
def __getitem__(self, key):
try:
size = []
@@ -151,16 +155,19 @@ class nd_grid(object):
for k in range(len(key)):
step = key[k].step
start = key[k].start
- if start is None: start=0
- if step is None: step=1
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
- size.append(int(math.ceil((key[k].stop - start)/(step*1.0))))
- if isinstance(step, float) or \
- isinstance(start, float) or \
- isinstance(key[k].stop, float):
+ size.append(
+ int(math.ceil((key[k].stop - start)/(step*1.0))))
+ if (isinstance(step, float) or
+ isinstance(start, float) or
+ isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
@@ -170,8 +177,10 @@ class nd_grid(object):
for k in range(len(size)):
step = key[k].step
start = key[k].start
- if start is None: start=0
- if step is None: step=1
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
@@ -188,13 +197,14 @@ class nd_grid(object):
step = key.step
stop = key.stop
start = key.start
- if start is None: start = 0
+ if start is None:
+ start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
- stop = key.stop+step
+ stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
@@ -207,8 +217,8 @@ class nd_grid(object):
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
-mgrid.__doc__ = None # set in numpy.add_newdocs
-ogrid.__doc__ = None # set in numpy.add_newdocs
+mgrid.__doc__ = None # set in numpy.add_newdocs
+ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
"""
@@ -217,6 +227,7 @@ class AxisConcatenator(object):
For detailed documentation on usage, see `r_`.
"""
+
def _retval(self, res):
if self.matrix:
oldndim = res.ndim
@@ -256,7 +267,8 @@ class AxisConcatenator(object):
step = key[k].step
start = key[k].start
stop = key[k].stop
- if start is None: start = 0
+ if start is None:
+ start = 0
if step is None:
step = 1
if isinstance(step, complex):
@@ -431,6 +443,7 @@ class RClass(AxisConcatenator):
matrix([[1, 2, 3, 4, 5, 6]])
"""
+
def __init__(self):
AxisConcatenator.__init__(self, 0)
@@ -453,6 +466,7 @@ class CClass(AxisConcatenator):
array([[1, 2, 3, 0, 0, 4, 5, 6]])
"""
+
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
@@ -484,6 +498,7 @@ class ndenumerate(object):
(1, 1) 4
"""
+
def __init__(self, arr):
self.iter = asarray(arr).flat
@@ -536,10 +551,12 @@ class ndindex(object):
(2, 1, 0)
"""
+
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
- x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape))
+ x = as_strided(_nx.zeros(1), shape=shape,
+ strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
order='C')
@@ -556,18 +573,20 @@ class ndindex(object):
def __next__(self):
"""
- Standard iterator method, updates the index and returns the index tuple.
+ Standard iterator method, updates the index and returns the index
+ tuple.
Returns
-------
val : tuple of ints
- Returns a tuple containing the indices of the current iteration.
+ Returns a tuple containing the indices of the current
+ iteration.
"""
next(self._it)
return self._it.multi_index
- next = __next__
+ next = __next__
# You can do all this with slice() plus a few special objects,
@@ -624,6 +643,7 @@ class IndexExpression(object):
array([2, 4])
"""
+
def __init__(self, maketuple):
self.maketuple = maketuple
@@ -743,7 +763,7 @@ def fill_diagonal(a, val, wrap=False):
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(a.shape)==0):
+ if not alltrue(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index d695245d4..7260a35b8 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -17,9 +17,7 @@ Functions
from __future__ import division, absolute_import, print_function
import warnings
-import operator
import numpy as np
-from numpy.core.fromnumeric import partition
from numpy.lib.function_base import _ureduce as _ureduce
__all__ = [
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 42a539f78..0e49dd31c 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -33,7 +33,8 @@ loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
- 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
+ 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
+ ]
def seek_gzip_factory(f):
@@ -111,6 +112,7 @@ class BagObj(object):
"Doesn't matter what you want, you're gonna get this"
"""
+
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
@@ -184,6 +186,7 @@ class NpzFile(object):
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
+
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
@@ -866,7 +869,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
- if not ndmin in [0, 1, 2]:
+ if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
@@ -1390,7 +1393,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
first_line = next(fhd)
if names is True:
if comments in first_line:
- first_line = asbytes('').join(first_line.split(comments)[1:])
+ first_line = (
+ asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
@@ -1595,7 +1599,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
- miss_chars = [_.missing_values for _ in converters]
+ # Fixme: possible error as following variable never used.
+ #miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
@@ -1686,23 +1691,17 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if usemask:
masks = masks[:-skip_footer]
-
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
- #
- # if loose:
- # conversionfuncs = [conv._loose_call for conv in converters]
- # else:
- # conversionfuncs = [conv._strict_call for conv in converters]
- # for (i, vals) in enumerate(rows):
- # rows[i] = tuple([convert(val)
- # for (convert, val) in zip(conversionfuncs, vals)])
if loose:
- rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
- for (i, converter) in enumerate(converters)]))
+ rows = list(
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
else:
- rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
- for (i, converter) in enumerate(converters)]))
+ rows = list(
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
+
# Reset the dtype
data = rows
if dtype is None:
@@ -1764,7 +1763,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if user_converters:
ishomogeneous = True
descr = []
- for (i, ttype) in enumerate([conv.type for conv in converters]):
+ for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 10ae32a60..6a1adc773 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -143,7 +143,7 @@ def poly(seq_of_zeros):
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
- NX.alltrue(neg_roots == pos_roots)):
+ NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
@@ -412,15 +412,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
deg : int
Degree of the fitting polynomial
rcond : float, optional
- Relative condition number of the fit. Singular values smaller than this
- relative to the largest singular value will be ignored. The default
- value is len(x)*eps, where eps is the relative precision of the float
- type, about 2e-16 in most cases.
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
full : bool, optional
- Switch determining nature of return value. When it is
- False (the default) just the coefficients are returned, when True
- diagnostic information from the singular value decomposition is also
- returned.
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
@@ -430,18 +429,21 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
Returns
-------
p : ndarray, shape (M,) or (M, K)
- Polynomial coefficients, highest power first.
- If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
+ coefficients for `k`-th data set are in ``p[:,k]``.
- residuals, rank, singular_values, rcond : present only if `full` = True
- Residuals of the least-squares fit, the effective rank of the scaled
- Vandermonde coefficient matrix, its singular values, and the specified
- value of `rcond`. For more details, see `linalg.lstsq`.
+ residuals, rank, singular_values, rcond :
+ Present only if `full` = True. Residuals of the least-squares fit,
+ the effective rank of the scaled Vandermonde coefficient matrix,
+ its singular values, and the specified value of `rcond`. For more
+ details, see `linalg.lstsq`.
- V : ndaray, shape (M,M) or (M,M,K) : present only if `full` = False and `cov`=True
- The covariance matrix of the polynomial coefficient estimates. The diagonal
- of this matrix are the variance estimates for each coefficient. If y is a 2-d
- array, then the covariance matrix for the `k`-th data set are in ``V[:,:,k]``
+ V : ndarray, shape (M,M) or (M,M,K)
+ Present only if `full` = False and `cov`=True. The covariance
+ matrix of the polynomial coefficient estimates. The diagonal of
+ this matrix contains the variance estimates for each coefficient.
+ If y is a 2-D array, then the covariance matrix for the `k`-th
+ data set is in ``V[:,:,k]``.
Warns
@@ -531,8 +533,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
- >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
- [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
@@ -543,19 +544,19 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
y = NX.asarray(y) + 0.0
# check arguments.
- if deg < 0 :
+ if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2 :
+ if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
- if x.shape[0] != y.shape[0] :
+ if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
- if rcond is None :
+ if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
@@ -567,7 +568,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
- if w.shape[0] != y.shape[0] :
+ if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
@@ -586,9 +587,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
- if full :
+ if full:
return c, resids, rank, s, rcond
- elif cov :
+ elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
@@ -600,7 +601,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
- else :
+ else:
return c
@@ -917,8 +918,8 @@ def _raise_power(astr, wrap=70):
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
- if ((len(line2)+len(toadd2) > wrap) or \
- (len(line1)+len(toadd1) > wrap)):
+ if ((len(line2) + len(toadd2) > wrap) or
+ (len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
@@ -1126,7 +1127,6 @@ class poly1d(object):
thestr = newstr
return _raise_power(thestr)
-
def __call__(self, val):
return polyval(self.coeffs, val)
@@ -1214,7 +1214,8 @@ class poly1d(object):
try:
return self.__dict__[key]
except KeyError:
- raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
+ raise AttributeError(
+ "'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index ae4ee56c6..a61b1749b 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -1,8 +1,8 @@
"""
Collection of utilities to manipulate structured arrays.
-Most of these functions were initially implemented by John Hunter for matplotlib.
-They have been rewritten and extended for convenience.
+Most of these functions were initially implemented by John Hunter for
+matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
@@ -22,16 +22,13 @@ if sys.version_info[0] < 3:
_check_fill_value = np.ma.core._check_fill_value
-__all__ = ['append_fields',
- 'drop_fields',
- 'find_duplicates',
- 'get_fieldstructure',
- 'join_by',
- 'merge_arrays',
- 'rec_append_fields', 'rec_drop_fields', 'rec_join',
- 'recursive_fill_fields', 'rename_fields',
- 'stack_arrays',
- ]
+
+__all__ = [
+ 'append_fields', 'drop_fields', 'find_duplicates',
+ 'get_fieldstructure', 'join_by', 'merge_arrays',
+ 'rec_append_fields', 'rec_drop_fields', 'rec_join',
+ 'recursive_fill_fields', 'rename_fields', 'stack_arrays',
+ ]
def recursive_fill_fields(input, output):
@@ -73,7 +70,6 @@ def recursive_fill_fields(input, output):
return output
-
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
@@ -190,7 +186,7 @@ def zip_descr(seqarrays, flatten=False):
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
- Returns a dictionary with fields as keys and a list of parent fields as values.
+ Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
@@ -228,8 +224,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
-# if (lastparent[-1] != lastname):
- lastparent.append(lastname)
+ lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
@@ -240,6 +235,7 @@ def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
+
"""
for element in iterable:
if isinstance(element, np.void):
@@ -252,9 +248,11 @@ def _izip_fields_flat(iterable):
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
+
"""
for element in iterable:
- if hasattr(element, '__iter__') and not isinstance(element, basestring):
+ if (hasattr(element, '__iter__') and
+ not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
@@ -328,9 +326,8 @@ def _fix_defaults(output, defaults=None):
return output
-
-def merge_arrays(seqarrays,
- fill_value= -1, flatten=False, usemask=False, asrecarray=False):
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+ usemask=False, asrecarray=False):
"""
Merge arrays field by field.
@@ -466,7 +463,6 @@ def merge_arrays(seqarrays,
return output
-
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
@@ -478,13 +474,14 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
base : array
Input array
drop_names : string or sequence
- String or sequence of strings corresponding to the names of the fields
- to drop.
+ String or sequence of strings corresponding to the names of the
+ fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
- asrecarray : string or sequence
+ asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
- a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
+ a plain ndarray or masked array with flexible dtype. The default
+ is False.
Examples
--------
@@ -505,7 +502,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
- #
+
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
@@ -520,11 +517,11 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
else:
newdtype.append((name, current))
return newdtype
- #
+
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
- #
+
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
@@ -537,7 +534,6 @@ def rec_drop_fields(base, drop_names):
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
-
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
@@ -567,8 +563,9 @@ def rename_fields(base, namemapper):
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
- newdtype.append((newname,
- _recursive_rename_fields(current, namemapper)))
+ newdtype.append(
+ (newname, _recursive_rename_fields(current, namemapper))
+ )
else:
newdtype.append((newname, current))
return newdtype
@@ -577,7 +574,7 @@ def rename_fields(base, namemapper):
def append_fields(base, names, data, dtypes=None,
- fill_value= -1, usemask=True, asrecarray=False):
+ fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
@@ -618,7 +615,7 @@ def append_fields(base, names, data, dtypes=None,
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
- else :
+ else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
@@ -645,7 +642,6 @@ def append_fields(base, names, data, dtypes=None,
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
-
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
@@ -680,7 +676,6 @@ def rec_append_fields(base, names, data, dtypes=None):
asrecarray=True, usemask=False)
-
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
@@ -693,11 +688,11 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
- Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`)
- or a ndarray.
+ Whether to return a MaskedArray (or MaskedRecords if
+ `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
- Whether to return a recarray (or MaskedRecords if `usemask==True`) or
- just a flexible-type ndarray.
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether automatically cast the type of the field to the maximum.
@@ -747,7 +742,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
- raise TypeError("Incompatible type '%s' <> '%s'" % \
+ raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
@@ -771,7 +766,6 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
usemask=usemask, asrecarray=asrecarray)
-
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
@@ -826,19 +820,17 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
return duplicates
-
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
- to the fields used to join the array.
- An exception is raised if the `key` field cannot be found in the two input
- arrays.
- Neither `r1` nor `r2` should have any duplicates along `key`: the presence
- of duplicates will make the output quite unreliable. Note that duplicates
- are not looked for by the algorithm.
+ to the fields used to join the array. An exception is raised if the
+ `key` field cannot be found in the two input arrays. Neither `r1` nor
+ `r2` should have any duplicates along `key`: the presence of duplicates
+ will make the output quite unreliable. Note that duplicates are not
+ looked for by the algorithm.
Parameters
----------
@@ -849,39 +841,41 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
- If 'outer', returns the common elements as well as the elements of r1
- not in r2 and the elements of not in r2.
- If 'leftouter', returns the common elements and the elements of r1 not
- in r2.
+ If 'outer', returns the common elements as well as the elements of
+ r1 not in r2 and the elements of r2 not in r1.
+ If 'leftouter', returns the common elements and the elements of r1
+ not in r2.
r1postfix : string, optional
- String appended to the names of the fields of r1 that are present in r2
- but absent of the key.
+ String appended to the names of the fields of r1 that are present
+ in r2 but absent from the key.
r2postfix : string, optional
- String appended to the names of the fields of r2 that are present in r1
- but absent of the key.
+ String appended to the names of the fields of r2 that are present
+ in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
- Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`)
- or a ndarray.
+ Whether to return a MaskedArray (or MaskedRecords if
+ `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
- Whether to return a recarray (or MaskedRecords if `usemask==True`) or
- just a flexible-type ndarray.
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
- * A temporary array is formed by dropping the fields not in the key for the
- two arrays and concatenating the result. This array is then sorted, and
- the common entries selected. The output is constructed by filling the fields
- with the selected entries. Matching is not preserved if there are some
- duplicates...
+ * A temporary array is formed by dropping the fields not in the key for
+ the two arrays and concatenating the result. This array is then
+ sorted, and the common entries selected. The output is constructed by
+ filling the fields with the selected entries. Matching is not
+ preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
- raise ValueError("The 'jointype' argument should be in 'inner', "\
- "'outer' or 'leftouter' (got '%s' instead)" % jointype)
+ raise ValueError(
+ "The 'jointype' argument should be in 'inner', "
+ "'outer' or 'leftouter' (got '%s' instead)" % jointype
+ )
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
@@ -896,7 +890,9 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
- (nb1, nb2) = (len(r1), len(r2))
+ # Fixme: nb2 below is never used. Commenting out for pyflakes.
+ # (nb1, nb2) = (len(r1), len(r2))
+ nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
@@ -964,14 +960,15 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
- # Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
+ # Find the largest nb of common fields :
+ # r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
- if f not in names or (f in r2names and not r2postfix and not f in key):
+ if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 3da86d9c8..e07caf805 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -17,16 +17,21 @@ correctly handled. See their respective docstrings for specific examples.
"""
from __future__ import division, absolute_import, print_function
-__all__ = ['sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos',
- 'arcsin', 'arctanh']
-
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.lib.type_check import isreal
+
+__all__ = [
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
+ 'arctanh'
+ ]
+
+
_ln2 = nx.log(2.0)
+
def _tocomplex(arr):
"""Convert its input `arr` to a complex array.
@@ -109,9 +114,10 @@ def _fix_real_lt_zero(x):
>>> np.lib.scimath._fix_real_lt_zero([-1,2])
array([-1.+0.j, 2.+0.j])
+
"""
x = asarray(x)
- if any(isreal(x) & (x<0)):
+ if any(isreal(x) & (x < 0)):
x = _tocomplex(x)
return x
@@ -163,7 +169,7 @@ def _fix_real_abs_gt_1(x):
array([ 0.+0.j, 2.+0.j])
"""
x = asarray(x)
- if any(isreal(x) & (abs(x)>1)):
+ if any(isreal(x) & (abs(x) > 1)):
x = _tocomplex(x)
return x
diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py
index 153af314c..68d99c33a 100644
--- a/numpy/lib/setup.py
+++ b/numpy/lib/setup.py
@@ -9,7 +9,6 @@ def configuration(parent_package='',top_path=None):
config.add_include_dirs(join('..', 'core', 'include'))
-
config.add_extension('_compiled_base',
sources=[join('src', '_compiled_base.c')]
)
@@ -19,6 +18,6 @@ def configuration(parent_package='',top_path=None):
return config
-if __name__=='__main__':
+if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index a6d391728..70fa3ab03 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,16 +1,21 @@
from __future__ import division, absolute_import, print_function
-__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit',
- 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
- 'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
-
import warnings
import numpy.core.numeric as _nx
-from numpy.core.numeric import asarray, zeros, newaxis, outer, \
- concatenate, isscalar, array, asanyarray
+from numpy.core.numeric import (
+ asarray, zeros, outer, concatenate, isscalar, array, asanyarray
+ )
from numpy.core.fromnumeric import product, reshape
-from numpy.core import hstack, vstack, atleast_3d
+from numpy.core import vstack, atleast_3d
+
+
+__all__ = [
+ 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
+ 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
+ ]
+
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
@@ -196,7 +201,8 @@ def apply_over_axes(func, a, axes):
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
- if axis < 0: axis = N + axis
+ if axis < 0:
+ axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
@@ -368,7 +374,7 @@ def _replace_zero_by_x_arrays(sub_arys):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
-def array_split(ary,indices_or_sections,axis = 0):
+def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
@@ -392,23 +398,26 @@ def array_split(ary,indices_or_sections,axis = 0):
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
- try: # handle scalar case.
+ try:
+ # handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
- except TypeError: #indices_or_sections is a scalar, not an array.
+ except TypeError:
+ # indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
- section_sizes = [0] + \
- extras * [Neach_section+1] + \
- (Nsections-extras) * [Neach_section]
+ section_sizes = ([0] +
+ extras * [Neach_section+1] +
+ (Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
- st = div_points[i]; end = div_points[i+1]
+ st = div_points[i]
+ end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
# This "kludge" was introduced here to replace arrays shaped (0, 10)
@@ -488,12 +497,14 @@ def split(ary,indices_or_sections,axis=0):
array([], dtype=float64)]
"""
- try: len(indices_or_sections)
+ try:
+ len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
- raise ValueError('array split does not result in an equal division')
+ raise ValueError(
+ 'array split does not result in an equal division')
res = array_split(ary, indices_or_sections, axis)
return res
@@ -845,7 +856,7 @@ def tile(A, reps):
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
- if nrep!=1:
+ if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
dim_in = shape[i]
dim_out = dim_in*nrep
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 1ffaaee36..12f8bbf13 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -12,9 +12,10 @@ import numpy as np
__all__ = ['broadcast_arrays']
class DummyArray(object):
- """ Dummy object that just exists to hang __array_interface__ dictionaries
+ """Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
+
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
@@ -81,8 +82,8 @@ def broadcast_arrays(*args):
strides = [list(x.strides) for x in args]
nds = [len(s) for s in shapes]
biggest = max(nds)
- # Go through each array and prepend dimensions of length 1 to each of the
- # shapes in order to make the number of dimensions equal.
+ # Go through each array and prepend dimensions of length 1 to each of
+ # the shapes in order to make the number of dimensions equal.
for i in range(len(args)):
diff = biggest - nds[i]
if diff > 0:
@@ -99,23 +100,24 @@ def broadcast_arrays(*args):
raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2:
- # There is exactly one non-1 length. The common shape will take this
- # value.
+ # There is exactly one non-1 length. The common shape will take
+ # this value.
unique.remove(1)
new_length = unique.pop()
common_shape.append(new_length)
- # For each array, if this axis is being broadcasted from a length of
- # 1, then set its stride to 0 so that it repeats its data.
+ # For each array, if this axis is being broadcasted from a
+ # length of 1, then set its stride to 0 so that it repeats its
+ # data.
for i in range(len(args)):
if shapes[i][axis] == 1:
shapes[i][axis] = new_length
strides[i][axis] = 0
else:
- # Every array has a length of 1 on this axis. Strides can be left
- # alone as nothing is broadcasted.
+ # Every array has a length of 1 on this axis. Strides can be
+ # left alone as nothing is broadcasted.
common_shape.append(1)
# Construct the new arrays.
broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in
- zip(args, shapes, strides)]
+ zip(args, shapes, strides)]
return broadcasted
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index f61d3086b..090f71f67 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -2,11 +2,14 @@ from __future__ import division, absolute_import, print_function
import os
import sys
-import numpy.lib._datasource as datasource
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
+
from numpy.compat import asbytes
-from numpy.testing import *
+from numpy.testing import (
+ run_module_suite, TestCase, assert_
+ )
+import numpy.lib._datasource as datasource
if sys.version_info[0] >= 3:
import urllib.request as urllib_request
@@ -63,7 +66,7 @@ def valid_textfile(filedir):
def invalid_textfile(filedir):
# Generate and return an invalid filename.
- fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
os.close(fd)
os.remove(path)
return path
@@ -213,7 +216,7 @@ class TestDataSourceAbspath(TestCase):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertEqual(tmpfile, self.ds.abspath(os.path.split(tmpfile)[-1]))
+ self.assertEqual(tmpfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index d564121d1..4db19382a 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -5,12 +5,14 @@ import time
from datetime import date
import numpy as np
+from numpy.compat import asbytes, asbytes_nested
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal
+ )
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
-from numpy.testing import *
-from numpy.compat import asbytes, asbytes_nested
class TestLineSplitter(TestCase):
@@ -285,7 +287,7 @@ class TestMiscFunctions(TestCase):
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
- # As list of types w names
+ # As list of types w names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', int), ('b', float), ('c', float)]))
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index e07f856bb..f8ba8643a 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -18,7 +18,7 @@ class TestStatistic(TestCase):
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
- 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
@@ -42,7 +42,7 @@ class TestStatistic(TestCase):
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
@@ -66,16 +66,16 @@ class TestStatistic(TestCase):
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
- 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
- 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
- 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
- 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
- 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
- 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
@@ -86,11 +86,11 @@ class TestStatistic(TestCase):
a = np.arange(100)
a = pad(a, (25, 20), 'minimum')
b = np.array(
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0,
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
@@ -101,8 +101,8 @@ class TestStatistic(TestCase):
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
@@ -110,11 +110,11 @@ class TestStatistic(TestCase):
a = np.arange(100) + 2
a = pad(a, (25, 20), 'minimum')
b = np.array(
- [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2,
+ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2,
- 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
@@ -125,8 +125,8 @@ class TestStatistic(TestCase):
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
@@ -138,16 +138,16 @@ class TestStatistic(TestCase):
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
- 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
- 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
- 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
- 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
- 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
- 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
- 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
- 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
- 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
- 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
@@ -158,13 +158,13 @@ class TestStatistic(TestCase):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a, 1, 'median')
b = np.array(
- [[4, 4, 5, 4, 4],
+ [[4, 4, 5, 4, 4],
- [3, 3, 1, 4, 3],
- [5, 4, 5, 9, 5],
- [8, 9, 8, 2, 8],
+ [3, 3, 1, 4, 3],
+ [5, 4, 5, 9, 5],
+ [8, 9, 8, 2, 8],
- [4, 4, 5, 4, 4]]
+ [4, 4, 5, 4, 4]]
)
assert_array_equal(a, b)
@@ -172,13 +172,13 @@ class TestStatistic(TestCase):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a.T, 1, 'median').T
b = np.array(
- [[5, 4, 5, 4, 5],
+ [[5, 4, 5, 4, 5],
- [3, 3, 1, 4, 3],
- [5, 4, 5, 9, 5],
- [8, 9, 8, 2, 8],
+ [3, 3, 1, 4, 3],
+ [5, 4, 5, 9, 5],
+ [8, 9, 8, 2, 8],
- [5, 4, 5, 4, 5]]
+ [5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
@@ -186,21 +186,21 @@ class TestStatistic(TestCase):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
- [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
-
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
-
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
- [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
+ [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
)
assert_array_equal(a, b)
@@ -212,16 +212,16 @@ class TestStatistic(TestCase):
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
- 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
- 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
- 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
- 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
- 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
- 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
- 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
- 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
- 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
- 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
@@ -238,7 +238,7 @@ class TestConstant(TestCase):
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
@@ -276,7 +276,7 @@ class TestLinearRamp(TestCase):
90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
- 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
+ 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
)
assert_array_almost_equal(a, b, decimal=5)
@@ -287,10 +287,10 @@ class TestReflect(TestCase):
a = pad(a, (25, 20), 'reflect')
b = np.array(
[25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
- 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
- 5, 4, 3, 2, 1,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
@@ -310,22 +310,22 @@ class TestReflect(TestCase):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'reflect')
b = np.array(
- [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
-
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
-
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
+ [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
)
assert_array_equal(a, b)
@@ -333,21 +333,21 @@ class TestReflect(TestCase):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'reflect')
b = np.array(
- [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
-
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
-
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
- [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
+ [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
)
assert_array_equal(a, b)
@@ -376,7 +376,7 @@ class TestWrap(TestCase):
85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
@@ -387,7 +387,7 @@ class TestWrap(TestCase):
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
)
assert_array_equal(a, b)
@@ -397,58 +397,58 @@ class TestWrap(TestCase):
a = np.reshape(a, (3, 4))
a = pad(a, (10, 12), 'wrap')
b = np.array(
- [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
-
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
-
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11],
- [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
- 3, 0, 1, 2, 3, 0, 1, 2, 3],
- [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
- 7, 4, 5, 6, 7, 4, 5, 6, 7],
- [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
- 11, 8, 9, 10, 11, 8, 9, 10, 11]]
+ [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11]]
)
assert_array_equal(a, b)
@@ -469,19 +469,19 @@ class TestStatLen(TestCase):
a = np.reshape(a, (6, 5))
a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
b = np.array(
- [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
- [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
-
- [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
- [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
- [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
- [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
- [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
-
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
+ [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+
+ [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
+ [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
+
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
)
assert_array_equal(a, b)
@@ -492,17 +492,17 @@ class TestEdge(TestCase):
a = np.reshape(a, (4, 3))
a = pad(a, ((2, 3), (3, 2)), 'edge')
b = np.array(
- [[0, 0, 0, 0, 1, 2, 2, 2],
- [0, 0, 0, 0, 1, 2, 2, 2],
+ [[0, 0, 0, 0, 1, 2, 2, 2],
+ [0, 0, 0, 0, 1, 2, 2, 2],
- [0, 0, 0, 0, 1, 2, 2, 2],
- [3, 3, 3, 3, 4, 5, 5, 5],
- [6, 6, 6, 6, 7, 8, 8, 8],
- [9, 9, 9, 9, 10, 11, 11, 11],
+ [0, 0, 0, 0, 1, 2, 2, 2],
+ [3, 3, 3, 3, 4, 5, 5, 5],
+ [6, 6, 6, 6, 7, 8, 8, 8],
+ [9, 9, 9, 9, 10, 11, 11, 11],
- [9, 9, 9, 9, 10, 11, 11, 11],
- [9, 9, 9, 9, 10, 11, 11, 11],
- [9, 9, 9, 9, 10, 11, 11, 11]]
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11]]
)
assert_array_equal(a, b)
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 271943abc..e83f8552e 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -3,11 +3,13 @@
"""
from __future__ import division, absolute_import, print_function
-from numpy.testing import *
import numpy as np
-from numpy.lib.arraysetops import *
-
-import warnings
+from numpy.testing import (
+ run_module_suite, TestCase, assert_array_equal
+ )
+from numpy.lib.arraysetops import (
+ ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d
+ )
class TestSetOps(TestCase):
@@ -94,8 +96,8 @@ class TestSetOps(TestCase):
check_all(aa, bb, i1, i2, c, dt)
# test for ticket #2799
- aa = [1.+0.j, 1- 1.j, 1]
- assert_array_equal(np.unique(aa), [ 1.-1.j, 1.+0.j])
+ aa = [1. + 0.j, 1 - 1.j, 1]
+ assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])
# test for ticket #4785
a = [(1, 2), (1, 2), (2, 3)]
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index 41a060a3f..a4b9cfe2e 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -1,7 +1,9 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import *
import numpy as np
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_almost_equal
+ )
class TestFinancial(TestCase):
@@ -139,7 +141,7 @@ class TestFinancial(TestCase):
def test_broadcast(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
- [21.5449442, 20.76156441], 4)
+ [21.5449442, 20.76156441], 4)
assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000),
[-17.29165168, -16.66666667, -16.03647345,
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 1034b5125..eea6f1e5c 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -284,9 +284,12 @@ import warnings
from io import BytesIO
import numpy as np
-from numpy.testing import *
-from numpy.lib import format
from numpy.compat import asbytes, asbytes_nested
+from numpy.testing import (
+ run_module_suite, assert_, assert_array_equal, assert_raises, raises,
+ dec
+ )
+from numpy.lib import format
tempdir = None
@@ -445,7 +448,7 @@ def roundtrip_truncated(arr):
return arr2
-def assert_equal(o1, o2):
+def assert_equal_(o1, o2):
assert_(o1 == o2)
@@ -477,7 +480,7 @@ def test_long_str():
@dec.slow
def test_memmap_roundtrip():
- # XXX: test crashes nose on windows. Fix this
+ # Fixme: test crashes nose on windows.
if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
for arr in basic_arrays + record_arrays:
if arr.dtype.hasobject:
@@ -506,11 +509,10 @@ def test_memmap_roundtrip():
fp = open(mfn, 'rb')
memmap_bytes = fp.read()
fp.close()
- yield assert_equal, normal_bytes, memmap_bytes
+ yield assert_equal_, normal_bytes, memmap_bytes
# Check that reading the file using memmap works.
ma = format.open_memmap(nfn, mode='r')
- #yield assert_array_equal, ma, arr
del ma
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 6b01464a7..97047c53a 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -1,10 +1,14 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import *
import numpy as np
-from numpy import (array, ones, r_, mgrid, unravel_index, zeros, where,
- ndenumerate, fill_diagonal, diag_indices,
- diag_indices_from, s_, index_exp, ndindex)
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal, assert_raises
+ )
+from numpy.lib.index_tricks import (
+ mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
+ index_exp, ndindex, r_, s_
+ )
class TestRavelUnravelIndex(TestCase):
@@ -106,18 +110,20 @@ class TestGrid(TestCase):
d = mgrid[-1:1:0.1, -2:2:0.2]
assert_(c.shape == (2, 10, 10))
assert_(d.shape == (2, 20, 20))
- assert_array_equal(c[0][0, :], -ones(10, 'd'))
- assert_array_equal(c[1][:, 0], -2*ones(10, 'd'))
- assert_array_almost_equal(c[0][-1, :], ones(10, 'd'), 11)
- assert_array_almost_equal(c[1][:, -1], 2*ones(10, 'd'), 11)
- assert_array_almost_equal(d[0, 1, :]-d[0, 0, :], 0.1*ones(20, 'd'), 11)
- assert_array_almost_equal(d[1, :, 1]-d[1, :, 0], 0.2*ones(20, 'd'), 11)
+ assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
+ assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd'))
+ assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
+ assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11)
+ assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
+ 0.1*np.ones(20, 'd'), 11)
+ assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
+ 0.2*np.ones(20, 'd'), 11)
class TestConcatenator(TestCase):
def test_1d(self):
- assert_array_equal(r_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
- b = ones(5)
+ assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
+ b = np.ones(5)
c = r_[b, 0, 0, b]
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
@@ -126,12 +132,12 @@ class TestConcatenator(TestCase):
assert_(g.dtype == 'f8')
def test_more_mixed_type(self):
- g = r_[-10.1, array([1]), array([2, 3, 4]), 10.0]
+ g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
assert_(g.dtype == 'f8')
def test_2d(self):
- b = rand(5, 5)
- c = rand(5, 5)
+ b = np.random.rand(5, 5)
+ c = np.random.rand(5, 5)
d = r_['1', b, c] # append columns
assert_(d.shape == (5, 10))
assert_array_equal(d[:, :5], b)
@@ -144,7 +150,7 @@ class TestConcatenator(TestCase):
class TestNdenumerate(TestCase):
def test_basic(self):
- a = array([[1, 2], [3, 4]])
+ a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
@@ -169,18 +175,18 @@ def test_c_():
def test_fill_diagonal():
- a = zeros((3, 3), int)
+ a = np.zeros((3, 3), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
- array([[5, 0, 0],
+ np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]]))
#Test tall matrix
- a = zeros((10, 3), int)
+ a = np.zeros((10, 3), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
- array([[5, 0, 0],
+ np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
@@ -192,10 +198,10 @@ def test_fill_diagonal():
[0, 0, 0]]))
#Test tall matrix wrap
- a = zeros((10, 3), int)
+ a = np.zeros((10, 3), int)
fill_diagonal(a, 5, True)
yield (assert_array_equal, a,
- array([[5, 0, 0],
+ np.array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
@@ -207,41 +213,41 @@ def test_fill_diagonal():
[0, 5, 0]]))
#Test wide matrix
- a = zeros((3, 10), int)
+ a = np.zeros((3, 10), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
- array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
# The same function can operate on a 4-d array:
- a = zeros((3, 3, 3, 3), int)
+ a = np.zeros((3, 3, 3, 3), int)
fill_diagonal(a, 4)
- i = array([0, 1, 2])
- yield (assert_equal, where(a != 0), (i, i, i, i))
+ i = np.array([0, 1, 2])
+ yield (assert_equal, np.where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
- a = array([[1, 2, 3, 4],
- [5, 6, 7, 8],
- [9, 10, 11, 12],
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
[13, 14, 15, 16]])
a[di] = 100
yield (assert_array_equal, a,
- array([[100, 2, 3, 4],
- [5, 100, 7, 8],
- [9, 10, 100, 12],
- [13, 14, 15, 100]]))
+ np.array([[100, 2, 3, 4],
+ [5, 100, 7, 8],
+ [9, 10, 100, 12],
+ [13, 14, 15, 100]]))
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
# And use it to set the diagonal of a zeros array to 1:
- a = zeros((2, 2, 2), int)
+ a = np.zeros((2, 2, 2), int)
a[d3] = 1
yield (assert_array_equal, a,
- array([[[1, 0],
+ np.array([[[1, 0],
[0, 0]],
[[0, 0],
@@ -256,26 +262,26 @@ def test_diag_indices_from():
def test_ndindex():
- x = list(np.ndindex(1, 2, 3))
- expected = [ix for ix, e in np.ndenumerate(np.zeros((1, 2, 3)))]
+ x = list(ndindex(1, 2, 3))
+ expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
assert_array_equal(x, expected)
- x = list(np.ndindex((1, 2, 3)))
+ x = list(ndindex((1, 2, 3)))
assert_array_equal(x, expected)
# Test use of scalars and tuples
- x = list(np.ndindex((3,)))
- assert_array_equal(x, list(np.ndindex(3)))
+ x = list(ndindex((3,)))
+ assert_array_equal(x, list(ndindex(3)))
# Make sure size argument is optional
- x = list(np.ndindex())
+ x = list(ndindex())
assert_equal(x, [()])
- x = list(np.ndindex(()))
+ x = list(ndindex(()))
assert_equal(x, [()])
# Make sure 0-sized ndindex works correctly
- x = list(np.ndindex(*[0]))
+ x = list(ndindex(*[0]))
assert_equal(x, [])
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index c5af61434..3da6b5149 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -10,17 +10,17 @@ from numpy.testing import (
# Test data
-_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
- [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
- [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
- [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
+_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
+ [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
+ [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
+ [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
-_rdat = [np.array([ 0.6244, 0.2692, 0.0116, 0.1170]),
- np.array([ 0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
- np.array([ 0.1042, -0.5954]),
- np.array([ 0.1610, 0.1859, 0.3146])]
+_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
+ np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
+ np.array([0.1042, -0.5954]),
+ np.array([0.1610, 0.1859, 0.3146])]
class TestNanFunctions_MinMax(TestCase):
@@ -205,7 +205,7 @@ class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
- mat = np.array([127, 39, 93, 87, 46])
+ mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
@@ -383,13 +383,13 @@ class TestNanFunctions_MeanVarStd(TestCase):
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object]:
- assert_raises( TypeError, f, _ndat, axis=1, dtype=np.int)
+ assert_raises(TypeError, f, _ndat, axis=1, dtype=np.int)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object]:
out = np.empty(_ndat.shape[0], dtype=dtype)
- assert_raises( TypeError, f, _ndat, axis=1, out=out)
+ assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_keepdims(self):
mat = np.eye(3)
@@ -587,7 +587,7 @@ class TestNanFunctions_Median(TestCase):
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
- d[:,0] = 1. # ensure at least one good value
+ d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 4adc6aed8..02faa0283 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -78,8 +78,11 @@ poly1d([ 2.])
(poly1d([ 1., -1.]), poly1d([ 0.]))
'''
-from numpy.testing import *
import numpy as np
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ assert_almost_equal, rundocs
+ )
class TestDocs(TestCase):
@@ -118,10 +121,10 @@ class TestDocs(TestCase):
assert_almost_equal(val0, cov, decimal=4)
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
- assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
- val = [[8.7929, -10.0103, 0.9756],
- [-10.0103, 13.6134, -1.8178],
- [0.9756, -1.8178, 0.6674]]
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
+ val = [[8.7929, -10.0103, 0.9756],
+ [-10.0103, 13.6134, -1.8178],
+ [0.9756, -1.8178, 0.6674]]
assert_almost_equal(val, cov2, decimal=4)
# check 2D (n,1) case
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index d0eda50ce..51a2077eb 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -1,14 +1,15 @@
from __future__ import division, absolute_import, print_function
-import sys
-
import numpy as np
import numpy.ma as ma
-from numpy.ma.testutils import *
-
from numpy.ma.mrecords import MaskedRecords
-
-from numpy.lib.recfunctions import *
+from numpy.ma.testutils import (
+ run_module_suite, TestCase, assert_, assert_equal
+ )
+from numpy.lib.recfunctions import (
+ drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
+ find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
+ )
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
@@ -293,11 +294,12 @@ class TestMergeArrays(TestCase):
assert_equal(test, control)
test = merge_arrays((x, w), flatten=False)
- controldtype = dtype = [('f0', int),
+ controldtype = [('f0', int),
('f1', [('a', int),
('b', [('ba', float), ('bb', int)])])]
control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
dtype=controldtype)
+ assert_equal(test, control)
def test_wmasked_arrays(self):
# Test merge_arrays masked arrays
@@ -324,9 +326,17 @@ class TestMergeArrays(TestCase):
def test_w_shorter_flex(self):
# Test merge_arrays w/ a shorter flexndarray.
z = self.data[-1]
- test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
- control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
- dtype=[('A', '|S3'), ('B', float), ('C', int)])
+
+ # Fixme, this test looks incomplete and broken
+ #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
+ #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
+ # dtype=[('A', '|S3'), ('B', float), ('C', int)])
+ #assert_equal(test, control)
+
+ # Hack to avoid pyflakes warnings about unused variables
+ merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
+ np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
+ dtype=[('A', '|S3'), ('B', float), ('C', int)])
def test_singlerecord(self):
(_, x, y, z) = self.data
@@ -562,12 +572,22 @@ class TestJoinBy(TestCase):
def test_join(self):
a, b = self.a, self.b
- test = join_by(('a', 'b'), a, b)
- control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
- (7, 57, 107, 102), (8, 58, 108, 103),
- (9, 59, 109, 104)],
- dtype=[('a', int), ('b', int),
- ('c', int), ('d', int)])
+ # Fixme, this test is broken
+ #test = join_by(('a', 'b'), a, b)
+ #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
+ # (7, 57, 107, 102), (8, 58, 108, 103),
+ # (9, 59, 109, 104)],
+ # dtype=[('a', int), ('b', int),
+ # ('c', int), ('d', int)])
+ #assert_equal(test, control)
+
+ # Hack to avoid pyflakes unused variable warnings
+ join_by(('a', 'b'), a, b)
+ np.array([(5, 55, 105, 100), (6, 56, 106, 101),
+ (7, 57, 107, 102), (8, 58, 108, 103),
+ (9, 59, 109, 104)],
+ dtype=[('a', int), ('b', int),
+ ('c', int), ('d', int)])
def test_outer_join(self):
a, b = self.a, self.b
@@ -612,6 +632,7 @@ class TestJoinBy(TestCase):
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
+ assert_equal(test, control)
class TestJoinBy2(TestCase):
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index eb991eeb0..00fa3f195 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -4,7 +4,10 @@ import os
import sys
import numpy as np
-from numpy.testing import *
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_raises
+ )
from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import unicode
@@ -13,12 +16,12 @@ rlevel = 1
class TestRegression(TestCase):
def test_poly1d(self, level=rlevel):
- """Ticket #28"""
+ # Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
np.poly1d([-1, 1]))
def test_cov_parameters(self, level=rlevel):
- """Ticket #91"""
+ # Ticket #91
x = np.random.random((3, 3))
y = x.copy()
np.cov(x, rowvar=1)
@@ -26,68 +29,68 @@ class TestRegression(TestCase):
assert_array_equal(x, y)
def test_mem_digitize(self, level=rlevel):
- """Ticket #95"""
+ # Ticket #95
for i in range(100):
np.digitize([1, 2, 3, 4], [1, 3])
np.digitize([0, 1, 2, 3, 4], [1, 3])
def test_unique_zero_sized(self, level=rlevel):
- """Ticket #205"""
+ # Ticket #205
assert_array_equal([], np.unique(np.array([])))
def test_mem_vectorise(self, level=rlevel):
- """Ticket #325"""
+ # Ticket #325
vt = np.vectorize(lambda *args: args)
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
1, 2)), np.zeros((2, 2)))
def test_mgrid_single_element(self, level=rlevel):
- """Ticket #339"""
+ # Ticket #339
assert_array_equal(np.mgrid[0:0:1j], [0])
assert_array_equal(np.mgrid[0:0], [])
def test_refcount_vectorize(self, level=rlevel):
- """Ticket #378"""
+ # Ticket #378
def p(x, y):
return 123
v = np.vectorize(p)
_assert_valid_refcount(v)
def test_poly1d_nan_roots(self, level=rlevel):
- """Ticket #396"""
+ # Ticket #396
p = np.poly1d([np.nan, np.nan, 1], r=0)
self.assertRaises(np.linalg.LinAlgError, getattr, p, "r")
def test_mem_polymul(self, level=rlevel):
- """Ticket #448"""
+ # Ticket #448
np.polymul([], [1.])
def test_mem_string_concat(self, level=rlevel):
- """Ticket #469"""
+ # Ticket #469
x = np.array([])
np.append(x, 'asdasd\tasdasd')
def test_poly_div(self, level=rlevel):
- """Ticket #553"""
+ # Ticket #553
u = np.poly1d([1, 2, 3])
v = np.poly1d([1, 2, 3, 4, 5])
q, r = np.polydiv(u, v)
assert_equal(q*v + r, u)
def test_poly_eq(self, level=rlevel):
- """Ticket #554"""
+ # Ticket #554
x = np.poly1d([1, 2, 3])
y = np.poly1d([3, 4])
assert_(x != y)
assert_(x == x)
def test_mem_insert(self, level=rlevel):
- """Ticket #572"""
+ # Ticket #572
np.lib.place(1, 1, 1)
def test_polyfit_build(self):
- """Ticket #628"""
+ # Ticket #628
ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
9.95368241e+00, -3.14526520e+02]
x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
@@ -108,7 +111,7 @@ class TestRegression(TestCase):
assert_array_almost_equal(ref, tested)
def test_polydiv_type(self):
- """Make polydiv work for complex types"""
+ # Make polydiv work for complex types
msg = "Wrong type, should be complex"
x = np.ones(3, dtype=np.complex)
q, r = np.polydiv(x, x)
@@ -119,11 +122,11 @@ class TestRegression(TestCase):
assert_(q.dtype == np.float, msg)
def test_histogramdd_too_many_bins(self):
- """Ticket 928."""
+ # Ticket 928.
assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)
def test_polyint_type(self):
- """Ticket #944"""
+ # Ticket #944
msg = "Wrong type, should be complex"
x = np.ones(3, dtype=np.complex)
assert_(np.polyint(x).dtype == np.complex, msg)
@@ -132,12 +135,12 @@ class TestRegression(TestCase):
assert_(np.polyint(x).dtype == np.float, msg)
def test_ndenumerate_crash(self):
- """Ticket 1140"""
+ # Ticket 1140
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
def test_asfarray_none(self, level=rlevel):
- """Test for changeset r5065"""
+ # Test for changeset r5065
assert_array_equal(np.array([np.nan]), np.asfarray([None]))
def test_large_fancy_indexing(self, level=rlevel):
@@ -155,7 +158,8 @@ class TestRegression(TestCase):
n = 3
a = np.ones((n,)*5)
i = np.random.randint(0, n, size=thesize)
- g = a[np.ix_(i, i, i, i, i)]
+ a[np.ix_(i, i, i, i, i)]
+
self.assertRaises(ValueError, dp)
self.assertRaises(ValueError, dp2)
@@ -165,7 +169,7 @@ class TestRegression(TestCase):
assert_(np.r_[x, x].dtype == dt)
def test_who_with_0dim_array(self, level=rlevel):
- """ticket #1243"""
+ # ticket #1243
import os
import sys
@@ -173,7 +177,7 @@ class TestRegression(TestCase):
sys.stdout = open(os.devnull, 'w')
try:
try:
- tmp = np.who({'foo': np.array(1)})
+ np.who({'foo': np.array(1)})
except:
raise AssertionError("ticket #1243")
finally:
@@ -183,29 +187,29 @@ class TestRegression(TestCase):
def test_include_dirs(self):
# As a sanity check, just test that get_include
# includes something reasonable. Somewhat
- # related to ticket #1405."""
+ # related to ticket #1405.
include_dirs = [np.get_include()]
for path in include_dirs:
assert_(isinstance(path, (str, unicode)))
assert_(path != '')
def test_polyder_return_type(self):
- """Ticket #1249"""
+ # Ticket #1249
assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
assert_(isinstance(np.polyder([1], 0), np.ndarray))
assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
assert_(isinstance(np.polyder([1], 1), np.ndarray))
def test_append_fields_dtype_list(self):
- """Ticket #1676"""
+ # Ticket #1676
from numpy.lib.recfunctions import append_fields
- F = False
+
base = np.array([1, 2, 3], dtype=np.int32)
- data = np.eye(3).astype(np.int32)
names = ['a', 'b', 'c']
+ data = np.eye(3).astype(np.int32)
dlist = [np.float64, np.int32, np.int32]
try:
- a = append_fields(base, names, data, dlist)
+ append_fields(base, names, data, dlist)
except:
raise AssertionError()
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index d6d01ba99..23f3edfbe 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -1,214 +1,216 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import *
-from numpy.lib import *
-from numpy.core import *
-from numpy import matrix, asmatrix
+import numpy as np
+from numpy.lib.shape_base import (
+ apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
+ vsplit, dstack, kron, tile
+ )
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ assert_raises, assert_warns
+ )
class TestApplyAlongAxis(TestCase):
def test_simple(self):
- a = ones((20, 10), 'd')
+ a = np.ones((20, 10), 'd')
assert_array_equal(
- apply_along_axis(len, 0, a), len(a)*ones(shape(a)[1]))
+ apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
def test_simple101(self, level=11):
- a = ones((10, 101), 'd')
+ a = np.ones((10, 101), 'd')
assert_array_equal(
- apply_along_axis(len, 0, a), len(a)*ones(shape(a)[1]))
+ apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
def test_3d(self):
- a = arange(27).reshape((3, 3, 3))
- assert_array_equal(apply_along_axis(sum, 0, a),
+ a = np.arange(27).reshape((3, 3, 3))
+ assert_array_equal(apply_along_axis(np.sum, 0, a),
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
class TestApplyOverAxes(TestCase):
def test_simple(self):
- a = arange(24).reshape(2, 3, 4)
- aoa_a = apply_over_axes(sum, a, [0, 2])
- assert_array_equal(aoa_a, array([[[60], [92], [124]]]))
+ a = np.arange(24).reshape(2, 3, 4)
+ aoa_a = apply_over_axes(np.sum, a, [0, 2])
+ assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
class TestArraySplit(TestCase):
def test_integer_0_split(self):
- a = arange(10)
- try:
- res = array_split(a, 0)
- assert_(0) # it should have thrown a value error
- except ValueError:
- pass
+ a = np.arange(10)
+ assert_raises(ValueError, array_split, a, 0)
def test_integer_split(self):
- a = arange(10)
+ a = np.arange(10)
res = array_split(a, 1)
- desired = [arange(10)]
+ desired = [np.arange(10)]
compare_results(res, desired)
res = array_split(a, 2)
- desired = [arange(5), arange(5, 10)]
+ desired = [np.arange(5), np.arange(5, 10)]
compare_results(res, desired)
res = array_split(a, 3)
- desired = [arange(4), arange(4, 7), arange(7, 10)]
+ desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
compare_results(res, desired)
res = array_split(a, 4)
- desired = [arange(3), arange(3, 6), arange(6, 8), arange(8, 10)]
+ desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
+ np.arange(8, 10)]
compare_results(res, desired)
res = array_split(a, 5)
- desired = [arange(
- 2), arange(2, 4), arange(4, 6), arange(6, 8), arange(8, 10)]
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 8), np.arange(8, 10)]
compare_results(res, desired)
res = array_split(a, 6)
- desired = [arange(
- 2), arange(2, 4), arange(4, 6), arange(6, 8), arange(8, 9),
- arange(9, 10)]
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
compare_results(res, desired)
res = array_split(a, 7)
- desired = [arange(
- 2), arange(2, 4), arange(4, 6), arange(6, 7), arange(7, 8),
- arange(8, 9), arange(9, 10)]
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10)]
compare_results(res, desired)
res = array_split(a, 8)
- desired = [arange(
- 2), arange(2, 4), arange(4, 5), arange(5, 6), arange(6, 7),
- arange(7, 8), arange(8, 9), arange(9, 10)]
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
+ np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
+ np.arange(8, 9), np.arange(9, 10)]
compare_results(res, desired)
res = array_split(a, 9)
- desired = [arange(
- 2), arange(2, 3), arange(3, 4), arange(4, 5), arange(5, 6),
- arange(6, 7), arange(7, 8), arange(8, 9), arange(9, 10)]
+ desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
+ np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
+ np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
compare_results(res, desired)
res = array_split(a, 10)
- desired = [arange(1), arange(1, 2), arange(2, 3), arange(3, 4),
- arange(4, 5), arange(5, 6), arange(6, 7), arange(7, 8),
- arange(8, 9), arange(9, 10)]
+ desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
+ np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10)]
compare_results(res, desired)
res = array_split(a, 11)
- desired = [arange(1), arange(1, 2), arange(2, 3), arange(3, 4),
- arange(4, 5), arange(5, 6), arange(6, 7), arange(7, 8),
- arange(8, 9), arange(9, 10), array([])]
+ desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
+ np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10), np.array([])]
compare_results(res, desired)
def test_integer_split_2D_rows(self):
- a = array([arange(10), arange(10)])
+ a = np.array([np.arange(10), np.arange(10)])
res = assert_warns(FutureWarning, array_split, a, 3, axis=0)
# After removing the FutureWarning, the last should be zeros((0, 10))
- desired = [array([arange(10)]), array([arange(10)]), array([])]
+ desired = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.array([])]
compare_results(res, desired)
assert_(a.dtype.type is res[-1].dtype.type)
def test_integer_split_2D_cols(self):
- a = array([arange(10), arange(10)])
+ a = np.array([np.arange(10), np.arange(10)])
res = array_split(a, 3, axis=-1)
- desired = [array([arange(4), arange(4)]),
- array([arange(4, 7), arange(4, 7)]),
- array([arange(7, 10), arange(7, 10)])]
+ desired = [np.array([np.arange(4), np.arange(4)]),
+ np.array([np.arange(4, 7), np.arange(4, 7)]),
+ np.array([np.arange(7, 10), np.arange(7, 10)])]
compare_results(res, desired)
def test_integer_split_2D_default(self):
""" This will fail if we change default axis
"""
- a = array([arange(10), arange(10)])
+ a = np.array([np.arange(10), np.arange(10)])
res = assert_warns(FutureWarning, array_split, a, 3)
# After removing the FutureWarning, the last should be zeros((0, 10))
- desired = [array([arange(10)]), array([arange(10)]), array([])]
+ desired = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.array([])]
compare_results(res, desired)
assert_(a.dtype.type is res[-1].dtype.type)
- #perhaps should check higher dimensions
+ # perhaps should check higher dimensions
def test_index_split_simple(self):
- a = arange(10)
+ a = np.arange(10)
indices = [1, 5, 7]
res = array_split(a, indices, axis=-1)
- desired = [arange(0, 1), arange(1, 5), arange(5, 7), arange(7, 10)]
+ desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
+ np.arange(7, 10)]
compare_results(res, desired)
def test_index_split_low_bound(self):
- a = arange(10)
+ a = np.arange(10)
indices = [0, 5, 7]
res = array_split(a, indices, axis=-1)
- desired = [array([]), arange(0, 5), arange(5, 7), arange(7, 10)]
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10)]
compare_results(res, desired)
def test_index_split_high_bound(self):
- a = arange(10)
+ a = np.arange(10)
indices = [0, 5, 7, 10, 12]
res = array_split(a, indices, axis=-1)
- desired = [array([]), arange(0, 5), arange(5, 7), arange(7, 10),
- array([]), array([])]
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10), np.array([]), np.array([])]
compare_results(res, desired)
class TestSplit(TestCase):
- """* This function is essentially the same as array_split,
- except that it test if splitting will result in an
- equal split. Only test for this case.
- *"""
+ # The split function is essentially the same as array_split,
+ # except that it tests if splitting will result in an
+ # equal split. Only test for this case.
+
def test_equal_split(self):
- a = arange(10)
+ a = np.arange(10)
res = split(a, 2)
- desired = [arange(5), arange(5, 10)]
+ desired = [np.arange(5), np.arange(5, 10)]
compare_results(res, desired)
def test_unequal_split(self):
- a = arange(10)
- try:
- res = split(a, 3)
- assert_(0) # should raise an error
- except ValueError:
- pass
+ a = np.arange(10)
+ assert_raises(ValueError, split, a, 3)
class TestDstack(TestCase):
def test_0D_array(self):
- a = array(1)
- b = array(2)
+ a = np.array(1)
+ b = np.array(2)
res = dstack([a, b])
- desired = array([[[1, 2]]])
+ desired = np.array([[[1, 2]]])
assert_array_equal(res, desired)
def test_1D_array(self):
- a = array([1])
- b = array([2])
+ a = np.array([1])
+ b = np.array([2])
res = dstack([a, b])
- desired = array([[[1, 2]]])
+ desired = np.array([[[1, 2]]])
assert_array_equal(res, desired)
def test_2D_array(self):
- a = array([[1], [2]])
- b = array([[1], [2]])
+ a = np.array([[1], [2]])
+ b = np.array([[1], [2]])
res = dstack([a, b])
- desired = array([[[1, 1]], [[2, 2, ]]])
+ desired = np.array([[[1, 1]], [[2, 2, ]]])
assert_array_equal(res, desired)
def test_2D_array2(self):
- a = array([1, 2])
- b = array([1, 2])
+ a = np.array([1, 2])
+ b = np.array([1, 2])
res = dstack([a, b])
- desired = array([[[1, 1], [2, 2]]])
+ desired = np.array([[[1, 1], [2, 2]]])
assert_array_equal(res, desired)
-""" array_split has more comprehensive test of splitting.
- only do simple test on hsplit, vsplit, and dsplit
-"""
-
+# array_split has more comprehensive test of splitting.
+# only do simple test on hsplit, vsplit, and dsplit
class TestHsplit(TestCase):
- """ only testing for integer splits.
+ """Only testing for integer splits.
+
"""
def test_0D_array(self):
- a = array(1)
+ a = np.array(1)
try:
hsplit(a, 2)
assert_(0)
@@ -216,24 +218,25 @@ class TestHsplit(TestCase):
pass
def test_1D_array(self):
- a = array([1, 2, 3, 4])
+ a = np.array([1, 2, 3, 4])
res = hsplit(a, 2)
- desired = [array([1, 2]), array([3, 4])]
+ desired = [np.array([1, 2]), np.array([3, 4])]
compare_results(res, desired)
def test_2D_array(self):
- a = array([[1, 2, 3, 4],
+ a = np.array([[1, 2, 3, 4],
[1, 2, 3, 4]])
res = hsplit(a, 2)
- desired = [array([[1, 2], [1, 2]]), array([[3, 4], [3, 4]])]
+ desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
compare_results(res, desired)
class TestVsplit(TestCase):
- """ only testing for integer splits.
+ """Only testing for integer splits.
+
"""
def test_1D_array(self):
- a = array([1, 2, 3, 4])
+ a = np.array([1, 2, 3, 4])
try:
vsplit(a, 2)
assert_(0)
@@ -241,18 +244,18 @@ class TestVsplit(TestCase):
pass
def test_2D_array(self):
- a = array([[1, 2, 3, 4],
+ a = np.array([[1, 2, 3, 4],
[1, 2, 3, 4]])
res = vsplit(a, 2)
- desired = [array([[1, 2, 3, 4]]), array([[1, 2, 3, 4]])]
+ desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
compare_results(res, desired)
class TestDsplit(TestCase):
- """ only testing for integer splits.
- """
+ # Only testing for integer splits.
+
def test_2D_array(self):
- a = array([[1, 2, 3, 4],
+ a = np.array([[1, 2, 3, 4],
[1, 2, 3, 4]])
try:
dsplit(a, 2)
@@ -261,54 +264,57 @@ class TestDsplit(TestCase):
pass
def test_3D_array(self):
- a = array([[[1, 2, 3, 4],
+ a = np.array([[[1, 2, 3, 4],
[1, 2, 3, 4]],
[[1, 2, 3, 4],
[1, 2, 3, 4]]])
res = dsplit(a, 2)
- desired = [array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
- array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
+ desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
+ np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
compare_results(res, desired)
class TestSqueeze(TestCase):
def test_basic(self):
+ from numpy.random import rand
+
a = rand(20, 10, 10, 1, 1)
b = rand(20, 1, 10, 1, 20)
c = rand(1, 1, 20, 10)
- assert_array_equal(squeeze(a), reshape(a, (20, 10, 10)))
- assert_array_equal(squeeze(b), reshape(b, (20, 10, 20)))
- assert_array_equal(squeeze(c), reshape(c, (20, 10)))
+ assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
+ assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
+ assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))
# Squeezing to 0-dim should still give an ndarray
a = [[[1.5]]]
- res = squeeze(a)
+ res = np.squeeze(a)
assert_equal(res, 1.5)
assert_equal(res.ndim, 0)
- assert_equal(type(res), ndarray)
+ assert_equal(type(res), np.ndarray)
class TestKron(TestCase):
def test_return_type(self):
- a = ones([2, 2])
- m = asmatrix(a)
- assert_equal(type(kron(a, a)), ndarray)
- assert_equal(type(kron(m, m)), matrix)
- assert_equal(type(kron(a, m)), matrix)
- assert_equal(type(kron(m, a)), matrix)
-
- class myarray(ndarray):
+ a = np.ones([2, 2])
+ m = np.asmatrix(a)
+ assert_equal(type(kron(a, a)), np.ndarray)
+ assert_equal(type(kron(m, m)), np.matrix)
+ assert_equal(type(kron(a, m)), np.matrix)
+ assert_equal(type(kron(m, a)), np.matrix)
+
+ class myarray(np.ndarray):
__array_priority__ = 0.0
+
ma = myarray(a.shape, a.dtype, a.data)
- assert_equal(type(kron(a, a)), ndarray)
+ assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
- assert_equal(type(kron(a, ma)), ndarray)
+ assert_equal(type(kron(a, ma)), np.ndarray)
assert_equal(type(kron(ma, a)), myarray)
class TestTile(TestCase):
def test_basic(self):
- a = array([0, 1, 2])
+ a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
@@ -319,18 +325,19 @@ class TestTile(TestCase):
[1, 2, 1, 2], [3, 4, 3, 4]])
def test_empty(self):
- a = array([[[]]])
+ a = np.array([[[]]])
d = tile(a, (3, 2, 5)).shape
assert_equal(d, (3, 2, 0))
def test_kroncompare(self):
- import numpy.random as nr
+ from numpy.random import randint
+
reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
for s in shape:
- b = nr.randint(0, 10, size=s)
+ b = randint(0, 10, size=s)
for r in reps:
- a = ones(r, b.dtype)
+ a = np.ones(r, b.dtype)
large = tile(b, r)
klarge = kron(a, b)
assert_equal(large, klarge)
@@ -338,17 +345,17 @@ class TestTile(TestCase):
class TestMayShareMemory(TestCase):
def test_basic(self):
- d = ones((50, 60))
- d2 = ones((30, 60, 6))
- self.assertTrue(may_share_memory(d, d))
- self.assertTrue(may_share_memory(d, d[::-1]))
- self.assertTrue(may_share_memory(d, d[::2]))
- self.assertTrue(may_share_memory(d, d[1:, ::-1]))
-
- self.assertFalse(may_share_memory(d[::-1], d2))
- self.assertFalse(may_share_memory(d[::2], d2))
- self.assertFalse(may_share_memory(d[1:, ::-1], d2))
- self.assertTrue(may_share_memory(d2[1:, ::-1], d2))
+ d = np.ones((50, 60))
+ d2 = np.ones((30, 60, 6))
+ self.assertTrue(np.may_share_memory(d, d))
+ self.assertTrue(np.may_share_memory(d, d[::-1]))
+ self.assertTrue(np.may_share_memory(d, d[::2]))
+ self.assertTrue(np.may_share_memory(d, d[1:, ::-1]))
+
+ self.assertFalse(np.may_share_memory(d[::-1], d2))
+ self.assertFalse(np.may_share_memory(d[::2], d2))
+ self.assertFalse(np.may_share_memory(d[1:, ::-1], d2))
+ self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 3adcf0b41..cd0973300 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,15 +1,17 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import *
-from numpy.lib.stride_tricks import broadcast_arrays
-from numpy.lib.stride_tricks import as_strided
+from numpy.testing import (
+ run_module_suite, assert_equal, assert_array_equal,
+ assert_raises
+ )
+from numpy.lib.stride_tricks import as_strided, broadcast_arrays
def assert_shapes_correct(input_shapes, expected_shape):
- """ Broadcast a list of arrays with the given input shapes and check the
- common output shape.
- """
+ # Broadcast a list of arrays with the given input shapes and check the
+ # common output shape.
+
inarrays = [np.zeros(s) for s in input_shapes]
outarrays = broadcast_arrays(*inarrays)
outshapes = [a.shape for a in outarrays]
@@ -18,17 +20,17 @@ def assert_shapes_correct(input_shapes, expected_shape):
def assert_incompatible_shapes_raise(input_shapes):
- """ Broadcast a list of arrays with the given (incompatible) input shapes
- and check that they raise a ValueError.
- """
+ # Broadcast a list of arrays with the given (incompatible) input shapes
+ # and check that they raise a ValueError.
+
inarrays = [np.zeros(s) for s in input_shapes]
assert_raises(ValueError, broadcast_arrays, *inarrays)
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
- """ Broadcast two shapes against each other and check that the data layout
- is the same as if a ufunc did the broadcasting.
- """
+ # Broadcast two shapes against each other and check that the data layout
+ # is the same as if a ufunc did the broadcasting.
+
x0 = np.zeros(shape0, dtype=int)
# Note that multiply.reduce's identity element is 1.0, so when shape1==(),
# this gives the desired n==1.
@@ -66,8 +68,8 @@ def test_one_off():
def test_same_input_shapes():
- """ Check that the final shape is just the input shape.
- """
+ # Check that the final shape is just the input shape.
+
data = [
(),
(1,),
@@ -93,9 +95,9 @@ def test_same_input_shapes():
def test_two_compatible_by_ones_input_shapes():
- """ Check that two different input shapes (of the same length but some have
- 1s) broadcast to the correct shape.
- """
+ # Check that two different input shapes of the same length, but some have
+ # ones, broadcast to the correct shape.
+
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
@@ -118,9 +120,9 @@ def test_two_compatible_by_ones_input_shapes():
def test_two_compatible_by_prepending_ones_input_shapes():
- """ Check that two different input shapes (of different lengths) broadcast
- to the correct shape.
- """
+ # Check that two different input shapes (of different lengths) broadcast
+ # to the correct shape.
+
data = [
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
@@ -150,8 +152,8 @@ def test_two_compatible_by_prepending_ones_input_shapes():
def test_incompatible_shapes_raise_valueerror():
- """ Check that a ValueError is raised for incompatible shapes.
- """
+ # Check that a ValueError is raised for incompatible shapes.
+
data = [
[(3,), (4,)],
[(2, 3), (2,)],
@@ -165,8 +167,8 @@ def test_incompatible_shapes_raise_valueerror():
def test_same_as_ufunc():
- """ Check that the data layout is the same as if a ufunc did the operation.
- """
+ # Check that the data layout is the same as if a ufunc did the operation.
+
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index d07e4d578..739061a5d 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -15,7 +15,7 @@ from numpy import (
)
import numpy as np
-from numpy.compat import asbytes, asbytes_nested
+from numpy.compat import asbytes_nested
def get_mat(n):
@@ -79,12 +79,12 @@ class TestEye(TestCase):
[0, 1, 0]]))
def test_eye_bounds(self):
- assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
+ assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
- assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
+ assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
- assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
- assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
@@ -248,13 +248,13 @@ class TestHistogram2d(TestCase):
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(
x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True)
- answer = array([[1, 1, .5],
- [1, 1, .5],
+ answer = array([[1, 1, .5],
+ [1, 1, .5],
[.5, .5, .25]])/9.
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
- r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
+ r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
assert_array_equal(H, 0)
@@ -363,29 +363,29 @@ def test_tril_indices():
il3 = tril_indices(4, m=5)
il4 = tril_indices(4, k=2, m=5)
- a = np.array([[1, 2, 3, 4],
- [5, 6, 7, 8],
- [9, 10, 11, 12],
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
[13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# indexing:
yield (assert_array_equal, a[il1],
- array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
yield (assert_array_equal, b[il3],
array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
yield (assert_array_equal, a,
- array([[-1, 2, 3, 4],
- [-1, -1, 7, 8],
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
[-1, -1, -1, 12],
[-1, -1, -1, -1]]))
b[il3] = -1
yield (assert_array_equal, b,
- array([[-1, 2, 3, 4, 5],
- [-1, -1, 8, 9, 10],
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
[-1, -1, -1, 14, 15],
[-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
@@ -397,8 +397,8 @@ def test_tril_indices():
[-10, -10, -10, -10]]))
b[il4] = -10
yield (assert_array_equal, b,
- array([[-10, -10, -10, 4, 5],
- [-10, -10, -10, -10, 10],
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
[-10, -10, -10, -10, -10],
[-10, -10, -10, -10, -10]]))
@@ -410,15 +410,15 @@ class TestTriuIndices(object):
iu3 = triu_indices(4, m=5)
iu4 = triu_indices(4, k=2, m=5)
- a = np.array([[1, 2, 3, 4],
- [5, 6, 7, 8],
- [9, 10, 11, 12],
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
[13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
yield (assert_array_equal, a[iu1],
- array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
yield (assert_array_equal, b[iu3],
array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
@@ -426,13 +426,13 @@ class TestTriuIndices(object):
a[iu1] = -1
yield (assert_array_equal, a,
array([[-1, -1, -1, -1],
- [5, -1, -1, -1],
- [9, 10, -1, -1],
+ [5, -1, -1, -1],
+ [9, 10, -1, -1],
[13, 14, 15, -1]]))
b[iu3] = -1
yield (assert_array_equal, b,
array([[-1, -1, -1, -1, -1],
- [ 6, -1, -1, -1, -1],
+ [6, -1, -1, -1, -1],
[11, 12, -1, -1, -1],
[16, 17, 18, -1, -1]]))
@@ -440,10 +440,10 @@ class TestTriuIndices(object):
# main one):
a[iu2] = -10
yield (assert_array_equal, a,
- array([[-1, -1, -10, -10],
- [5, -1, -1, -10],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ array([[-1, -1, -10, -10],
+ [5, -1, -1, -10],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu4] = -10
yield (assert_array_equal, b,
array([[-1, -1, -10, -10, -10],
@@ -470,10 +470,10 @@ class TestVander(object):
def test_basic(self):
c = np.array([0, 1, -2, 3])
v = vander(c)
- powers = np.array([[ 0, 0, 0, 0, 1],
- [ 1, 1, 1, 1, 1],
+ powers = np.array([[0, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1],
[16, -8, 4, -2, 1],
- [81, 27, 9, 3, 1]])
+ [81, 27, 9, 3, 1]])
# Check default value of N:
yield (assert_array_equal, v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
@@ -485,14 +485,14 @@ class TestVander(object):
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
v = vander(c)
- expected = np.array([[121, 11, 1],
+ expected = np.array([[121, 11, 1],
[144, -12, 1],
- [169, 13, 1]])
+ [169, 13, 1]])
yield (assert_array_equal, v, expected)
c = array([1.0+1j, 1.0-1j])
v = vander(c, N=3)
- expected = np.array([[ 2j, 1+1j, 1],
+ expected = np.array([[2j, 1+1j, 1],
[-2j, 1-1j, 1]])
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index eac8134e5..3931f95e5 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -1,34 +1,32 @@
from __future__ import division, absolute_import, print_function
-from numpy.lib import *
-from numpy.core import *
-from numpy.random import rand
-from numpy.compat import asbytes, long
+import numpy as np
+from numpy.compat import long
from numpy.testing import (
- TestCase, assert_, assert_equal, assert_array_equal, run_module_suite)
-
-try:
- import ctypes
- _HAS_CTYPE = True
-except ImportError:
- _HAS_CTYPE = False
+ TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
+ )
+from numpy.lib.type_check import (
+ common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
+ nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
+ )
def assert_all(x):
- assert_(all(x), x)
+ assert_(np.all(x), x)
class TestCommonType(TestCase):
def test_basic(self):
- ai32 = array([[1, 2], [3, 4]], dtype=int32)
- af32 = array([[1, 2], [3, 4]], dtype=float32)
- af64 = array([[1, 2], [3, 4]], dtype=float64)
- acs = array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=csingle)
- acd = array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=cdouble)
- assert_(common_type(af32) == float32)
- assert_(common_type(af64) == float64)
- assert_(common_type(acs) == csingle)
- assert_(common_type(acd) == cdouble)
+ ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
+ af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
+ af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)
+ acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)
+ assert_(common_type(ai32) == np.float64)
+ assert_(common_type(af32) == np.float32)
+ assert_(common_type(af64) == np.float64)
+ assert_(common_type(acs) == np.csingle)
+ assert_(common_type(acd) == np.cdouble)
class TestMintypecode(TestCase):
@@ -84,45 +82,45 @@ class TestMintypecode(TestCase):
class TestIsscalar(TestCase):
def test_basic(self):
- assert_(isscalar(3))
- assert_(not isscalar([3]))
- assert_(not isscalar((3,)))
- assert_(isscalar(3j))
- assert_(isscalar(long(10)))
- assert_(isscalar(4.0))
+ assert_(np.isscalar(3))
+ assert_(not np.isscalar([3]))
+ assert_(not np.isscalar((3,)))
+ assert_(np.isscalar(3j))
+ assert_(np.isscalar(long(10)))
+ assert_(np.isscalar(4.0))
class TestReal(TestCase):
def test_real(self):
- y = rand(10,)
- assert_array_equal(y, real(y))
+ y = np.random.rand(10,)
+ assert_array_equal(y, np.real(y))
def test_cmplx(self):
- y = rand(10,)+1j*rand(10,)
- assert_array_equal(y.real, real(y))
+ y = np.random.rand(10,)+1j*np.random.rand(10,)
+ assert_array_equal(y.real, np.real(y))
class TestImag(TestCase):
def test_real(self):
- y = rand(10,)
- assert_array_equal(0, imag(y))
+ y = np.random.rand(10,)
+ assert_array_equal(0, np.imag(y))
def test_cmplx(self):
- y = rand(10,)+1j*rand(10,)
- assert_array_equal(y.imag, imag(y))
+ y = np.random.rand(10,)+1j*np.random.rand(10,)
+ assert_array_equal(y.imag, np.imag(y))
class TestIscomplex(TestCase):
def test_fail(self):
- z = array([-1, 0, 1])
+ z = np.array([-1, 0, 1])
res = iscomplex(z)
- assert_(not sometrue(res, axis=0))
+ assert_(not np.sometrue(res, axis=0))
def test_pass(self):
- z = array([-1j, 1, 0])
+ z = np.array([-1j, 1, 0])
res = iscomplex(z)
assert_array_equal(res, [1, 0, 0])
@@ -130,12 +128,12 @@ class TestIscomplex(TestCase):
class TestIsreal(TestCase):
def test_pass(self):
- z = array([-1, 0, 1j])
+ z = np.array([-1, 0, 1j])
res = isreal(z)
assert_array_equal(res, [1, 1, 0])
def test_fail(self):
- z = array([-1j, 1, 0])
+ z = np.array([-1j, 1, 0])
res = isreal(z)
assert_array_equal(res, [0, 1, 1])
@@ -143,123 +141,115 @@ class TestIsreal(TestCase):
class TestIscomplexobj(TestCase):
def test_basic(self):
- z = array([-1, 0, 1])
+ z = np.array([-1, 0, 1])
assert_(not iscomplexobj(z))
- z = array([-1j, 0, -1])
+ z = np.array([-1j, 0, -1])
assert_(iscomplexobj(z))
class TestIsrealobj(TestCase):
def test_basic(self):
- z = array([-1, 0, 1])
+ z = np.array([-1, 0, 1])
assert_(isrealobj(z))
- z = array([-1j, 0, -1])
+ z = np.array([-1j, 0, -1])
assert_(not isrealobj(z))
class TestIsnan(TestCase):
def test_goodvalues(self):
- z = array((-1., 0., 1.))
- res = isnan(z) == 0
- assert_all(alltrue(res, axis=0))
+ z = np.array((-1., 0., 1.))
+ res = np.isnan(z) == 0
+ assert_all(np.all(res, axis=0))
def test_posinf(self):
- with errstate(divide='ignore'):
- assert_all(isnan(array((1.,))/0.) == 0)
+ with np.errstate(divide='ignore'):
+ assert_all(np.isnan(np.array((1.,))/0.) == 0)
def test_neginf(self):
- with errstate(divide='ignore'):
- assert_all(isnan(array((-1.,))/0.) == 0)
+ with np.errstate(divide='ignore'):
+ assert_all(np.isnan(np.array((-1.,))/0.) == 0)
def test_ind(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isnan(array((0.,))/0.) == 1)
-
- #def test_qnan(self): log(-1) return pi*j now
- # assert_all(isnan(log(-1.)) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isnan(np.array((0.,))/0.) == 1)
def test_integer(self):
- assert_all(isnan(1) == 0)
+ assert_all(np.isnan(1) == 0)
def test_complex(self):
- assert_all(isnan(1+1j) == 0)
+ assert_all(np.isnan(1+1j) == 0)
def test_complex1(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isnan(array(0+0j)/0.) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isnan(np.array(0+0j)/0.) == 1)
class TestIsfinite(TestCase):
+ # Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
- z = array((-1., 0., 1.))
- res = isfinite(z) == 1
- assert_all(alltrue(res, axis=0))
+ z = np.array((-1., 0., 1.))
+ res = np.isfinite(z) == 1
+ assert_all(np.all(res, axis=0))
def test_posinf(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isfinite(array((1.,))/0.) == 0)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((1.,))/0.) == 0)
def test_neginf(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isfinite(array((-1.,))/0.) == 0)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((-1.,))/0.) == 0)
def test_ind(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isfinite(array((0.,))/0.) == 0)
-
- #def test_qnan(self):
- # assert_all(isfinite(log(-1.)) == 0)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((0.,))/0.) == 0)
def test_integer(self):
- assert_all(isfinite(1) == 1)
+ assert_all(np.isfinite(1) == 1)
def test_complex(self):
- assert_all(isfinite(1+1j) == 1)
+ assert_all(np.isfinite(1+1j) == 1)
def test_complex1(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isfinite(array(1+1j)/0.) == 0)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
class TestIsinf(TestCase):
+ # Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
- z = array((-1., 0., 1.))
- res = isinf(z) == 0
- assert_all(alltrue(res, axis=0))
+ z = np.array((-1., 0., 1.))
+ res = np.isinf(z) == 0
+ assert_all(np.all(res, axis=0))
def test_posinf(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isinf(array((1.,))/0.) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((1.,))/0.) == 1)
def test_posinf_scalar(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isinf(array(1.,)/0.) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array(1.,)/0.) == 1)
def test_neginf(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isinf(array((-1.,))/0.) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((-1.,))/0.) == 1)
def test_neginf_scalar(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isinf(array(-1.)/0.) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array(-1.)/0.) == 1)
def test_ind(self):
- with errstate(divide='ignore', invalid='ignore'):
- assert_all(isinf(array((0.,))/0.) == 0)
-
- #def test_qnan(self):
- # assert_all(isinf(log(-1.)) == 0)
- # assert_all(isnan(log(-1.)) == 1)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((0.,))/0.) == 0)
class TestIsposinf(TestCase):
def test_generic(self):
- with errstate(divide='ignore', invalid='ignore'):
- vals = isposinf(array((-1., 0, 1))/0.)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = isposinf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
@@ -268,8 +258,8 @@ class TestIsposinf(TestCase):
class TestIsneginf(TestCase):
def test_generic(self):
- with errstate(divide='ignore', invalid='ignore'):
- vals = isneginf(array((-1., 0, 1))/0.)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = isneginf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
@@ -278,11 +268,11 @@ class TestIsneginf(TestCase):
class TestNanToNum(TestCase):
def test_generic(self):
- with errstate(divide='ignore', invalid='ignore'):
- vals = nan_to_num(array((-1., 0, 1))/0.)
- assert_all(vals[0] < -1e10) and assert_all(isfinite(vals[0]))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1))/0.)
+ assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
- assert_all(vals[2] > 1e10) and assert_all(isfinite(vals[2]))
+ assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
def test_integer(self):
vals = nan_to_num(1)
@@ -293,30 +283,31 @@ class TestNanToNum(TestCase):
assert_all(vals == 1+1j)
def test_complex_bad(self):
- with errstate(divide='ignore', invalid='ignore'):
+ with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
- v += array(0+1.j)/0.
+ v += np.array(0+1.j)/0.
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
- assert_all(isfinite(vals))
+ assert_all(np.isfinite(vals))
def test_complex_bad2(self):
- with errstate(divide='ignore', invalid='ignore'):
+ with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
- v += array(-1+1.j)/0.
+ v += np.array(-1+1.j)/0.
vals = nan_to_num(v)
- assert_all(isfinite(vals))
- #assert_all(vals.imag > 1e10) and assert_all(isfinite(vals))
+ assert_all(np.isfinite(vals))
+ # Fixme
+ #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
# !! inf. Comment out for now, and see if it
# !! changes
- #assert_all(vals.real < -1e10) and assert_all(isfinite(vals))
+ #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
class TestRealIfClose(TestCase):
def test_basic(self):
- a = rand(10)
+ a = np.random.rand(10)
b = real_if_close(a+1e-15j)
assert_all(isrealobj(b))
assert_array_equal(a, b)
@@ -329,9 +320,9 @@ class TestRealIfClose(TestCase):
class TestArrayConversion(TestCase):
def test_asfarray(self):
- a = asfarray(array([1, 2, 3]))
- assert_equal(a.__class__, ndarray)
- assert_(issubdtype(a.dtype, float))
+ a = asfarray(np.array([1, 2, 3]))
+ assert_equal(a.__class__, np.ndarray)
+ assert_(np.issubdtype(a.dtype, np.float))
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 2bedd9b40..97d608ecf 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -1,9 +1,10 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import *
import numpy.core as nx
import numpy.lib.ufunclike as ufl
-from numpy.testing.decorators import deprecated
+from numpy.testing import (
+ run_module_suite, TestCase, assert_, assert_equal, assert_array_equal
+ )
class TestUfunclike(TestCase):
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 36d5d6428..fcb37f98a 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -2,9 +2,11 @@ from __future__ import division, absolute_import, print_function
import sys
from numpy.core import arange
-from numpy.testing import *
-import numpy.lib.utils as utils
+from numpy.testing import (
+ run_module_suite, assert_, assert_equal
+ )
from numpy.lib import deprecate
+import numpy.lib.utils as utils
if sys.version_info[0] >= 3:
from io import StringIO
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 7670da1ed..40a140b6b 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -3,19 +3,19 @@
"""
from __future__ import division, absolute_import, print_function
-__all__ = ['diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri',
- 'triu', 'tril', 'vander', 'histogram2d', 'mask_indices',
- 'tril_indices', 'tril_indices_from', 'triu_indices',
- 'triu_indices_from',
- ]
-
from numpy.core.numeric import (
- asanyarray, subtract, arange, zeros, greater_equal, multiply, ones,
- asarray, where, less, int8, int16, int32, int64, empty, promote_types
+ asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
+ where, int8, int16, int32, int64, empty, promote_types
)
from numpy.core import iinfo
+__all__ = [
+ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
+ 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
+ 'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+
+
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 1ed5bf32a..a45d0bd86 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -58,7 +58,7 @@ def mintypecode(typechars,typeset='GDFgdf',default='d'):
'G'
"""
- typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char\
+ typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char
for t in typechars]
intersection = [t for t in typecodes if t in typeset]
if not intersection:
@@ -266,7 +266,7 @@ def iscomplexobj(x):
True
"""
- return issubclass( asarray(x).dtype.type, _nx.complexfloating)
+ return issubclass(asarray(x).dtype.type, _nx.complexfloating)
def isrealobj(x):
"""
@@ -300,7 +300,7 @@ def isrealobj(x):
False
"""
- return not issubclass( asarray(x).dtype.type, _nx.complexfloating)
+ return not issubclass(asarray(x).dtype.type, _nx.complexfloating)
#-----------------------------------------------------------------------------
@@ -464,28 +464,28 @@ def asscalar(a):
#-----------------------------------------------------------------------------
-_namefromtype = {'S1' : 'character',
- '?' : 'bool',
- 'b' : 'signed char',
- 'B' : 'unsigned char',
- 'h' : 'short',
- 'H' : 'unsigned short',
- 'i' : 'integer',
- 'I' : 'unsigned integer',
- 'l' : 'long integer',
- 'L' : 'unsigned long integer',
- 'q' : 'long long integer',
- 'Q' : 'unsigned long long integer',
- 'f' : 'single precision',
- 'd' : 'double precision',
- 'g' : 'long precision',
- 'F' : 'complex single precision',
- 'D' : 'complex double precision',
- 'G' : 'complex long double precision',
- 'S' : 'string',
- 'U' : 'unicode',
- 'V' : 'void',
- 'O' : 'object'
+_namefromtype = {'S1': 'character',
+ '?': 'bool',
+ 'b': 'signed char',
+ 'B': 'unsigned char',
+ 'h': 'short',
+ 'H': 'unsigned short',
+ 'i': 'integer',
+ 'I': 'unsigned integer',
+ 'l': 'long integer',
+ 'L': 'unsigned long integer',
+ 'q': 'long long integer',
+ 'Q': 'unsigned long long integer',
+ 'f': 'single precision',
+ 'd': 'double precision',
+ 'g': 'long precision',
+ 'F': 'complex single precision',
+ 'D': 'complex double precision',
+ 'G': 'complex long double precision',
+ 'S': 'string',
+ 'U': 'unicode',
+ 'V': 'void',
+ 'O': 'object'
}
def typename(char):
@@ -544,12 +544,12 @@ def typename(char):
#determine the "minimum common type" for a group of arrays.
array_type = [[_nx.single, _nx.double, _nx.longdouble],
[_nx.csingle, _nx.cdouble, _nx.clongdouble]]
-array_precision = {_nx.single : 0,
- _nx.double : 1,
- _nx.longdouble : 2,
- _nx.csingle : 0,
- _nx.cdouble : 1,
- _nx.clongdouble : 2}
+array_precision = {_nx.single: 0,
+ _nx.double: 1,
+ _nx.longdouble: 2,
+ _nx.csingle: 0,
+ _nx.cdouble: 1,
+ _nx.clongdouble: 2}
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index f62f6db59..bb5bec628 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -7,29 +7,33 @@ complete.
from __future__ import division, absolute_import, print_function
from numpy.core import (
- array, asarray, absolute, add, subtract, multiply, divide,
- remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
- bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
- greater_equal, shape, reshape, arange, sin, sqrt, transpose
- )
+ array, asarray, absolute, add, subtract, multiply, divide,
+ remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
+ bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
+ greater_equal, shape, reshape, arange, sin, sqrt, transpose
+)
from numpy.compat import long
+
class container(object):
+
def __init__(self, data, dtype=None, copy=True):
self.array = array(data, dtype, copy=copy)
def __repr__(self):
if len(self.shape) > 0:
- return self.__class__.__name__+repr(self.array)[len("array"):]
+ return self.__class__.__name__ + repr(self.array)[len("array"):]
else:
- return self.__class__.__name__+"("+repr(self.array)+")"
+ return self.__class__.__name__ + "(" + repr(self.array) + ")"
- def __array__(self,t=None):
- if t: return self.array.astype(t)
+ def __array__(self, t=None):
+ if t:
+ return self.array.astype(t)
return self.array
# Array as sequence
- def __len__(self): return len(self.array)
+ def __len__(self):
+ return len(self.array)
def __getitem__(self, index):
return self._rc(self.array[index])
@@ -37,19 +41,21 @@ class container(object):
def __getslice__(self, i, j):
return self._rc(self.array[i:j])
-
def __setitem__(self, index, value):
self.array[index] = asarray(value, self.dtype)
+
def __setslice__(self, i, j, value):
self.array[i:j] = asarray(value, self.dtype)
def __abs__(self):
return self._rc(absolute(self.array))
+
def __neg__(self):
return self._rc(-self.array)
def __add__(self, other):
- return self._rc(self.array+asarray(other))
+ return self._rc(self.array + asarray(other))
+
__radd__ = __add__
def __iadd__(self, other):
@@ -57,32 +63,40 @@ class container(object):
return self
def __sub__(self, other):
- return self._rc(self.array-asarray(other))
+ return self._rc(self.array - asarray(other))
+
def __rsub__(self, other):
- return self._rc(asarray(other)-self.array)
+ return self._rc(asarray(other) - self.array)
+
def __isub__(self, other):
subtract(self.array, other, self.array)
return self
def __mul__(self, other):
return self._rc(multiply(self.array, asarray(other)))
+
__rmul__ = __mul__
+
def __imul__(self, other):
multiply(self.array, other, self.array)
return self
def __div__(self, other):
return self._rc(divide(self.array, asarray(other)))
+
def __rdiv__(self, other):
return self._rc(divide(asarray(other), self.array))
+
def __idiv__(self, other):
divide(self.array, other, self.array)
return self
def __mod__(self, other):
return self._rc(remainder(self.array, other))
+
def __rmod__(self, other):
return self._rc(remainder(other, self.array))
+
def __imod__(self, other):
remainder(self.array, other, self.array)
return self
@@ -90,63 +104,74 @@ class container(object):
def __divmod__(self, other):
return (self._rc(divide(self.array, other)),
self._rc(remainder(self.array, other)))
+
def __rdivmod__(self, other):
return (self._rc(divide(other, self.array)),
self._rc(remainder(other, self.array)))
def __pow__(self, other):
return self._rc(power(self.array, asarray(other)))
+
def __rpow__(self, other):
return self._rc(power(asarray(other), self.array))
+
def __ipow__(self, other):
power(self.array, other, self.array)
return self
def __lshift__(self, other):
return self._rc(left_shift(self.array, other))
+
def __rshift__(self, other):
return self._rc(right_shift(self.array, other))
+
def __rlshift__(self, other):
return self._rc(left_shift(other, self.array))
+
def __rrshift__(self, other):
return self._rc(right_shift(other, self.array))
+
def __ilshift__(self, other):
left_shift(self.array, other, self.array)
return self
+
def __irshift__(self, other):
right_shift(self.array, other, self.array)
return self
def __and__(self, other):
return self._rc(bitwise_and(self.array, other))
+
def __rand__(self, other):
return self._rc(bitwise_and(other, self.array))
+
def __iand__(self, other):
bitwise_and(self.array, other, self.array)
return self
def __xor__(self, other):
return self._rc(bitwise_xor(self.array, other))
+
def __rxor__(self, other):
return self._rc(bitwise_xor(other, self.array))
+
def __ixor__(self, other):
bitwise_xor(self.array, other, self.array)
return self
def __or__(self, other):
return self._rc(bitwise_or(self.array, other))
+
def __ror__(self, other):
return self._rc(bitwise_or(other, self.array))
+
def __ior__(self, other):
bitwise_or(self.array, other, self.array)
return self
- def __neg__(self):
- return self._rc(-self.array)
def __pos__(self):
return self._rc(self.array)
- def __abs__(self):
- return self._rc(abs(self.array))
+
def __invert__(self):
return self._rc(invert(self.array))
@@ -154,33 +179,62 @@ class container(object):
if len(self.shape) == 0:
return func(self[0])
else:
- raise TypeError("only rank-0 arrays can be converted to Python scalars.")
+ raise TypeError(
+ "only rank-0 arrays can be converted to Python scalars.")
+
+ def __complex__(self):
+ return self._scalarfunc(complex)
- def __complex__(self): return self._scalarfunc(complex)
- def __float__(self): return self._scalarfunc(float)
- def __int__(self): return self._scalarfunc(int)
- def __long__(self): return self._scalarfunc(long)
- def __hex__(self): return self._scalarfunc(hex)
- def __oct__(self): return self._scalarfunc(oct)
+ def __float__(self):
+ return self._scalarfunc(float)
- def __lt__(self, other): return self._rc(less(self.array, other))
- def __le__(self, other): return self._rc(less_equal(self.array, other))
- def __eq__(self, other): return self._rc(equal(self.array, other))
- def __ne__(self, other): return self._rc(not_equal(self.array, other))
- def __gt__(self, other): return self._rc(greater(self.array, other))
- def __ge__(self, other): return self._rc(greater_equal(self.array, other))
+ def __int__(self):
+ return self._scalarfunc(int)
- def copy(self): return self._rc(self.array.copy())
+ def __long__(self):
+ return self._scalarfunc(long)
- def tostring(self): return self.array.tostring()
+ def __hex__(self):
+ return self._scalarfunc(hex)
- def byteswap(self): return self._rc(self.array.byteswap())
+ def __oct__(self):
+ return self._scalarfunc(oct)
- def astype(self, typecode): return self._rc(self.array.astype(typecode))
+ def __lt__(self, other):
+ return self._rc(less(self.array, other))
+
+ def __le__(self, other):
+ return self._rc(less_equal(self.array, other))
+
+ def __eq__(self, other):
+ return self._rc(equal(self.array, other))
+
+ def __ne__(self, other):
+ return self._rc(not_equal(self.array, other))
+
+ def __gt__(self, other):
+ return self._rc(greater(self.array, other))
+
+ def __ge__(self, other):
+ return self._rc(greater_equal(self.array, other))
+
+ def copy(self):
+ return self._rc(self.array.copy())
+
+ def tostring(self):
+ return self.array.tostring()
+
+ def byteswap(self):
+ return self._rc(self.array.byteswap())
+
+ def astype(self, typecode):
+ return self._rc(self.array.astype(typecode))
def _rc(self, a):
- if len(shape(a)) == 0: return a
- else: return self.__class__(a)
+ if len(shape(a)) == 0:
+ return a
+ else:
+ return self.__class__(a)
def __array_wrap__(self, *args):
return self.__class__(args[0])
@@ -204,19 +258,20 @@ class container(object):
# Test of class container
#############################################################
if __name__ == '__main__':
- temp=reshape(arange(10000), (100, 100))
+ temp = reshape(arange(10000), (100, 100))
- ua=container(temp)
+ ua = container(temp)
# new object created begin test
print(dir(ua))
- print(shape(ua), ua.shape) # I have changed Numeric.py
+ print(shape(ua), ua.shape) # I have changed Numeric.py
- ua_small=ua[:3, :5]
+ ua_small = ua[:3, :5]
print(ua_small)
- ua_small[0, 0]=10 # this did not change ua[0,0], which is not normal behavior
+ # this did not change ua[0,0], which is not normal behavior
+ ua_small[0, 0] = 10
print(ua_small[0, 0], ua[0, 0])
- print(sin(ua_small)/3.*6.+sqrt(ua_small**2))
+ print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
print(less(ua_small, 103), type(less(ua_small, 103)))
- print(type(ua_small*reshape(arange(15), shape(ua_small))))
+ print(type(ua_small * reshape(arange(15), shape(ua_small))))
print(reshape(ua_small, (5, 3)))
print(transpose(ua_small))
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 6d41e8eb4..df0052493 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -6,13 +6,13 @@ import types
import re
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
-from numpy.core import product, ndarray, ufunc, asarray
+from numpy.core import ndarray, ufunc, asarray
__all__ = [
'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
'lookfor', 'byte_bounds', 'safe_eval'
-]
+ ]
def get_include():
"""
@@ -60,6 +60,7 @@ class _Deprecate(object):
deprecate
"""
+
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
@@ -122,16 +123,16 @@ def deprecate(*args, **kwargs):
func : function
The function to be deprecated.
old_name : str, optional
- The name of the function to be deprecated. Default is None, in which
- case the name of `func` is used.
+ The name of the function to be deprecated. Default is None, in
+ which case the name of `func` is used.
new_name : str, optional
- The new name for the function. Default is None, in which case
- the deprecation message is that `old_name` is deprecated. If given,
- the deprecation message is that `old_name` is deprecated and `new_name`
+ The new name for the function. Default is None, in which case the
+ deprecation message is that `old_name` is deprecated. If given, the
+ deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
- Additional explanation of the deprecation. Displayed in the docstring
- after the warning.
+ Additional explanation of the deprecation. Displayed in the
+ docstring after the warning.
Returns
-------
@@ -140,7 +141,8 @@ def deprecate(*args, **kwargs):
Examples
--------
- Note that ``olduint`` returns a value after printing Deprecation Warning:
+ Note that ``olduint`` returns a value after printing Deprecation
+ Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
@@ -183,14 +185,16 @@ def byte_bounds(a):
Parameters
----------
a : ndarray
- Input array. It must conform to the Python-side of the array interface.
+ Input array. It must conform to the Python-side of the array
+ interface.
Returns
-------
(low, high) : tuple of 2 integers
- The first integer is the first byte of the array, the second integer is
- just past the last byte of the array. If `a` is not contiguous it
- will not use every byte between the (`low`, `high`) values.
+ The first integer is the first byte of the array, the second
+ integer is just past the last byte of the array. If `a` is not
+ contiguous it will not use every byte between the (`low`, `high`)
+ values.
Examples
--------
@@ -210,11 +214,11 @@ def byte_bounds(a):
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
-
bytes_a = asarray(a).dtype.itemsize
-
+
a_low = a_high = a_data
- if astrides is None: # contiguous case
+ if astrides is None:
+ # contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
@@ -251,8 +255,8 @@ def who(vardict=None):
Notes
-----
- Prints out the name, shape, bytes and type of all of the ndarrays present
- in `vardict`.
+ Prints out the name, shape, bytes and type of all of the ndarrays
+ present in `vardict`.
Examples
--------
@@ -286,11 +290,11 @@ def who(vardict=None):
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
- original=0
+ original = 0
else:
cache[idv] = name
namestr = name
- original=1
+ original = 1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
@@ -333,9 +337,9 @@ def who(vardict=None):
# NOTE: pydoc defines a help function which works simliarly to this
# except it uses a pager to take over the screen.
-# combine name and arguments and split to multiple lines of
-# width characters. End lines on a comma and begin argument list
-# indented with the rest of the arguments.
+# combine name and arguments and split to multiple lines of width
+# characters. End lines on a comma and begin argument list indented with
+# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
@@ -413,7 +417,10 @@ def _info(obj, output=sys.stdout):
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
- print("data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), file=output)
+ print(
+ "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
+ file=output
+ )
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
@@ -428,7 +435,7 @@ def _info(obj, output=sys.stdout):
print("type: %s" % obj.dtype, file=output)
-def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
+def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
@@ -437,13 +444,13 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
- modules are searched for matching objects.
- If None, information about `info` itself is returned.
+ modules are searched for matching objects. If None, information
+ about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
- File like object that the output is written to, default is ``stdout``.
- The object has to be opened in 'w' or 'a' mode.
+ File like object that the output is written to, default is
+ ``stdout``. The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
@@ -453,8 +460,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
Notes
-----
- When used interactively with an object, ``np.info(obj)`` is equivalent to
- ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
+ When used interactively with an object, ``np.info(obj)`` is equivalent
+ to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
+ prompt.
Examples
--------
@@ -478,10 +486,11 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
- import pydoc, inspect
+ import pydoc
+ import inspect
- if hasattr(object, '_ppimport_importer') or \
- hasattr(object, '_ppimport_module'):
+ if (hasattr(object, '_ppimport_importer') or
+ hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
@@ -499,7 +508,10 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
- print("\n *** Repeat reference found in %s *** " % namestr, file=output)
+ print("\n "
+ "*** Repeat reference found in %s *** " % namestr,
+ file=output
+ )
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
@@ -511,7 +523,10 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
- print("\n *** Total of %d references found. ***" % numfound, file=output)
+ print("\n "
+ "*** Total of %d references found. ***" % numfound,
+ file=output
+ )
elif inspect.isfunction(object):
name = object.__name__
@@ -530,7 +545,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
arguments = "()"
try:
if hasattr(object, '__init__'):
- arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.__func__))
+ arguments = inspect.formatargspec(
+ *inspect.getargspec(object.__init__.__func__)
+ )
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -559,7 +576,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
- methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
+ methstr, other = pydoc.splitdoc(
+ inspect.getdoc(thisobj) or "None"
+ )
print(" %s -- %s" % (meth, methstr), file=output)
elif (sys.version_info[0] < 3
@@ -569,7 +588,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
print("Instance of class: ", object.__class__.__name__, file=output)
print(file=output)
if hasattr(object, '__call__'):
- arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.__func__))
+ arguments = inspect.formatargspec(
+ *inspect.getargspec(object.__call__.__func__)
+ )
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -597,7 +618,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
elif inspect.ismethod(object):
name = object.__name__
- arguments = inspect.formatargspec(*inspect.getargspec(object.__func__))
+ arguments = inspect.formatargspec(
+ *inspect.getargspec(object.__func__)
+ )
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -628,7 +651,8 @@ def source(object, output=sys.stdout):
Parameters
----------
object : numpy object
- Input object. This can be any object (function, class, module, ...).
+ Input object. This can be any object (function, class, module,
+ ...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
@@ -669,7 +693,8 @@ def source(object, output=sys.stdout):
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
-# regexp whose match indicates that the string may contain a function signature
+# regexp whose match indicates that the string may contain a function
+# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
@@ -727,7 +752,8 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
- if not whats: return
+ if not whats:
+ return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
@@ -856,7 +882,8 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
- if id(item) in seen: continue
+ if id(item) in seen:
+ continue
seen[id(item)] = True
index += 1
@@ -875,7 +902,8 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
- if os.path.isfile(this_py) and mod_path.endswith('.py'):
+ if (os.path.isfile(this_py) and
+ mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
@@ -935,7 +963,8 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
try:
doc = inspect.getdoc(item)
- except NameError: # ref SWIG's NameError: Unknown C global variable
+ except NameError:
+ # ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
@@ -999,7 +1028,9 @@ class SafeEval(object):
return node.value
def visitDict(self, node,**kw):
- return dict([(self.visit(k), self.visit(v)) for k, v in node.items])
+ return dict(
+ [(self.visit(k), self.visit(v)) for k, v in node.items]
+ )
def visitTuple(self, node, **kw):
return tuple([self.visit(i) for i in node.nodes])
@@ -1097,8 +1128,8 @@ def safe_eval(source):
Raises
------
SyntaxError
- If the code has invalid Python syntax, or if it contains non-literal
- code.
+ If the code has invalid Python syntax, or if it contains
+ non-literal code.
Examples
--------
@@ -1124,7 +1155,8 @@ def safe_eval(source):
import warnings
with warnings.catch_warnings():
- # compiler package is deprecated for 3.x, which is already solved here
+ # compiler package is deprecated for 3.x, which is already solved
+ # here
warnings.simplefilter('ignore', DeprecationWarning)
try:
import compiler
@@ -1134,11 +1166,11 @@ def safe_eval(source):
walker = SafeEval()
try:
ast = compiler.parse(source, mode="eval")
- except SyntaxError as err:
+ except SyntaxError:
raise
try:
return walker.visit(ast)
- except SyntaxError as err:
+ except SyntaxError:
raise
#-----------------------------------------------------------------------------