author     Charles Harris <charlesr.harris@gmail.com>   2013-08-18 16:19:20 -0700
committer  Charles Harris <charlesr.harris@gmail.com>   2013-08-18 16:19:20 -0700
commit     6c729b4423857850e6553cf6c2d0fc8b026036dd (patch)
tree       330ce703eb02d20f96099c3fe0fc36ae33d4905b
parent     13b0b272f764c14bc4ac34f5b19fd030d9c611a4 (diff)
parent     fbd6510d58a47ea0d166c48a82793f05425406e4 (diff)
download   numpy-6c729b4423857850e6553cf6c2d0fc8b026036dd.tar.gz
Merge pull request #3635 from charris/giant-style-cleanup
Giant style cleanup
372 files changed, 8421 insertions, 8502 deletions
diff --git a/BENTO_BUILD.txt b/BENTO_BUILD.txt index 5eea32386..ac11b0441 100644 --- a/BENTO_BUILD.txt +++ b/BENTO_BUILD.txt @@ -3,7 +3,7 @@ No-frill version: * Clone bento:: git clone git://github.com/cournape/Bento.git bento-git - + * Bootstrap bento:: cd bento-git && python bootstrap.py diff --git a/DEV_README.txt b/DEV_README.txt index 65f84994c..7dc8bceed 100644 --- a/DEV_README.txt +++ b/DEV_README.txt @@ -1,19 +1,18 @@ Thank you for your willingness to help make NumPy the best array system available. -We have a few simple rules: +We have a few simple rules: * try hard to keep the Git repository in a buildable state and to not indiscriminately muck with what others have contributed. - * Simple changes (including bug fixes) and obvious improvements are - always welcome. Changes that fundamentally change behavior need - discussion on numpy-discussions@scipy.org before anything is + * Simple changes (including bug fixes) and obvious improvements are + always welcome. Changes that fundamentally change behavior need + discussion on numpy-discussions@scipy.org before anything is done. * Please add meaningful comments when you check changes in. These - comments form the basis of the change-log. + comments form the basis of the change-log. * Add unit tests to exercise new code, and regression tests whenever you fix a bug. - diff --git a/README.txt b/README.txt index 31ba1c02a..a20163a30 100644 --- a/README.txt +++ b/README.txt @@ -1,10 +1,10 @@ -NumPy is the fundamental package needed for scientific computing with Python. +NumPy is the fundamental package needed for scientific computing with Python. This package contains: * a powerful N-dimensional array object * sophisticated (broadcasting) functions * tools for integrating C/C++ and Fortran code - * useful linear algebra, Fourier transform, and random number capabilities. + * useful linear algebra, Fourier transform, and random number capabilities. It derives from the old Numeric code base and can be used as a replacement for Numeric. It also adds the features introduced by numarray and can be used to replace numarray. diff --git a/doc/CAPI.rst.txt b/doc/CAPI.rst.txt index 41241ce5a..4bd51baca 100644 --- a/doc/CAPI.rst.txt +++ b/doc/CAPI.rst.txt @@ -15,7 +15,7 @@ of the API) that will need to be changed: * If you used any of the function pointers in the ``PyArray_Descr`` structure you will have to modify your usage of those. First, - the pointers are all under the member named ``f``. So ``descr->cast`` + the pointers are all under the member named ``f``. So ``descr->cast`` is now ``descr->f->cast``. In addition, the casting functions have eliminated the strides argument (use ``PyArray_CastTo`` if you need strided casting). All functions have @@ -238,7 +238,7 @@ segfaults may result. There are 6 (binary) flags that describe the memory area used by the data buffer. These constants are defined in ``arrayobject.h`` and determine the bit-position of the flag. Python exposes a nice attribute- -based interface as well as a dictionary-like interface for getting +based interface as well as a dictionary-like interface for getting (and, if appropriate, setting) these flags. Memory areas of all kinds can be pointed to by an ndarray, necessitating @@ -254,7 +254,7 @@ PyArray_FromAny function. ``NPY_FORTRAN`` True if the array is (Fortran-style) contiguous in memory. -Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN`` contiguous +Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN`` contiguous and C contiguous. 
Both of these flags can be checked and are convenience flags only as whether or not an array is ``NPY_CONTIGUOUS`` or ``NPY_FORTRAN`` can be determined by the ``strides``, ``dimensions``, and ``itemsize`` diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt index 363112ea9..01bc9cc43 100644 --- a/doc/DISTUTILS.rst.txt +++ b/doc/DISTUTILS.rst.txt @@ -29,8 +29,8 @@ Requirements for SciPy packages SciPy consists of Python packages, called SciPy packages, that are available to Python users via the ``scipy`` namespace. Each SciPy package -may contain other SciPy packages. And so on. Therefore, the SciPy -directory tree is a tree of packages with arbitrary depth and width. +may contain other SciPy packages. And so on. Therefore, the SciPy +directory tree is a tree of packages with arbitrary depth and width. Any SciPy package may depend on NumPy packages but the dependence on other SciPy packages should be kept minimal or zero. @@ -46,12 +46,12 @@ Their contents are described below. The ``setup.py`` file ''''''''''''''''''''' -In order to add a Python package to SciPy, its build script (``setup.py``) -must meet certain requirements. The most important requirement is that the -package define a ``configuration(parent_package='',top_path=None)`` function -which returns a dictionary suitable for passing to -``numpy.distutils.core.setup(..)``. To simplify the construction of -this dictionary, ``numpy.distutils.misc_util`` provides the +In order to add a Python package to SciPy, its build script (``setup.py``) +must meet certain requirements. The most important requirement is that the +package define a ``configuration(parent_package='',top_path=None)`` function +which returns a dictionary suitable for passing to +``numpy.distutils.core.setup(..)``. To simplify the construction of +this dictionary, ``numpy.distutils.misc_util`` provides the ``Configuration`` class, described below. SciPy pure Python package example @@ -72,13 +72,13 @@ Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: The arguments of the ``configuration`` function specifiy the name of parent SciPy package (``parent_package``) and the directory location -of the main ``setup.py`` script (``top_path``). These arguments, +of the main ``setup.py`` script (``top_path``). These arguments, along with the name of the current package, should be passed to the ``Configuration`` constructor. The ``Configuration`` constructor has a fourth optional argument, ``package_path``, that can be used when package files are located in -a different location than the directory of the ``setup.py`` file. +a different location than the directory of the ``setup.py`` file. Remaining ``Configuration`` arguments are all keyword arguments that will be used to initialize attributes of ``Configuration`` @@ -159,12 +159,12 @@ in writing setup scripts: sun.dat bar/ car.dat - can.dat + can.dat Path to data files can be a function taking no arguments and returning path(s) to data files -- this is a useful when data files are generated while building the package. (XXX: explain the step - when this function are called exactly) + when this function are called exactly) + ``config.add_data_dir(data_path)`` --- add directory ``data_path`` recursively to ``data_files``. The whole directory tree starting at @@ -174,14 +174,14 @@ in writing setup scripts: directory and the second element specifies the path to data directory. By default, data directory are copied under package installation directory under the basename of ``data_path``. 
For example, - + :: config.add_data_dir('fun') # fun/ contains foo.dat bar/car.dat config.add_data_dir(('sun','fun')) config.add_data_dir(('gun','/full/path/to/fun')) - will install data files to the following locations + will install data files to the following locations :: @@ -204,7 +204,7 @@ in writing setup scripts: modules of the current package. + ``config.add_headers(*files)`` --- prepend ``files`` to ``headers`` - list. By default, headers will be installed under + list. By default, headers will be installed under ``<prefix>/include/pythonX.X/<config.name.replace('.','/')>/`` directory. If ``files`` item is a tuple then it's first argument specifies the installation suffix relative to @@ -216,7 +216,7 @@ in writing setup scripts: list. Scripts will be installed under ``<prefix>/bin/`` directory. + ``config.add_extension(name,sources,*kw)`` --- create and add an - ``Extension`` instance to ``ext_modules`` list. The first argument + ``Extension`` instance to ``ext_modules`` list. The first argument ``name`` defines the name of the extension module that will be installed under ``config.name`` package. The second argument is a list of sources. ``add_extension`` method takes also keyword @@ -269,10 +269,10 @@ in writing setup scripts: more information on arguments. + ``config.have_f77c()`` --- return True if Fortran 77 compiler is - available (read: a simple Fortran 77 code compiled succesfully). + available (read: a simple Fortran 77 code compiled succesfully). + ``config.have_f90c()`` --- return True if Fortran 90 compiler is - available (read: a simple Fortran 90 code compiled succesfully). + available (read: a simple Fortran 90 code compiled succesfully). + ``config.get_version()`` --- return version string of the current package, ``None`` if version information could not be detected. This methods @@ -405,7 +405,7 @@ The header of a typical SciPy ``__init__.py`` is:: """ Package docstring, typically with a brief description and function listing. """ - + # py3k related imports from __future__ import division, print_function, absolute_import @@ -414,7 +414,7 @@ The header of a typical SciPy ``__init__.py`` is:: ... __all__ = [s for s in dir() if not s.startswith('_')] - + from numpy.testing import Tester test = Tester().test bench = Tester().bench @@ -441,7 +441,7 @@ will compile the ``library`` sources without optimization flags. It's recommended to specify only those config_fc options in such a way that are compiler independent. -Getting extra Fortran 77 compiler options from source +Getting extra Fortran 77 compiler options from source ----------------------------------------------------- Some old Fortran codes need special compiler options in order to @@ -452,7 +452,7 @@ pattern:: CF77FLAGS(<fcompiler type>) = <fcompiler f77flags> in the first 20 lines of the source and use the ``f77flags`` for -specified type of the fcompiler (the first character ``C`` is optional). +specified type of the fcompiler (the first character ``C`` is optional). TODO: This feature can be easily extended for Fortran 90 codes as well. Let us know if you would need such a feature. diff --git a/doc/HOWTO_DOCUMENT.rst.txt b/doc/HOWTO_DOCUMENT.rst.txt index 13e9b2607..8e841755a 100644 --- a/doc/HOWTO_DOCUMENT.rst.txt +++ b/doc/HOWTO_DOCUMENT.rst.txt @@ -142,7 +142,7 @@ The sections of the docstring are: 2. **Deprecation warning** A section (use if applicable) to warn users that the object is deprecated. 
- Section contents should include: + Section contents should include: * In what Numpy version the object was deprecated, and when it will be removed. @@ -150,7 +150,7 @@ The sections of the docstring are: * Reason for deprecation if this is useful information (e.g., object is superseded, duplicates functionality found elsewhere, etc.). - * New recommended way of obtaining the same functionality. + * New recommended way of obtaining the same functionality. This section should use the note Sphinx directive instead of an underlined section header. @@ -182,7 +182,7 @@ The sections of the docstring are: x : type Description of parameter `x`. - Enclose variables in single backticks. The colon must be preceded + Enclose variables in single backticks. The colon must be preceded by a space, or omitted if the type is absent. For the parameter types, be as precise as possible. Below are a @@ -195,7 +195,7 @@ The sections of the docstring are: filename : str copy : bool dtype : data-type - iterable : iterable object + iterable : iterable object shape : int or tuple of int files : list of str @@ -370,7 +370,7 @@ The sections of the docstring are: Referencing sources of a temporary nature, like web pages, is discouraged. References are meant to augment the docstring, but should not be required to understand it. References are numbered, starting - from one, in the order in which they are cited. + from one, in the order in which they are cited. 11. **Examples** @@ -397,7 +397,7 @@ The sections of the docstring are: >>> import numpy.random >>> np.random.rand(2) - array([ 0.35773152, 0.38568979]) #random + array([ 0.35773152, 0.38568979]) #random You can run examples as doctests using:: @@ -427,7 +427,7 @@ The sections of the docstring are: *matplotlib* for plotting, but should import it explicitly, e.g., ``import matplotlib.pyplot as plt``. - + Documenting classes ------------------- @@ -498,7 +498,7 @@ Document these as you would any other function. Do not include ``self`` in the list of parameters. If a method has an equivalent function (which is the case for many ndarray methods for example), the function docstring should contain the detailed documentation, and the method docstring -should refer to it. Only put brief summary and **See Also** sections in the +should refer to it. Only put brief summary and **See Also** sections in the method docstring. @@ -514,7 +514,7 @@ instances a useful docstring, we do the following: * Multiple instances: If multiple instances are exposed, docstrings for each instance are written and assigned to the instances' ``__doc__`` attributes at run time. The class is documented as usual, and - the exposed instances can be mentioned in the **Notes** and **See Also** + the exposed instances can be mentioned in the **Notes** and **See Also** sections. @@ -553,16 +553,16 @@ hard to get a good overview of all functionality provided by looking at the source file(s) or the ``__all__`` dict. Note that license and author info, while often included in source files, do not -belong in docstrings. +belong in docstrings. Other points to keep in mind ---------------------------- -* Equations : as discussed in the **Notes** section above, LaTeX formatting - should be kept to a minimum. Often it's possible to show equations as - Python code or pseudo-code instead, which is much more readable in a - terminal. For inline display use double backticks (like ``y = np.sin(x)``). 
- For display with blank lines above and below, use a double colon and indent +* Equations : as discussed in the **Notes** section above, LaTeX formatting + should be kept to a minimum. Often it's possible to show equations as + Python code or pseudo-code instead, which is much more readable in a + terminal. For inline display use double backticks (like ``y = np.sin(x)``). + For display with blank lines above and below, use a double colon and indent the code, like:: end of previous sentence:: @@ -597,7 +597,7 @@ output. New paragraphs are marked with a blank line. Use *italics*, **bold**, and ``monospace`` if needed in any explanations (but not for variable names and doctest code or multi-line code). -Variable, module, function, and class names should be written between +Variable, module, function, and class names should be written between single back-ticks (```numpy```). A more extensive example of reST markup can be found in `this example diff --git a/doc/Py3K.rst.txt b/doc/Py3K.rst.txt index ad76fe240..e06461794 100644 --- a/doc/Py3K.rst.txt +++ b/doc/Py3K.rst.txt @@ -483,7 +483,7 @@ So what is done in ``PyArray_FromAny`` currently is that: 3118 buffers, so that:: array([some_3118_object]) - + will treat the object similarly as it would handle an `ndarray`. However, again, bytes (and unicode) have priority and will not be @@ -491,13 +491,13 @@ So what is done in ``PyArray_FromAny`` currently is that: This amounts to possible semantic changes: -- ``array(buffer)`` will no longer create an object array +- ``array(buffer)`` will no longer create an object array ``array([buffer], dtype='O')``, but will instead expand to a view on the buffer. .. todo:: - Take a second look at places that used PyBuffer_FromMemory and + Take a second look at places that used PyBuffer_FromMemory and PyBuffer_FromReadWriteMemory -- what can be done with these? .. todo:: @@ -633,7 +633,7 @@ Currently, the following is done: 1) Numpy's integer types no longer inherit from Python integer. 2) int is taken dtype-equivalent to NPY_LONG -3) ints are converted to NPY_LONG +3) ints are converted to NPY_LONG PyInt methods are currently replaced by PyLong, via macros in npy_3kcompat.h. diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index 2b66b5caa..bfea0e117 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -206,7 +206,7 @@ but ``test_evens`` is a generator that returns a series of tests, using A problem with generator tests can be that if a test is failing, it's hard to see for which parameters. To avoid this problem, ensure that: - - No computation related to the features tested is done in the + - No computation related to the features tested is done in the ``test_*`` generator function, but delegated to a corresponding ``check_*`` function (can be inside the generator, to share namespace). - The generators are used *solely* for loops over parameters. @@ -236,7 +236,7 @@ for numpy.lib:: The doctests are run as if they are in a fresh Python instance which has executed ``import numpy as np``. Tests that are part of a SciPy subpackage will have that subpackage already imported. E.g. for a test -in ``scipy/linalg/tests/``, the namespace will be created such that +in ``scipy/linalg/tests/``, the namespace will be created such that ``from scipy import linalg`` has already executed. 
diff --git a/doc/cython/README.txt b/doc/cython/README.txt index ff0abb0fe..f527e358b 100644 --- a/doc/cython/README.txt +++ b/doc/cython/README.txt @@ -17,4 +17,4 @@ To run it locally, simply type:: make help which shows you the currently available targets (these are just handy -shorthands for common commands).
\ No newline at end of file +shorthands for common commands). diff --git a/doc/neps/datetime-proposal.rst b/doc/neps/datetime-proposal.rst index f72bab3ae..3bd15db34 100644 --- a/doc/neps/datetime-proposal.rst +++ b/doc/neps/datetime-proposal.rst @@ -6,7 +6,7 @@ :Contact: oliphant@enthought.com :Date: 2009-06-09 -Revised only slightly from the third proposal by +Revised only slightly from the third proposal by :Author: Francesc Alted i Abad :Contact: faltet@pytables.com @@ -115,10 +115,10 @@ lower units respectively). Finally, a date-time data-type can be created with support for tracking sequential events within a basic unit: [D]//100, [Y]//4 (notice the required brackets). These ``modulo`` event units provide the following -interpretation to the date-time integer: +interpretation to the date-time integer: - * the divisor is the number of events in each period - * the (integer) quotient is the integer number representing the base units + * the divisor is the number of events in each period + * the (integer) quotient is the integer number representing the base units * the remainder is the particular event in the period. Modulo event-units can be combined with any derived units, but brackets @@ -140,7 +140,7 @@ i.e. you cannot specify 'M8[us]//5' as 'M8//5' or as '//5' ``datetime64`` ============== - + This dtype represents a time that is absolute (i.e. not relative). It is implemented internally as an ``int64`` type. The integer represents units from the internal POSIX epoch (see [3]_). Like POSIX, the @@ -565,27 +565,27 @@ Necessary changes to NumPy ========================== In order to facilitate the addition of the date-time data-types a few changes -to NumPy were made: +to NumPy were made: Addition of metadata to dtypes ------------------------------ All data-types now have a metadata dictionary. It can be set using the -metadata keyword during construction of the object. +metadata keyword during construction of the object. -Date-time data-types will place the word "__frequency__" in the meta-data +Date-time data-types will place the word "__frequency__" in the meta-data dictionary containing a 4-tuple with the following parameters. -(basic unit string (str), - number of multiples (int), - number of sub-divisions (int), - number of events (int)). +(basic unit string (str), + number of multiples (int), + number of sub-divisions (int), + number of events (int)). -Simple time units like 'D' for days will thus be specified by ('D', 1, 1, 1) in -the "__frequency__" key of the metadata. More complicated time units (like '[2W/5]//50') will be indicated by ('D', 2, 5, 50). +Simple time units like 'D' for days will thus be specified by ('D', 1, 1, 1) in +the "__frequency__" key of the metadata. More complicated time units (like '[2W/5]//50') will be indicated by ('D', 2, 5, 50). -The "__frequency__" key is reserved for metadata and cannot be set with a -dtype constructor. +The "__frequency__" key is reserved for metadata and cannot be set with a +dtype constructor. Ufunc interface extension @@ -595,18 +595,18 @@ ufuncs that have datetime and timedelta arguments can use the Python API during ufunc calls (to raise errors). There is a new ufunc C-API call to set the data for a particular -function pointer (for a particular set of data-types) to be the list of arrays -passed in to the ufunc. +function pointer (for a particular set of data-types) to be the list of arrays +passed in to the ufunc. 
Array Intervace Extensions -------------------------- The array interface is extended to both handle datetime and timedelta -typestr (including extended notation). +typestr (including extended notation). In addition, the typestr element of the __array_interface__ can be a tuple -as long as the version string is 4. The tuple is -('typestr', metadata dictionary). +as long as the version string is 4. The tuple is +('typestr', metadata dictionary). This extension to the typestr concept extends to the descr portion of the __array_interface__. Thus, the second element in the tuple of a @@ -626,13 +626,13 @@ Multiple of basic units are simple to handle. Divisors of basic units are harder to handle arbitrarily, but it is common to mentally think of a month as 1/12 of a year, or a day as 1/7 of a week. Therefore, the ability to specify a unit in terms of a fraction of a "larger" unit was -implemented. +implemented. The event notion (//50) was added to solve a use-case of a commercial sponsor of this NEP. The idea is to allow timestamp to carry both event number and timestamp information. The remainder carries the event number information, while the quotient carries the timestamp -information. +information. Why the ``origin`` metadata disappeared @@ -672,4 +672,3 @@ allowed. .. coding: utf-8 .. fill-column: 72 .. End: - diff --git a/doc/neps/datetime-proposal3.rst b/doc/neps/datetime-proposal3.rst index 6874aac13..ae98b8f03 100644 --- a/doc/neps/datetime-proposal3.rst +++ b/doc/neps/datetime-proposal3.rst @@ -572,4 +572,3 @@ for this proposal purposes. .. coding: utf-8 .. fill-column: 72 .. End: - diff --git a/doc/neps/deferred-ufunc-evaluation.rst b/doc/neps/deferred-ufunc-evaluation.rst index 95633cab5..634a1f238 100644 --- a/doc/neps/deferred-ufunc-evaluation.rst +++ b/doc/neps/deferred-ufunc-evaluation.rst @@ -97,7 +97,7 @@ Here's how it might be used in NumPy.:: There may be some surprising behavior, though.:: with np.deferredstate(True): - + d = a + b + c # d is deferred @@ -159,7 +159,7 @@ The API would be expanded with a number of functions. ``int PyArray_CalculateAllDeferred()`` This function forces all currently deferred calculations to occur. - + For example, if the error state is set to ignore all, and np.seterr({all='raise'}), this would change what happens to already deferred expressions. Thus, all the existing @@ -185,7 +185,7 @@ The API would be expanded with a number of functions. as an operand. The Python API would be expanded as follows. - + ``numpy.setdeferred(state)`` Enables or disables deferred evaluation. True means to always @@ -266,7 +266,7 @@ Other Implementation Details ============================ When a deferred array is created, it gets references to all the -operands of the UFunc, along with the UFunc itself. The +operands of the UFunc, along with the UFunc itself. The 'DeferredUsageCount' is incremented for each operand, and later gets decremented when the deferred expression is calculated or the deferred array is destroyed. diff --git a/doc/neps/generalized-ufuncs.rst b/doc/neps/generalized-ufuncs.rst index d9f3818b9..98e436990 100644 --- a/doc/neps/generalized-ufuncs.rst +++ b/doc/neps/generalized-ufuncs.rst @@ -91,14 +91,14 @@ dimensions. The signature is represented by a string of the following format: * Core dimensions of each input or output array are represented by a - list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar + list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar input/output is denoted by ``()``. 
Instead of ``i_1``, ``i_2``, etc, one can use any valid Python variable name. * Dimension lists for different arguments are separated by ``","``. Input/output arguments are separated by ``"->"``. * If one uses the same dimension name in multiple locations, this enforces the same size (or broadcastable size) of the corresponding - dimensions. + dimensions. The formal syntax of signatures is as follows:: diff --git a/doc/neps/groupby_additions.rst b/doc/neps/groupby_additions.rst index 28e1c29ac..a86bdd642 100644 --- a/doc/neps/groupby_additions.rst +++ b/doc/neps/groupby_additions.rst @@ -24,7 +24,7 @@ Suppose you have a NumPy structured array containing information about the number of purchases at several stores over multiple days. To be clear, the structured array data-type is: -dt = [('year', i2), ('month', i1), ('day', i1), ('time', float), +dt = [('year', i2), ('month', i1), ('day', i1), ('time', float), ('store', i4), ('SKU', 'S6'), ('number', i4)] Suppose there is a 1-d NumPy array of this data-type and you would like @@ -44,7 +44,7 @@ Ufunc methods proposed It is proposed to add two new reduce-style methods to the ufuncs: reduceby and reducein. The reducein method is intended to be a simpler to use version of reduceat, while the reduceby method is intended to -provide group-by capability on reductions. +provide group-by capability on reductions. reducein:: @@ -54,24 +54,24 @@ reducein:: The reduction occurs along the provided axis, using the provided data-type to calculate intermediate results, storing the result into - the array out (if provided). + the array out (if provided). The indices array provides the start and end indices for the reduction. If the length of the indices array is odd, then the final index provides the beginning point for the final reduction and the ending point is the end of arr. - This generalizes along the given axis, the behavior: + This generalizes along the given axis, the behavior: - [<ufunc>.reduce(arr[indices[2*i]:indices[2*i+1]]) + [<ufunc>.reduce(arr[indices[2*i]:indices[2*i+1]]) for i in range(len(indices)/2)] - This assumes indices is of even length + This assumes indices is of even length - Example: + Example: >>> a = [0,1,2,4,5,6,9,10] - >>> add.reducein(a,[0,3,2,5,-2]) - [3, 11, 19] + >>> add.reducein(a,[0,3,2,5,-2]) + [3, 11, 19] Notice that sum(a[0:3]) = 3; sum(a[2:5]) = 11; and sum(a[-2:]) = 19 @@ -79,7 +79,7 @@ reduceby:: <ufunc>.reduceby(arr, by, dtype=None, out=None) - Perform a reduction in arr over unique non-negative integers in by. + Perform a reduction in arr over unique non-negative integers in by. Let N=arr.ndim and M=by.ndim. Then, by.shape[:N] == arr.shape. @@ -109,4 +109,3 @@ edges:: .. coding: utf-8 .. fill-column: 72 .. End: - diff --git a/doc/neps/missing-data.rst b/doc/neps/missing-data.rst index 338a8da96..0d03d7774 100644 --- a/doc/neps/missing-data.rst +++ b/doc/neps/missing-data.rst @@ -105,12 +105,12 @@ an array of all missing values must produce the same result as the mean of a zero-sized array without missing value support. This kind of data can arise when conforming sparsely sampled data -into a regular sampling pattern, and is a useful interpretation to +into a regular sampling pattern, and is a useful interpretation to use when attempting to get best-guess answers for many statistical queries. In R, many functions take a parameter "na.rm=T" which means to treat the data as if the NA values are not part of the data set. This proposal -defines a standard parameter "skipna=True" for this same purpose. 
+defines a standard parameter "skipna=True" for this same purpose. ******************************************** Implementation Techniques For Missing Values @@ -1174,7 +1174,7 @@ the discussion are:: Olivier Delalleau Alan G Isaac E. Antero Tammi - Jason Grout + Jason Grout Dag Sverre Seljebotn Joe Harrington Gary Strangman diff --git a/doc/neps/new-iterator-ufunc.rst b/doc/neps/new-iterator-ufunc.rst index 6c4bb6488..b253e874b 100644 --- a/doc/neps/new-iterator-ufunc.rst +++ b/doc/neps/new-iterator-ufunc.rst @@ -375,7 +375,7 @@ In general, it should be possible to emulate the current behavior where it is desired, but I believe the default should be to produce and manipulate memory layouts which will give the best performance. -To support the new cache-friendly behavior, we introduce a new +To support the new cache-friendly behavior, we introduce a new option ‘K’ (for “keep”) for any ``order=`` parameter. The proposed ‘order=’ flags become as follows: @@ -691,7 +691,7 @@ Construction and Destruction If copying is allowed, it will make a temporary copy if the data is castable. If ``UPDATEIFCOPY`` is enabled, it will also copy the data back with another cast upon iterator destruction. - + If ``a_ndim`` is greater than zero, ``axes`` must also be provided. In this case, ``axes`` is an ``a_ndim``-sized array of ``op``'s axes. A value of -1 in ``axes`` means ``newaxis``. Within the ``axes`` @@ -748,7 +748,7 @@ Construction and Destruction for each ``op[i]``. The parameter ``oa_ndim``, when non-zero, specifies the number of - dimensions that will be iterated with customized broadcasting. + dimensions that will be iterated with customized broadcasting. If it is provided, ``op_axes`` must also be provided. These two parameters let you control in detail how the axes of the operand arrays get matched together and iterated. @@ -778,7 +778,7 @@ Construction and Destruction iterator, are: ``NPY_ITER_C_INDEX``, ``NPY_ITER_F_INDEX`` - + Causes the iterator to track an index matching C or Fortran order. These options are mutually exclusive. @@ -813,7 +813,7 @@ Construction and Destruction data type, calculated based on the ufunc type promotion rules. The flags for each operand must be set so that the appropriate casting is permitted, and copying or buffering must be enabled. - + If the common data type is known ahead of time, don't use this flag. Instead, set the requested dtype for all the operands. @@ -936,7 +936,7 @@ Construction and Destruction is flagged for writing and is copied, causes the data in a copy to be copied back to ``op[i]`` when the iterator is destroyed. - + If the operand is flagged as write-only and a copy is needed, an uninitialized temporary array will be created and then copied to back to ``op[i]`` on destruction, instead of doing @@ -988,7 +988,7 @@ Construction and Destruction For use with ``NPY_ITER_ALLOCATE``, this flag disables allocating an array subtype for the output, forcing it to be a straight ndarray. - + TODO: Maybe it would be better to introduce a function ``NpyIter_GetWrappedOutput`` and remove this flag? @@ -1009,7 +1009,7 @@ Construction and Destruction Makes a copy of the given iterator. This function is provided primarily to enable multi-threaded iteration of the data. - + *TODO*: Move this to a section about multithreaded iteration. The recommended approach to multithreaded iteration is to @@ -1052,7 +1052,7 @@ Construction and Destruction for any operand that later has ``NpyIter_UpdateIter`` called on it. 
The flags that may be passed in ``op_flags`` are - ``NPY_ITER_COPY``, ``NPY_ITER_UPDATEIFCOPY``, + ``NPY_ITER_COPY``, ``NPY_ITER_UPDATEIFCOPY``, ``NPY_ITER_NBO``, ``NPY_ITER_ALIGNED``, ``NPY_ITER_CONTIG``. ``int NpyIter_RemoveAxis(NpyIter *iter, npy_intp axis)`` @@ -1242,7 +1242,7 @@ Construction and Destruction When using ranged iteration to multithread a reduction, there are two possible ways to do the reduction: - + If there is a big reduction to a small output, make a temporary array initialized to the reduction unit for each thread, then have each thread reduce into its temporary. When that is complete, @@ -1341,14 +1341,14 @@ Construction and Destruction handle their processing manually. By calling this function before removing the axes, you can get the strides for the manual processing. - + Returns ``NULL`` on error. ``int NpyIter_GetShape(NpyIter *iter, npy_intp *outshape)`` Returns the broadcast shape of the iterator in ``outshape``. This can only be called on an iterator which supports coordinates. - + Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. ``PyArray_Descr **NpyIter_GetDescrArray(NpyIter *iter)`` @@ -1658,7 +1658,7 @@ First, here is the definition of the ``luf`` function.:: def luf(lamdaexpr, *args, **kwargs): """Lambda UFunc - + e.g. c = luf(lambda i,j:i+j, a, b, order='K', casting='safe', buffersize=8192) @@ -1721,7 +1721,7 @@ Python iterator protocol.:: it = np.nditer([x,y,out], [], [['readonly'],['readonly'],['writeonly','allocate']]) - + for (a, b, c) in it: addop(a, b, c) @@ -1734,7 +1734,7 @@ Here is the same function, but following the C-style pattern.:: it = np.nditer([x,y,out], [], [['readonly'],['readonly'],['writeonly','allocate']]) - + while not it.finished: addop(it[0], it[1], it[2]) it.iternext() @@ -1772,7 +1772,7 @@ of the iterator, designed to help speed up the inner loops, is the flag it = np.nditer([x,y,out], ['no_inner_iteration'], [['readonly'],['readonly'],['writeonly','allocate']]) - + for (a, b, c) in it: addop(a, b, c) @@ -1809,7 +1809,7 @@ modify ``iter_add`` once again.:: def iter_add_itview(x, y, out=None): it = np.nditer([x,y,out], [], [['readonly'],['readonly'],['writeonly','allocate']]) - + (a, b, c) = it.itviews np.add(a, b, c) @@ -1900,7 +1900,7 @@ easy it was to add an optional output parameter.:: ....: it[3] += it[0] ....: it.iternext() ....: return it.operands[3] - + In [6]: timeit composite_over_it(image1, image2) 1 loops, best of 3: 197 ms per loop @@ -1978,4 +1978,3 @@ a dual core machine.:: In [31]: ne.set_num_threads(1) In [32]: timeit composite_over_ne_it(image1,image2) 10 loops, best of 3: 91.1 ms per loop - diff --git a/doc/neps/structured_array_extensions.txt b/doc/neps/structured_array_extensions.txt index 7020c772c..716b98a76 100644 --- a/doc/neps/structured_array_extensions.txt +++ b/doc/neps/structured_array_extensions.txt @@ -6,4 +6,3 @@ 2. Allow structured arrays to be sliced by their column (i.e. one additional indexing option for structured arrays) so that a[:4, 'foo':'bar'] would be allowed. 
- diff --git a/doc/neps/warnfix.txt b/doc/neps/warnfix.txt index 03b809e3d..b6f307bcb 100644 --- a/doc/neps/warnfix.txt +++ b/doc/neps/warnfix.txt @@ -65,7 +65,7 @@ When applied to a variable, one would get: int foo(int * NPY_UNUSED(dummy)) -expanded to +expanded to int foo(int * __NPY_UNUSED_TAGGEDdummy __COMP_NPY_UNUSED) diff --git a/doc/newdtype_example/example.py b/doc/newdtype_example/example.py index 6e9bf4334..6be9caa75 100644 --- a/doc/newdtype_example/example.py +++ b/doc/newdtype_example/example.py @@ -8,7 +8,7 @@ import numpy as np # So, the setitem code will be called with scalars on the # wrong shaped array. # But we can get a view as an ndarray of the given type: -g = np.array([1,2,3,4,5,6,7,8]).view(ff.floatint_type) +g = np.array([1, 2, 3, 4, 5, 6, 7, 8]).view(ff.floatint_type) # Now, the elements will be the scalar type associated # with the ndarray. diff --git a/doc/newdtype_example/floatint/__init__.py b/doc/newdtype_example/floatint/__init__.py index ebede2753..1d0f69b67 100644 --- a/doc/newdtype_example/floatint/__init__.py +++ b/doc/newdtype_example/floatint/__init__.py @@ -1,3 +1 @@ from __future__ import division, absolute_import, print_function - - diff --git a/doc/newdtype_example/setup.py b/doc/newdtype_example/setup.py index 494343b28..2222f0de1 100644 --- a/doc/newdtype_example/setup.py +++ b/doc/newdtype_example/setup.py @@ -4,7 +4,7 @@ from numpy.distutils.core import setup def configuration(parent_package = '', top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('floatint',parent_package,top_path) + config = Configuration('floatint', parent_package, top_path) config.add_extension('floatint', sources = ['floatint.c']); diff --git a/doc/numpybook/comparison/ctypes/interface.py b/doc/numpybook/comparison/ctypes/interface.py index 34e34ca71..1fabe3170 100644 --- a/doc/numpybook/comparison/ctypes/interface.py +++ b/doc/numpybook/comparison/ctypes/interface.py @@ -49,7 +49,7 @@ def add(a, b): a = N.require(a, dtype, requires) b = N.require(b, dtype, requires) c = N.empty_like(a) - func(a,b,c,a.size) + func(a, b, c, a.size) return c def filter2d(a): diff --git a/doc/numpybook/comparison/timing.py b/doc/numpybook/comparison/timing.py index 0d47c7eba..1c70b48ac 100644 --- a/doc/numpybook/comparison/timing.py +++ b/doc/numpybook/comparison/timing.py @@ -43,7 +43,7 @@ b = N.zeros_like(a) filter.DFILTER2D(a,b) """ -N = [10,20,30,40,50,100,200,300, 400, 500] +N = [10, 20, 30, 40, 50, 100, 200, 300, 400, 500] res = {} @@ -57,6 +57,6 @@ for kind in ['f2py']:#['ctypes', 'pyrex', 'weave', 'f2py']: print(sys.path) for n in N: print("%s - %d" % (kind, n)) - t = timeit.Timer(eval('%s_run'%kind), eval('%s_pre %% (%d,%d)'%(kind,n,n))) - mytime = min(t.repeat(3,100)) + t = timeit.Timer(eval('%s_run'%kind), eval('%s_pre %% (%d,%d)'%(kind, n, n))) + mytime = min(t.repeat(3, 100)) res[kind].append(mytime) diff --git a/doc/numpybook/comparison/weave/filter.py b/doc/numpybook/comparison/weave/filter.py index bbdc9ea85..309764c29 100644 --- a/doc/numpybook/comparison/weave/filter.py +++ b/doc/numpybook/comparison/weave/filter.py @@ -20,5 +20,5 @@ def filter(a): } """ b = zeros_like(a) - weave.inline(code,['a','b']) + weave.inline(code, ['a', 'b']) return b diff --git a/doc/numpybook/comparison/weave/inline.py b/doc/numpybook/comparison/weave/inline.py index 3906553ed..6e98e6997 100644 --- a/doc/numpybook/comparison/weave/inline.py +++ b/doc/numpybook/comparison/weave/inline.py @@ -16,7 +16,7 @@ def example1(a): results[1] = 4.0; return_val = 
results; """ - return weave.inline(code,['a']) + return weave.inline(code, ['a']) def arr(a): if a.ndim != 2: @@ -36,16 +36,16 @@ def arr(a): } """ b = zeros_like(a) - weave.inline(code,['a','b']) + weave.inline(code, ['a', 'b']) return b a = [None]*10 print(example1(a)) print(a) -a = rand(512,512) +a = rand(512, 512) b = arr(a) -h = [[0.25,0.5,0.25],[0.5,1,0.5],[0.25,0.5,0.25]] +h = [[0.25, 0.5, 0.25], [0.5, 1, 0.5], [0.25, 0.5, 0.25]] import scipy.signal as ss -b2 = ss.convolve(h,a,'same') +b2 = ss.convolve(h, a, 'same') diff --git a/doc/numpybook/runcode.py b/doc/numpybook/runcode.py index 456846cad..c8c532728 100644 --- a/doc/numpybook/runcode.py +++ b/doc/numpybook/runcode.py @@ -131,7 +131,7 @@ def runpycode(lyxstr, name='MyCode'): def main(args): usage = "%prog {options} filename" parser = optparse.OptionParser(usage) - parser.add_option('-n','--name', default='MyCode') + parser.add_option('-n', '--name', default='MyCode') options, args = parser.parse_args(args) if len(args) < 1: @@ -143,7 +143,7 @@ def main(args): fid.close() print("Processing %s" % options.name) newstr = runpycode(str, options.name) - fid = file(args[0],'w') + fid = file(args[0], 'w') fid.write(newstr) fid.close() diff --git a/doc/postprocess.py b/doc/postprocess.py index 3955ad6c5..2e50c115e 100755 --- a/doc/postprocess.py +++ b/doc/postprocess.py @@ -44,7 +44,7 @@ def process_html(fn, lines): def process_tex(lines): """ Remove unnecessary section titles from the LaTeX file. - + """ new_lines = [] for line in lines: diff --git a/doc/records.rst.txt b/doc/records.rst.txt index 6c4824d41..a608880d7 100644 --- a/doc/records.rst.txt +++ b/doc/records.rst.txt @@ -84,4 +84,3 @@ reference. There is a new function and a new method of array objects both labelled dtypescr which can be used to try out the ``PyArray_DescrConverter``. - diff --git a/doc/source/conf.py b/doc/source/conf.py index 233f2e409..13341b56a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -84,7 +84,7 @@ themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme') if not os.path.isdir(themedir): raise RuntimeError("Get the scipy-sphinx-theme first, " "via git submodule init && git submodule update") - + html_theme = 'scipy' html_theme_path = [themedir] @@ -109,7 +109,7 @@ else: html_additional_pages = { 'index': 'indexcontent.html', -} +} html_title = "%s v%s Manual" % (project, version) html_static_path = ['_static'] diff --git a/doc/source/dev/gitwash/configure_git.rst b/doc/source/dev/gitwash/configure_git.rst index 7e8cf8cbd..c62f33671 100644 --- a/doc/source/dev/gitwash/configure_git.rst +++ b/doc/source/dev/gitwash/configure_git.rst @@ -16,7 +16,7 @@ Here is an example ``.gitconfig`` file:: [user] name = Your Name email = you@yourdomain.example.com - + [alias] ci = commit -a co = checkout @@ -24,7 +24,7 @@ Here is an example ``.gitconfig`` file:: stat = status -a br = branch wdiff = diff --color-words - + [core] editor = vim @@ -33,7 +33,7 @@ Here is an example ``.gitconfig`` file:: You can edit this file directly or you can use the ``git config --global`` command:: - + git config --global user.name "Your Name" git config --global user.email you@yourdomain.example.com git config --global alias.ci "commit -a" diff --git a/doc/source/dev/gitwash/development_workflow.rst b/doc/source/dev/gitwash/development_workflow.rst index 8606b9018..523e04c0a 100644 --- a/doc/source/dev/gitwash/development_workflow.rst +++ b/doc/source/dev/gitwash/development_workflow.rst @@ -16,7 +16,7 @@ Basic workflow In short: -1. 
Update your ``master`` branch if it's not up to date. +1. Update your ``master`` branch if it's not up to date. Then start a new *feature branch* for each set of edits that you do. See :ref:`below <making-a-new-feature-branch>`. @@ -100,7 +100,7 @@ In git >= 1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option:: git push --set-upstream origin my-new-feature - + From now on git_ will know that ``my-new-feature`` is related to the ``my-new-feature`` branch in your own github_ repo. @@ -144,7 +144,7 @@ In more detail #. Check what the actual changes are with ``git diff`` (`git diff`_). #. Add any new files to version control ``git add new_file_name`` (see - `git add`_). + `git add`_). #. To commit all modified files into the local copy of your repo,, do ``git commit -am 'A commit message'``. Note the ``-am`` options to ``commit``. The ``m`` flag just signals that you're going to type a @@ -155,7 +155,7 @@ In more detail `tangled working copy problem`_. The section on :ref:`commit messages <writing-the-commit-message>` below might also be useful. #. To push the changes up to your forked repo on github_, do a ``git - push`` (see `git push`). + push`` (see `git push`). .. _writing-the-commit-message: diff --git a/doc/source/dev/gitwash/git_development.rst b/doc/source/dev/gitwash/git_development.rst index fb997abec..ee7787fec 100644 --- a/doc/source/dev/gitwash/git_development.rst +++ b/doc/source/dev/gitwash/git_development.rst @@ -12,4 +12,3 @@ Contents: development_setup configure_git development_workflow - diff --git a/doc/source/dev/gitwash/git_resources.rst b/doc/source/dev/gitwash/git_resources.rst index ae350806e..5f0c1d020 100644 --- a/doc/source/dev/gitwash/git_resources.rst +++ b/doc/source/dev/gitwash/git_resources.rst @@ -9,9 +9,9 @@ Tutorials and summaries * `github help`_ has an excellent series of how-to guides. * `learn.github`_ has an excellent series of tutorials -* The `pro git book`_ is a good in-depth book on git. +* The `pro git book`_ is a good in-depth book on git. * A `git cheat sheet`_ is a page giving summaries of common commands. -* The `git user manual`_ +* The `git user manual`_ * The `git tutorial`_ * The `git community book`_ * `git ready`_ - a nice series of tutorials diff --git a/doc/source/dev/gitwash/index.rst b/doc/source/dev/gitwash/index.rst index f3038721e..9d733dd1c 100644 --- a/doc/source/dev/gitwash/index.rst +++ b/doc/source/dev/gitwash/index.rst @@ -12,5 +12,3 @@ Contents: following_latest git_development git_resources - - diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 82316144a..0e8050b01 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -322,7 +322,7 @@ dates, use :func:`busday_count`: >>> np.busday_count(np.datetime64('2011-07-18'), np.datetime64('2011-07-11')) -5 -If you have an array of datetime64 day values, and you want a count of +If you have an array of datetime64 day values, and you want a count of how many of them are valid dates, you can do this: .. admonition:: Example diff --git a/doc/source/reference/c-api.config.rst b/doc/source/reference/c-api.config.rst index 0989c53d7..0073b37a4 100644 --- a/doc/source/reference/c-api.config.rst +++ b/doc/source/reference/c-api.config.rst @@ -101,4 +101,3 @@ Platform information Returns the endianness of the current platform. One of :cdata:`NPY_CPU_BIG`, :cdata:`NPY_CPU_LITTLE`, or :cdata:`NPY_CPU_UNKNOWN_ENDIAN`. 
- diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api.coremath.rst index dba092a20..2d5aedc73 100644 --- a/doc/source/reference/c-api.coremath.rst +++ b/doc/source/reference/c-api.coremath.rst @@ -347,7 +347,7 @@ __ http://www.openexr.com/about.html Returns the value of x with the sign bit copied from y. Works for any value, including Inf and NaN. - + .. cfunction:: npy_half npy_half_spacing(npy_half h) This is the same for half-precision float as npy_spacing and npy_spacingf @@ -376,5 +376,4 @@ __ http://www.openexr.com/about.html .. cfunction:: npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) Low-level function which converts a 16-bit half-precision float - into a 64-bit double-precision float, stored as a uint64. - + into a 64-bit double-precision float, stored as a uint64. diff --git a/doc/source/reference/c-api.deprecations.rst b/doc/source/reference/c-api.deprecations.rst index a7960648a..a382017a2 100644 --- a/doc/source/reference/c-api.deprecations.rst +++ b/doc/source/reference/c-api.deprecations.rst @@ -14,14 +14,14 @@ Numarray. The core API originated with Numeric in 1995 and there are patterns such as the heavy use of macros written to mimic Python's C-API as well as account for compiler technology of the late 90's. There is also only a small group of volunteers who have had very little -time to spend on improving this API. +time to spend on improving this API. There is an ongoing effort to improve the API. It is important in this effort to ensure that code that compiles for NumPy 1.X continues to compile for NumPy 1.X. At the same time, certain API's will be marked as deprecated so that future-looking code can avoid these API's and -follow better practices. +follow better practices. Another important role played by deprecation markings in the C API is to move towards hiding internal details of the NumPy implementation. For those @@ -44,7 +44,7 @@ versions of NumPy should not have major C-API changes, however, that prevent code that worked on a previous minor release. For example, we will do our best to ensure that code that compiled and worked on NumPy 1.4 should continue to work on NumPy 1.7 (but perhaps with compiler -warnings). +warnings). To use the NPY_NO_DEPRECATED_API mechanism, you need to #define it to the target API version of NumPy before #including any NumPy headers. 
diff --git a/doc/source/reference/c-api.iterator.rst b/doc/source/reference/c-api.iterator.rst index 1e3565bc1..153792ac8 100644 --- a/doc/source/reference/c-api.iterator.rst +++ b/doc/source/reference/c-api.iterator.rst @@ -57,7 +57,7 @@ Here is a conversion table for which functions to use with the new iterator: :cfunc:`PyArray_ITER_GOTO1D` :cfunc:`NpyIter_GotoIndex` or :cfunc:`NpyIter_GotoIterIndex` :cfunc:`PyArray_ITER_NOTDONE` Return value of ``iternext`` function pointer -*Multi-iterator Functions* +*Multi-iterator Functions* :cfunc:`PyArray_MultiIterNew` :cfunc:`NpyIter_MultiNew` :cfunc:`PyArray_MultiIter_RESET` :cfunc:`NpyIter_Reset` :cfunc:`PyArray_MultiIter_NEXT` Function pointer from :cfunc:`NpyIter_GetIterNext` @@ -69,7 +69,7 @@ Here is a conversion table for which functions to use with the new iterator: :cfunc:`PyArray_MultiIter_NOTDONE` Return value of ``iternext`` function pointer :cfunc:`PyArray_Broadcast` Handled by :cfunc:`NpyIter_MultiNew` :cfunc:`PyArray_RemoveSmallest` Iterator flag :cdata:`NPY_ITER_EXTERNAL_LOOP` -*Other Functions* +*Other Functions* :cfunc:`PyArray_ConvertToCommonType` Iterator flag :cdata:`NPY_ITER_COMMON_DTYPE` ===================================== ============================================= @@ -649,7 +649,7 @@ Construction and Destruction -1 which means ``newaxis``. Within each ``op_axes[j]`` array, axes may not be repeated. The following example is how normal broadcasting applies to a 3-D array, a 2-D array, a 1-D array and a scalar. - + **Note**: Before NumPy 1.8 ``oa_ndim == 0` was used for signalling that that ``op_axes`` and ``itershape`` are unused. This is deprecated and should be replaced with -1. Better backward compatibility may be @@ -1229,7 +1229,7 @@ Functions For Iteration Gets the array of data pointers directly into the arrays (never into the buffers), corresponding to iteration index 0. - + These pointers are different from the pointers accepted by ``NpyIter_ResetBasePointers``, because the direction along some axes may have been reversed. diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst index 41af947c8..7413e3615 100644 --- a/doc/source/reference/routines.char.rst +++ b/doc/source/reference/routines.char.rst @@ -84,4 +84,3 @@ Convenience class :toctree: generated/ chararray - diff --git a/doc/source/reference/routines.datetime.rst b/doc/source/reference/routines.datetime.rst index aab6f1694..875ad1124 100644 --- a/doc/source/reference/routines.datetime.rst +++ b/doc/source/reference/routines.datetime.rst @@ -17,4 +17,3 @@ Business Day Functions is_busday busday_offset busday_count - diff --git a/doc/source/reference/routines.dual.rst b/doc/source/reference/routines.dual.rst index 456fc5c02..4ed7098d6 100644 --- a/doc/source/reference/routines.dual.rst +++ b/doc/source/reference/routines.dual.rst @@ -45,4 +45,3 @@ Other .. autosummary:: i0 - diff --git a/doc/source/reference/routines.emath.rst b/doc/source/reference/routines.emath.rst index 9f6c2aaa7..c0c5b61fc 100644 --- a/doc/source/reference/routines.emath.rst +++ b/doc/source/reference/routines.emath.rst @@ -7,4 +7,3 @@ Mathematical functions with automatic domain (:mod:`numpy.emath`) available after :mod:`numpy` is imported. .. 
automodule:: numpy.lib.scimath - diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 736755338..6eda37578 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -400,5 +400,3 @@ Miscellanea ma.ediff1d ma.indices ma.where - - diff --git a/doc/source/reference/routines.numarray.rst b/doc/source/reference/routines.numarray.rst index 36e5aa764..dab63fbdf 100644 --- a/doc/source/reference/routines.numarray.rst +++ b/doc/source/reference/routines.numarray.rst @@ -3,4 +3,3 @@ Numarray compatibility (:mod:`numpy.numarray`) ********************************************** .. automodule:: numpy.numarray - diff --git a/doc/source/reference/routines.oldnumeric.rst b/doc/source/reference/routines.oldnumeric.rst index d7f15bcfd..9cab2e9d9 100644 --- a/doc/source/reference/routines.oldnumeric.rst +++ b/doc/source/reference/routines.oldnumeric.rst @@ -5,4 +5,3 @@ Old Numeric compatibility (:mod:`numpy.oldnumeric`) .. currentmodule:: numpy .. automodule:: numpy.oldnumeric - diff --git a/doc/source/reference/swig.interface-file.rst b/doc/source/reference/swig.interface-file.rst index 8ef6c80ab..d835a4c4f 100644 --- a/doc/source/reference/swig.interface-file.rst +++ b/doc/source/reference/swig.interface-file.rst @@ -444,7 +444,7 @@ arrays with views into memory that is managed. See the discussion `here * ``(DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_ARRAY4)`` * ``(DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4)`` * ``(DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, DATA_TYPE** ARGOUTVIEWM_FARRAY4)`` - + Output Arrays ````````````` diff --git a/doc/source/reference/swig.testing.rst b/doc/source/reference/swig.testing.rst index 8b19e4b28..decc681c5 100644 --- a/doc/source/reference/swig.testing.rst +++ b/doc/source/reference/swig.testing.rst @@ -15,8 +15,8 @@ this results in 1,427 individual unit tests that are performed when To facilitate this many similar unit tests, some high-level programming techniques are employed, including C and `SWIG`_ macros, -as well as Python inheritance. The purpose of this document is to describe -the testing infrastructure employed to verify that the ``numpy.i`` +as well as Python inheritance. The purpose of this document is to describe +the testing infrastructure employed to verify that the ``numpy.i`` typemaps are working as expected. Testing Organization @@ -57,9 +57,9 @@ Two-dimensional arrays are tested in exactly the same manner. The above description applies, but with ``Matrix`` substituted for ``Vector``. For three-dimensional tests, substitute ``Tensor`` for ``Vector``. For four-dimensional tests, substitute ``SuperTensor`` -for ``Vector``. +for ``Vector``. For the descriptions that follow, we will reference the -``Vector`` tests, but the same information applies to ``Matrix``, +``Vector`` tests, but the same information applies to ``Matrix``, ``Tensor`` and ``SuperTensor`` tests. The command ``make test`` will ensure that all of the test software is @@ -108,7 +108,7 @@ Testing SWIG Interface Files ``Vector.i`` is a `SWIG`_ interface file that defines python module ``Vector``. It follows the conventions for using ``numpy.i`` as described in this chapter. It defines a `SWIG`_ macro -``%apply_numpy_typemaps`` that has a single argument ``TYPE``. +``%apply_numpy_typemaps`` that has a single argument ``TYPE``. 
It uses the `SWIG`_ directive ``%apply`` to apply the provided typemaps to the argument signatures found in ``Vector.h``. This macro is then implemented for all of the data types supported by diff --git a/doc/source/user/basics.io.rst b/doc/source/user/basics.io.rst index 73947a2ef..54a65662b 100644 --- a/doc/source/user/basics.io.rst +++ b/doc/source/user/basics.io.rst @@ -5,4 +5,4 @@ I/O with Numpy .. toctree:: :maxdepth: 2 - basics.io.genfromtxt
\ No newline at end of file + basics.io.genfromtxt diff --git a/doc/swig/test/setup.py b/doc/swig/test/setup.py index c54b42ed4..e39114f91 100755 --- a/doc/swig/test/setup.py +++ b/doc/swig/test/setup.py @@ -59,6 +59,6 @@ setup(name = "NumpyTypemapTests", author = "Bill Spotz", py_modules = ["Array", "Farray", "Vector", "Matrix", "Tensor", "Fortran"], - ext_modules = [_Array , _Farray , _Vector , _Matrix , _Tensor, + ext_modules = [_Array, _Farray, _Vector, _Matrix, _Tensor, _Fortran] ) diff --git a/doc/swig/test/testArray.py b/doc/swig/test/testArray.py index 278a75f7a..d986de3b3 100755 --- a/doc/swig/test/testArray.py +++ b/doc/swig/test/testArray.py @@ -115,7 +115,7 @@ class Array1TestCase(unittest.TestCase): a = self.array1.view() self.failUnless(isinstance(a, np.ndarray)) self.failUnless(len(a) == self.length) - self.failUnless((a == [1,2,3,4,5]).all()) + self.failUnless((a == [1, 2, 3, 4, 5]).all()) ###################################################################### @@ -138,7 +138,7 @@ class Array2TestCase(unittest.TestCase): def testConstructor2(self): "Test Array2 array constructor" - na = np.zeros((3,4), dtype="l") + na = np.zeros((3, 4), dtype="l") aa = Array.Array2(na) self.failUnless(isinstance(aa, Array.Array2)) diff --git a/doc/swig/test/testFarray.py b/doc/swig/test/testFarray.py index 3905e26bd..15683b70b 100755 --- a/doc/swig/test/testFarray.py +++ b/doc/swig/test/testFarray.py @@ -16,7 +16,7 @@ else: BadListError = ValueError # Add the distutils-generated build directory to the python search path and then # import the extension module libDir = "lib.%s-%s" % (get_platform(), sys.version[:3]) -sys.path.insert(0,os.path.join("build", libDir)) +sys.path.insert(0, os.path.join("build", libDir)) import Farray ###################################################################### @@ -36,7 +36,7 @@ class FarrayTestCase(unittest.TestCase): "Test Farray copy constructor" for i in range(self.nrows): for j in range(self.ncols): - self.array[i,j] = i + j + self.array[i, j] = i + j arrayCopy = Farray.Farray(self.array) self.failUnless(arrayCopy == self.array) @@ -66,10 +66,10 @@ class FarrayTestCase(unittest.TestCase): n = self.ncols for i in range(m): for j in range(n): - self.array[i,j] = i*j + self.array[i, j] = i*j for i in range(m): for j in range(n): - self.failUnless(self.array[i,j] == i*j) + self.failUnless(self.array[i, j] == i*j) def testSetBad1(self): "Test Farray __setitem__ method, negative row" @@ -114,7 +114,7 @@ class FarrayTestCase(unittest.TestCase): """ for i in range(self.nrows): for j in range(self.ncols): - self.array[i,j] = i+j + self.array[i, j] = i+j self.failUnless(self.array.asString() == result) def testStr(self): @@ -128,20 +128,20 @@ class FarrayTestCase(unittest.TestCase): """ for i in range(self.nrows): for j in range(self.ncols): - self.array[i,j] = i-j + self.array[i, j] = i-j self.failUnless(str(self.array) == result) def testView(self): "Test Farray view method" for i in range(self.nrows): for j in range(self.ncols): - self.array[i,j] = i+j + self.array[i, j] = i+j a = self.array.view() self.failUnless(isinstance(a, np.ndarray)) self.failUnless(a.flags.f_contiguous) for i in range(self.nrows): for j in range(self.ncols): - self.failUnless(a[i,j] == i+j) + self.failUnless(a[i, j] == i+j) ###################################################################### diff --git a/doc/swig/test/testFortran.py b/doc/swig/test/testFortran.py index 2175ad1bf..a42af950e 100644 --- a/doc/swig/test/testFortran.py +++ b/doc/swig/test/testFortran.py @@ -47,7 +47,7 @@ 
class FortranTestCase(unittest.TestCase): "Test Fortran matrix initialized from nested list fortranarray" print(self.typeStr, "... ", end=' ', file=sys.stderr) second = Fortran.__dict__[self.typeStr + "SecondElement"] - matrix = np.asfortranarray([[0,1,2],[3,4,5],[6,7,8]], self.typeCode) + matrix = np.asfortranarray([[0, 1, 2], [3, 4, 5], [6, 7, 8]], self.typeCode) self.assertEquals(second(matrix), 3) ###################################################################### diff --git a/doc/swig/test/testMatrix.py b/doc/swig/test/testMatrix.py index d1721941e..af234e0e9 100755 --- a/doc/swig/test/testMatrix.py +++ b/doc/swig/test/testMatrix.py @@ -29,7 +29,7 @@ class MatrixTestCase(unittest.TestCase): "Test det function" print(self.typeStr, "... ", end=' ', file=sys.stderr) det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7],[6,9]] + matrix = [[8, 7], [6, 9]] self.assertEquals(det(matrix), 30) # Test (type IN_ARRAY2[ANY][ANY]) typemap @@ -37,7 +37,7 @@ class MatrixTestCase(unittest.TestCase): "Test det function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7], ["e", "pi"]] + matrix = [[8, 7], ["e", "pi"]] self.assertRaises(BadListError, det, matrix) # Test (type IN_ARRAY2[ANY][ANY]) typemap @@ -45,7 +45,7 @@ class MatrixTestCase(unittest.TestCase): "Test det function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [8,7] + matrix = [8, 7] self.assertRaises(TypeError, det, matrix) # Test (type IN_ARRAY2[ANY][ANY]) typemap @@ -53,7 +53,7 @@ class MatrixTestCase(unittest.TestCase): "Test det function with wrong size" print(self.typeStr, "... ", end=' ', file=sys.stderr) det = Matrix.__dict__[self.typeStr + "Det"] - matrix = [[8,7,6], [5,4,3], [2,1,0]] + matrix = [[8, 7, 6], [5, 4, 3], [2, 1, 0]] self.assertRaises(TypeError, det, matrix) # Test (type IN_ARRAY2[ANY][ANY]) typemap @@ -68,7 +68,7 @@ class MatrixTestCase(unittest.TestCase): "Test max function" print(self.typeStr, "... ", end=' ', file=sys.stderr) max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,5,4],[3,2,1]] + matrix = [[6, 5, 4], [3, 2, 1]] self.assertEquals(max(matrix), 6) # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap @@ -76,7 +76,7 @@ class MatrixTestCase(unittest.TestCase): "Test max function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) max = Matrix.__dict__[self.typeStr + "Max"] - matrix = [[6,"five",4], ["three", 2, "one"]] + matrix = [[6, "five", 4], ["three", 2, "one"]] self.assertRaises(BadListError, max, matrix) # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap @@ -98,7 +98,7 @@ class MatrixTestCase(unittest.TestCase): "Test min function" print(self.typeStr, "... ", end=' ', file=sys.stderr) min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [[9,8],[7,6],[5,4]] + matrix = [[9, 8], [7, 6], [5, 4]] self.assertEquals(min(matrix), 4) # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap @@ -106,7 +106,7 @@ class MatrixTestCase(unittest.TestCase): "Test min function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) min = Matrix.__dict__[self.typeStr + "Min"] - matrix = [["nine","eight"], ["seven","six"]] + matrix = [["nine", "eight"], ["seven", "six"]] self.assertRaises(BadListError, min, matrix) # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap @@ -114,7 +114,7 @@ class MatrixTestCase(unittest.TestCase): "Test min function with wrong dimensions" print(self.typeStr, "... 
", end=' ', file=sys.stderr) min = Matrix.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [1,3,5,7,9]) + self.assertRaises(TypeError, min, [1, 3, 5, 7, 9]) # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap def testMinNonContainer(self): @@ -128,16 +128,16 @@ class MatrixTestCase(unittest.TestCase): "Test scale function" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode) - scale(matrix,4) - self.assertEquals((matrix == [[4,8,12],[8,4,8],[12,8,4]]).all(), True) + matrix = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], self.typeCode) + scale(matrix, 4) + self.assertEquals((matrix == [[4, 8, 12], [8, 4, 8], [12, 8, 4]]).all(), True) # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap def testScaleWrongDim(self): "Test scale function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([1,2,2,1],self.typeCode) + matrix = np.array([1, 2, 2, 1], self.typeCode) self.assertRaises(TypeError, scale, matrix) # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap @@ -145,7 +145,7 @@ class MatrixTestCase(unittest.TestCase): "Test scale function with wrong size" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2],[2,1]],self.typeCode) + matrix = np.array([[1, 2], [2, 1]], self.typeCode) self.assertRaises(TypeError, scale, matrix) # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap @@ -153,7 +153,7 @@ class MatrixTestCase(unittest.TestCase): "Test scale function with wrong type" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],'c') + matrix = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], 'c') self.assertRaises(TypeError, scale, matrix) # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap @@ -161,7 +161,7 @@ class MatrixTestCase(unittest.TestCase): "Test scale function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Matrix.__dict__[self.typeStr + "Scale"] - matrix = [[1,2,3],[2,1,2],[3,2,1]] + matrix = [[1, 2, 3], [2, 1, 2], [3, 2, 1]] self.assertRaises(TypeError, scale, matrix) # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap @@ -169,16 +169,16 @@ class MatrixTestCase(unittest.TestCase): "Test floor function" print(self.typeStr, "... ", end=' ', file=sys.stderr) floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([[6,7],[8,9]],self.typeCode) - floor(matrix,7) - np.testing.assert_array_equal(matrix, np.array([[7,7],[8,9]])) + matrix = np.array([[6, 7], [8, 9]], self.typeCode) + floor(matrix, 7) + np.testing.assert_array_equal(matrix, np.array([[7, 7], [8, 9]])) # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap def testFloorWrongDim(self): "Test floor function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([6,7,8,9],self.typeCode) + matrix = np.array([6, 7, 8, 9], self.typeCode) self.assertRaises(TypeError, floor, matrix) # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap @@ -186,7 +186,7 @@ class MatrixTestCase(unittest.TestCase): "Test floor function with wrong type" print(self.typeStr, "... 
", end=' ', file=sys.stderr) floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = np.array([[6,7], [8,9]],'c') + matrix = np.array([[6, 7], [8, 9]], 'c') self.assertRaises(TypeError, floor, matrix) # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap @@ -194,7 +194,7 @@ class MatrixTestCase(unittest.TestCase): "Test floor function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) floor = Matrix.__dict__[self.typeStr + "Floor"] - matrix = [[6,7], [8,9]] + matrix = [[6, 7], [8, 9]] self.assertRaises(TypeError, floor, matrix) # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap @@ -202,16 +202,16 @@ class MatrixTestCase(unittest.TestCase): "Test ceil function" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([[1,2],[3,4]],self.typeCode) - ceil(matrix,3) - np.testing.assert_array_equal(matrix, np.array([[1,2],[3,3]])) + matrix = np.array([[1, 2], [3, 4]], self.typeCode) + ceil(matrix, 3) + np.testing.assert_array_equal(matrix, np.array([[1, 2], [3, 3]])) # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap def testCeilWrongDim(self): "Test ceil function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([1,2,3,4],self.typeCode) + matrix = np.array([1, 2, 3, 4], self.typeCode) self.assertRaises(TypeError, ceil, matrix) # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap @@ -219,7 +219,7 @@ class MatrixTestCase(unittest.TestCase): "Test ceil function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = np.array([[1,2], [3,4]],'c') + matrix = np.array([[1, 2], [3, 4]], 'c') self.assertRaises(TypeError, ceil, matrix) # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap @@ -227,7 +227,7 @@ class MatrixTestCase(unittest.TestCase): "Test ceil function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Matrix.__dict__[self.typeStr + "Ceil"] - matrix = [[1,2], [3,4]] + matrix = [[1, 2], [3, 4]] self.assertRaises(TypeError, ceil, matrix) # Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap @@ -235,9 +235,9 @@ class MatrixTestCase(unittest.TestCase): "Test luSplit function" print(self.typeStr, "... ", end=' ', file=sys.stderr) luSplit = Matrix.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]]) - self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True) - self.assertEquals((upper == [[0,2,3],[0,0,6],[0,0,0]]).all(), True) + lower, upper = luSplit([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + self.assertEquals((lower == [[1, 0, 0], [4, 5, 0], [7, 8, 9]]).all(), True) + self.assertEquals((upper == [[0, 2, 3], [0, 0, 6], [0, 0, 0]]).all(), True) ###################################################################### diff --git a/doc/swig/test/testSuperTensor.py b/doc/swig/test/testSuperTensor.py index f4ae09e76..ff1f86df2 100644 --- a/doc/swig/test/testSuperTensor.py +++ b/doc/swig/test/testSuperTensor.py @@ -30,10 +30,10 @@ class SuperTensorTestCase(unittest.TestCase): "Test norm function" print >>sys.stderr, self.typeStr, "... ", norm = SuperTensor.__dict__[self.typeStr + "Norm"] - supertensor = np.arange(2*2*2*2,dtype=self.typeCode).reshape((2,2,2,2)) + supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)) #Note: cludge to get an answer of the same type as supertensor. 
#Answer is simply sqrt(sum(supertensor*supertensor)/16) - answer = np.array([np.sqrt(np.sum(supertensor.astype('d')*supertensor)/16.)],dtype=self.typeCode)[0] + answer = np.array([np.sqrt(np.sum(supertensor.astype('d')*supertensor)/16.)], dtype=self.typeCode)[0] self.assertAlmostEqual(norm(supertensor), answer, 6) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -41,7 +41,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test norm function with bad list" print >>sys.stderr, self.typeStr, "... ", norm = SuperTensor.__dict__[self.typeStr + "Norm"] - supertensor = [[[[0,"one"],[2,3]], [[3,"two"],[1,0]]],[[[0,"one"],[2,3]], [[3,"two"],[1,0]]]] + supertensor = [[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]], [[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]]] self.assertRaises(BadListError, norm, supertensor) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -49,7 +49,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test norm function with wrong dimensions" print >>sys.stderr, self.typeStr, "... ", norm = SuperTensor.__dict__[self.typeStr + "Norm"] - supertensor = np.arange(2*2*2,dtype=self.typeCode).reshape((2,2,2)) + supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2)) self.assertRaises(TypeError, norm, supertensor) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -57,7 +57,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test norm function with wrong size" print >>sys.stderr, self.typeStr, "... ", norm = SuperTensor.__dict__[self.typeStr + "Norm"] - supertensor = np.arange(3*2*2,dtype=self.typeCode).reshape((3,2,2)) + supertensor = np.arange(3*2*2, dtype=self.typeCode).reshape((3, 2, 2)) self.assertRaises(TypeError, norm, supertensor) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -72,7 +72,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test max function" print >>sys.stderr, self.typeStr, "... ", max = SuperTensor.__dict__[self.typeStr + "Max"] - supertensor = [[[[1,2], [3,4]], [[5,6], [7,8]]],[[[1,2], [3,4]], [[5,6], [7,8]]]] + supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]] self.assertEquals(max(supertensor), 8) # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -80,7 +80,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test max function with bad list" print >>sys.stderr, self.typeStr, "... ", max = SuperTensor.__dict__[self.typeStr + "Max"] - supertensor = [[[[1,"two"], [3,4]], [[5,"six"], [7,8]]],[[[1,"two"], [3,4]], [[5,"six"], [7,8]]]] + supertensor = [[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]], [[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]]] self.assertRaises(BadListError, max, supertensor) # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -102,7 +102,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test min function" print >>sys.stderr, self.typeStr, "... ", min = SuperTensor.__dict__[self.typeStr + "Min"] - supertensor = [[[[9,8], [7,6]], [[5,4], [3,2]]],[[[9,8], [7,6]], [[5,4], [3,2]]]] + supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]], [[[9, 8], [7, 6]], [[5, 4], [3, 2]]]] self.assertEquals(min(supertensor), 2) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap @@ -110,7 +110,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test min function with bad list" print >>sys.stderr, self.typeStr, "... 
", min = SuperTensor.__dict__[self.typeStr + "Min"] - supertensor = [[[["nine",8], [7,6]], [["five",4], [3,2]]],[[["nine",8], [7,6]], [["five",4], [3,2]]]] + supertensor = [[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]], [[["nine", 8], [7, 6]], [["five", 4], [3, 2]]]] self.assertRaises(BadListError, min, supertensor) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap @@ -125,16 +125,16 @@ class SuperTensorTestCase(unittest.TestCase): "Test min function with wrong dimensions" print >>sys.stderr, self.typeStr, "... ", min = SuperTensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [[1,3],[5,7]]) + self.assertRaises(TypeError, min, [[1, 3], [5, 7]]) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap def testScale(self): "Test scale function" print >>sys.stderr, self.typeStr, "... ", scale = SuperTensor.__dict__[self.typeStr + "Scale"] - supertensor = np.arange(3*3*3*3,dtype=self.typeCode).reshape((3,3,3,3)) + supertensor = np.arange(3*3*3*3, dtype=self.typeCode).reshape((3, 3, 3, 3)) answer = supertensor.copy()*4 - scale(supertensor,4) + scale(supertensor, 4) self.assertEquals((supertensor == answer).all(), True) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -142,9 +142,9 @@ class SuperTensorTestCase(unittest.TestCase): "Test scale function with wrong type" print >>sys.stderr, self.typeStr, "... ", scale = SuperTensor.__dict__[self.typeStr + "Scale"] - supertensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],'c') + supertensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]], + [[0, 1, 0], [1, 0, 1], [0, 1, 0]], + [[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c') self.assertRaises(TypeError, scale, supertensor) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -152,8 +152,8 @@ class SuperTensorTestCase(unittest.TestCase): "Test scale function with wrong dimensions" print >>sys.stderr, self.typeStr, "... ", scale = SuperTensor.__dict__[self.typeStr + "Scale"] - supertensor = np.array([[1,0,1], [0,1,0], [1,0,1], - [0,1,0], [1,0,1], [0,1,0]],self.typeCode) + supertensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1], + [0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode) self.assertRaises(TypeError, scale, supertensor) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -161,9 +161,9 @@ class SuperTensorTestCase(unittest.TestCase): "Test scale function with wrong size" print >>sys.stderr, self.typeStr, "... ", scale = SuperTensor.__dict__[self.typeStr + "Scale"] - supertensor = np.array([[[1,0], [0,1], [1,0]], - [[0,1], [1,0], [0,1]], - [[1,0], [0,1], [1,0]]],self.typeCode) + supertensor = np.array([[[1, 0], [0, 1], [1, 0]], + [[0, 1], [1, 0], [0, 1]], + [[1, 0], [0, 1], [1, 0]]], self.typeCode) self.assertRaises(TypeError, scale, supertensor) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -177,12 +177,12 @@ class SuperTensorTestCase(unittest.TestCase): def testFloor(self): "Test floor function" print >>sys.stderr, self.typeStr, "... ", - supertensor = np.arange(2*2*2*2,dtype=self.typeCode).reshape((2,2,2,2)) + supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)) answer = supertensor.copy() answer[answer < 4] = 4 floor = SuperTensor.__dict__[self.typeStr + "Floor"] - floor(supertensor,4) + floor(supertensor, 4) np.testing.assert_array_equal(supertensor, answer) # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -190,7 +190,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test floor function with wrong type" print >>sys.stderr, self.typeStr, "... 
", floor = SuperTensor.__dict__[self.typeStr + "Floor"] - supertensor = np.ones(2*2*2*2,dtype='c').reshape((2,2,2,2)) + supertensor = np.ones(2*2*2*2, dtype='c').reshape((2, 2, 2, 2)) self.assertRaises(TypeError, floor, supertensor) # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -198,7 +198,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test floor function with wrong type" print >>sys.stderr, self.typeStr, "... ", floor = SuperTensor.__dict__[self.typeStr + "Floor"] - supertensor = np.arange(2*2*2,dtype=self.typeCode).reshape((2,2,2)) + supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2)) self.assertRaises(TypeError, floor, supertensor) # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -212,11 +212,11 @@ class SuperTensorTestCase(unittest.TestCase): def testCeil(self): "Test ceil function" print >>sys.stderr, self.typeStr, "... ", - supertensor = np.arange(2*2*2*2,dtype=self.typeCode).reshape((2,2,2,2)) + supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)) answer = supertensor.copy() answer[answer > 5] = 5 ceil = SuperTensor.__dict__[self.typeStr + "Ceil"] - ceil(supertensor,5) + ceil(supertensor, 5) np.testing.assert_array_equal(supertensor, answer) # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap @@ -224,7 +224,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test ceil function with wrong type" print >>sys.stderr, self.typeStr, "... ", ceil = SuperTensor.__dict__[self.typeStr + "Ceil"] - supertensor = np.ones(2*2*2*2,'c').reshape((2,2,2,2)) + supertensor = np.ones(2*2*2*2, 'c').reshape((2, 2, 2, 2)) self.assertRaises(TypeError, ceil, supertensor) # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap @@ -232,7 +232,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test ceil function with wrong dimensions" print >>sys.stderr, self.typeStr, "... ", ceil = SuperTensor.__dict__[self.typeStr + "Ceil"] - supertensor = np.arange(2*2*2,dtype=self.typeCode).reshape((2,2,2)) + supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2)) self.assertRaises(TypeError, ceil, supertensor) # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap @@ -240,7 +240,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test ceil function with non-array" print >>sys.stderr, self.typeStr, "... ", ceil = SuperTensor.__dict__[self.typeStr + "Ceil"] - supertensor = np.arange(2*2*2*2,dtype=self.typeCode).reshape((2,2,2,2)).tolist() + supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)).tolist() self.assertRaises(TypeError, ceil, supertensor) # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap @@ -248,7 +248,7 @@ class SuperTensorTestCase(unittest.TestCase): "Test luSplit function" print >>sys.stderr, self.typeStr, "... ", luSplit = SuperTensor.__dict__[self.typeStr + "LUSplit"] - supertensor = np.ones(2*2*2*2,dtype=self.typeCode).reshape((2,2,2,2)) + supertensor = np.ones(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)) answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]] answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]] lower, upper = luSplit(supertensor) diff --git a/doc/swig/test/testTensor.py b/doc/swig/test/testTensor.py index b6dd2e98a..a9390ebb1 100755 --- a/doc/swig/test/testTensor.py +++ b/doc/swig/test/testTensor.py @@ -31,8 +31,8 @@ class TensorTestCase(unittest.TestCase): "Test norm function" print(self.typeStr, "... 
", end=' ', file=sys.stderr) norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1], [2,3]], - [[3,2], [1,0]]] + tensor = [[[0, 1], [2, 3]], + [[3, 2], [1, 0]]] if isinstance(self.result, int): self.assertEquals(norm(tensor), self.result) else: @@ -43,8 +43,8 @@ class TensorTestCase(unittest.TestCase): "Test norm function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,"one"],[2,3]], - [[3,"two"],[1,0]]] + tensor = [[[0, "one"], [2, 3]], + [[3, "two"], [1, 0]]] self.assertRaises(BadListError, norm, tensor) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -52,8 +52,8 @@ class TensorTestCase(unittest.TestCase): "Test norm function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[0,1,2,3], - [3,2,1,0]] + tensor = [[0, 1, 2, 3], + [3, 2, 1, 0]] self.assertRaises(TypeError, norm, tensor) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -61,8 +61,8 @@ class TensorTestCase(unittest.TestCase): "Test norm function with wrong size" print(self.typeStr, "... ", end=' ', file=sys.stderr) norm = Tensor.__dict__[self.typeStr + "Norm"] - tensor = [[[0,1,0], [2,3,2]], - [[3,2,3], [1,0,1]]] + tensor = [[[0, 1, 0], [2, 3, 2]], + [[3, 2, 3], [1, 0, 1]]] self.assertRaises(TypeError, norm, tensor) # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap @@ -77,8 +77,8 @@ class TensorTestCase(unittest.TestCase): "Test max function" print(self.typeStr, "... ", end=' ', file=sys.stderr) max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,2], [3,4]], - [[5,6], [7,8]]] + tensor = [[[1, 2], [3, 4]], + [[5, 6], [7, 8]]] self.assertEquals(max(tensor), 8) # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -86,8 +86,8 @@ class TensorTestCase(unittest.TestCase): "Test max function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) max = Tensor.__dict__[self.typeStr + "Max"] - tensor = [[[1,"two"], [3,4]], - [[5,"six"], [7,8]]] + tensor = [[[1, "two"], [3, 4]], + [[5, "six"], [7, 8]]] self.assertRaises(BadListError, max, tensor) # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -109,8 +109,8 @@ class TensorTestCase(unittest.TestCase): "Test min function" print(self.typeStr, "... ", end=' ', file=sys.stderr) min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] + tensor = [[[9, 8], [7, 6]], + [[5, 4], [3, 2]]] self.assertEquals(min(tensor), 2) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap @@ -118,8 +118,8 @@ class TensorTestCase(unittest.TestCase): "Test min function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) min = Tensor.__dict__[self.typeStr + "Min"] - tensor = [[["nine",8], [7,6]], - [["five",4], [3,2]]] + tensor = [[["nine", 8], [7, 6]], + [["five", 4], [3, 2]]] self.assertRaises(BadListError, min, tensor) # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap @@ -134,29 +134,29 @@ class TensorTestCase(unittest.TestCase): "Test min function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) min = Tensor.__dict__[self.typeStr + "Min"] - self.assertRaises(TypeError, min, [[1,3],[5,7]]) + self.assertRaises(TypeError, min, [[1, 3], [5, 7]]) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap def testScale(self): "Test scale function" print(self.typeStr, "... 
", end=' ', file=sys.stderr) scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],self.typeCode) - scale(tensor,4) - self.assertEquals((tensor == [[[4,0,4], [0,4,0], [4,0,4]], - [[0,4,0], [4,0,4], [0,4,0]], - [[4,0,4], [0,4,0], [4,0,4]]]).all(), True) + tensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]], + [[0, 1, 0], [1, 0, 1], [0, 1, 0]], + [[1, 0, 1], [0, 1, 0], [1, 0, 1]]], self.typeCode) + scale(tensor, 4) + self.assertEquals((tensor == [[[4, 0, 4], [0, 4, 0], [4, 0, 4]], + [[0, 4, 0], [4, 0, 4], [0, 4, 0]], + [[4, 0, 4], [0, 4, 0], [4, 0, 4]]]).all(), True) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap def testScaleWrongType(self): "Test scale function with wrong type" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]], - [[0,1,0], [1,0,1], [0,1,0]], - [[1,0,1], [0,1,0], [1,0,1]]],'c') + tensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]], + [[0, 1, 0], [1, 0, 1], [0, 1, 0]], + [[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c') self.assertRaises(TypeError, scale, tensor) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -164,8 +164,8 @@ class TensorTestCase(unittest.TestCase): "Test scale function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[1,0,1], [0,1,0], [1,0,1], - [0,1,0], [1,0,1], [0,1,0]],self.typeCode) + tensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1], + [0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode) self.assertRaises(TypeError, scale, tensor) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -173,9 +173,9 @@ class TensorTestCase(unittest.TestCase): "Test scale function with wrong size" print(self.typeStr, "... ", end=' ', file=sys.stderr) scale = Tensor.__dict__[self.typeStr + "Scale"] - tensor = np.array([[[1,0], [0,1], [1,0]], - [[0,1], [1,0], [0,1]], - [[1,0], [0,1], [1,0]]],self.typeCode) + tensor = np.array([[[1, 0], [0, 1], [1, 0]], + [[0, 1], [1, 0], [0, 1]], + [[1, 0], [0, 1], [1, 0]]], self.typeCode) self.assertRaises(TypeError, scale, tensor) # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap @@ -190,19 +190,19 @@ class TensorTestCase(unittest.TestCase): "Test floor function" print(self.typeStr, "... ", end=' ', file=sys.stderr) floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[[1,2], [3,4]], - [[5,6], [7,8]]],self.typeCode) - floor(tensor,4) - np.testing.assert_array_equal(tensor, np.array([[[4,4], [4,4]], - [[5,6], [7,8]]])) + tensor = np.array([[[1, 2], [3, 4]], + [[5, 6], [7, 8]]], self.typeCode) + floor(tensor, 4) + np.testing.assert_array_equal(tensor, np.array([[[4, 4], [4, 4]], + [[5, 6], [7, 8]]])) # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap def testFloorWrongType(self): "Test floor function with wrong type" print(self.typeStr, "... ", end=' ', file=sys.stderr) floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[[1,2], [3,4]], - [[5,6], [7,8]]],'c') + tensor = np.array([[[1, 2], [3, 4]], + [[5, 6], [7, 8]]], 'c') self.assertRaises(TypeError, floor, tensor) # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -210,7 +210,7 @@ class TensorTestCase(unittest.TestCase): "Test floor function with wrong type" print(self.typeStr, "... 
", end=' ', file=sys.stderr) floor = Tensor.__dict__[self.typeStr + "Floor"] - tensor = np.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode) + tensor = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], self.typeCode) self.assertRaises(TypeError, floor, tensor) # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap @@ -225,19 +225,19 @@ class TensorTestCase(unittest.TestCase): "Test ceil function" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[[9,8], [7,6]], - [[5,4], [3,2]]],self.typeCode) - ceil(tensor,5) - np.testing.assert_array_equal(tensor, np.array([[[5,5], [5,5]], - [[5,4], [3,2]]])) + tensor = np.array([[[9, 8], [7, 6]], + [[5, 4], [3, 2]]], self.typeCode) + ceil(tensor, 5) + np.testing.assert_array_equal(tensor, np.array([[[5, 5], [5, 5]], + [[5, 4], [3, 2]]])) # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap def testCeilWrongType(self): "Test ceil function with wrong type" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[[9,8], [7,6]], - [[5,4], [3,2]]],'c') + tensor = np.array([[[9, 8], [7, 6]], + [[5, 4], [3, 2]]], 'c') self.assertRaises(TypeError, ceil, tensor) # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap @@ -245,7 +245,7 @@ class TensorTestCase(unittest.TestCase): "Test ceil function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = np.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode) + tensor = np.array([[9, 8], [7, 6], [5, 4], [3, 2]], self.typeCode) self.assertRaises(TypeError, ceil, tensor) # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap @@ -253,8 +253,8 @@ class TensorTestCase(unittest.TestCase): "Test ceil function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) ceil = Tensor.__dict__[self.typeStr + "Ceil"] - tensor = [[[9,8], [7,6]], - [[5,4], [3,2]]] + tensor = [[[9, 8], [7, 6]], + [[5, 4], [3, 2]]] self.assertRaises(TypeError, ceil, tensor) # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap @@ -262,12 +262,12 @@ class TensorTestCase(unittest.TestCase): "Test luSplit function" print(self.typeStr, "... ", end=' ', file=sys.stderr) luSplit = Tensor.__dict__[self.typeStr + "LUSplit"] - lower, upper = luSplit([[[1,1], [1,1]], - [[1,1], [1,1]]]) - self.assertEquals((lower == [[[1,1], [1,0]], - [[1,0], [0,0]]]).all(), True) - self.assertEquals((upper == [[[0,0], [0,1]], - [[0,1], [1,1]]]).all(), True) + lower, upper = luSplit([[[1, 1], [1, 1]], + [[1, 1], [1, 1]]]) + self.assertEquals((lower == [[[1, 1], [1, 0]], + [[1, 0], [0, 0]]]).all(), True) + self.assertEquals((upper == [[[0, 0], [0, 1]], + [[0, 1], [1, 1]]]).all(), True) ###################################################################### diff --git a/doc/swig/test/testVector.py b/doc/swig/test/testVector.py index d644e464c..e7d019cf7 100755 --- a/doc/swig/test/testVector.py +++ b/doc/swig/test/testVector.py @@ -50,7 +50,7 @@ class VectorTestCase(unittest.TestCase): "Test length function with wrong dimensions" print(self.typeStr, "... 
", end=' ', file=sys.stderr) length = Vector.__dict__[self.typeStr + "Length"] - self.assertRaises(TypeError, length, [[1,2], [3,4]]) + self.assertRaises(TypeError, length, [[1, 2], [3, 4]]) # Test the (type IN_ARRAY1[ANY]) typemap def testLengthNonContainer(self): @@ -64,21 +64,21 @@ class VectorTestCase(unittest.TestCase): "Test prod function" print(self.typeStr, "... ", end=' ', file=sys.stderr) prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertEquals(prod([1,2,3,4]), 24) + self.assertEquals(prod([1, 2, 3, 4]), 24) # Test the (type* IN_ARRAY1, int DIM1) typemap def testProdBadList(self): "Test prod function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]]) + self.assertRaises(BadListError, prod, [[1, "two"], ["e", "pi"]]) # Test the (type* IN_ARRAY1, int DIM1) typemap def testProdWrongDim(self): "Test prod function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) prod = Vector.__dict__[self.typeStr + "Prod"] - self.assertRaises(TypeError, prod, [[1,2], [8,9]]) + self.assertRaises(TypeError, prod, [[1, 2], [8, 9]]) # Test the (type* IN_ARRAY1, int DIM1) typemap def testProdNonContainer(self): @@ -92,21 +92,21 @@ class VectorTestCase(unittest.TestCase): "Test sum function" print(self.typeStr, "... ", end=' ', file=sys.stderr) sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertEquals(sum([5,6,7,8]), 26) + self.assertEquals(sum([5, 6, 7, 8]), 26) # Test the (int DIM1, type* IN_ARRAY1) typemap def testSumBadList(self): "Test sum function with bad list" print(self.typeStr, "... ", end=' ', file=sys.stderr) sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(BadListError, sum, [3,4, 5, "pi"]) + self.assertRaises(BadListError, sum, [3, 4, 5, "pi"]) # Test the (int DIM1, type* IN_ARRAY1) typemap def testSumWrongDim(self): "Test sum function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) sum = Vector.__dict__[self.typeStr + "Sum"] - self.assertRaises(TypeError, sum, [[3,4], [5,6]]) + self.assertRaises(TypeError, sum, [[3, 4], [5, 6]]) # Test the (int DIM1, type* IN_ARRAY1) typemap def testSumNonContainer(self): @@ -120,16 +120,16 @@ class VectorTestCase(unittest.TestCase): "Test reverse function" print(self.typeStr, "... ", end=' ', file=sys.stderr) reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([1,2,4],self.typeCode) + vector = np.array([1, 2, 4], self.typeCode) reverse(vector) - self.assertEquals((vector == [4,2,1]).all(), True) + self.assertEquals((vector == [4, 2, 1]).all(), True) # Test the (type INPLACE_ARRAY1[ANY]) typemap def testReverseWrongDim(self): "Test reverse function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([[1,2], [3,4]],self.typeCode) + vector = np.array([[1, 2], [3, 4]], self.typeCode) self.assertRaises(TypeError, reverse, vector) # Test the (type INPLACE_ARRAY1[ANY]) typemap @@ -137,7 +137,7 @@ class VectorTestCase(unittest.TestCase): "Test reverse function with wrong size" print(self.typeStr, "... 
", end=' ', file=sys.stderr) reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([9,8,7,6,5,4],self.typeCode) + vector = np.array([9, 8, 7, 6, 5, 4], self.typeCode) self.assertRaises(TypeError, reverse, vector) # Test the (type INPLACE_ARRAY1[ANY]) typemap @@ -145,7 +145,7 @@ class VectorTestCase(unittest.TestCase): "Test reverse function with wrong type" print(self.typeStr, "... ", end=' ', file=sys.stderr) reverse = Vector.__dict__[self.typeStr + "Reverse"] - vector = np.array([1,2,4],'c') + vector = np.array([1, 2, 4], 'c') self.assertRaises(TypeError, reverse, vector) # Test the (type INPLACE_ARRAY1[ANY]) typemap @@ -153,23 +153,23 @@ class VectorTestCase(unittest.TestCase): "Test reverse function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) reverse = Vector.__dict__[self.typeStr + "Reverse"] - self.assertRaises(TypeError, reverse, [2,4,6]) + self.assertRaises(TypeError, reverse, [2, 4, 6]) # Test the (type* INPLACE_ARRAY1, int DIM1) typemap def testOnes(self): "Test ones function" print(self.typeStr, "... ", end=' ', file=sys.stderr) ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros(5,self.typeCode) + vector = np.zeros(5, self.typeCode) ones(vector) - np.testing.assert_array_equal(vector, np.array([1,1,1,1,1])) + np.testing.assert_array_equal(vector, np.array([1, 1, 1, 1, 1])) # Test the (type* INPLACE_ARRAY1, int DIM1) typemap def testOnesWrongDim(self): "Test ones function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros((5,5),self.typeCode) + vector = np.zeros((5, 5), self.typeCode) self.assertRaises(TypeError, ones, vector) # Test the (type* INPLACE_ARRAY1, int DIM1) typemap @@ -177,7 +177,7 @@ class VectorTestCase(unittest.TestCase): "Test ones function with wrong type" print(self.typeStr, "... ", end=' ', file=sys.stderr) ones = Vector.__dict__[self.typeStr + "Ones"] - vector = np.zeros((5,5),'c') + vector = np.zeros((5, 5), 'c') self.assertRaises(TypeError, ones, vector) # Test the (type* INPLACE_ARRAY1, int DIM1) typemap @@ -185,23 +185,23 @@ class VectorTestCase(unittest.TestCase): "Test ones function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) ones = Vector.__dict__[self.typeStr + "Ones"] - self.assertRaises(TypeError, ones, [2,4,6,8]) + self.assertRaises(TypeError, ones, [2, 4, 6, 8]) # Test the (int DIM1, type* INPLACE_ARRAY1) typemap def testZeros(self): "Test zeros function" print(self.typeStr, "... ", end=' ', file=sys.stderr) zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones(5,self.typeCode) + vector = np.ones(5, self.typeCode) zeros(vector) - np.testing.assert_array_equal(vector, np.array([0,0,0,0,0])) + np.testing.assert_array_equal(vector, np.array([0, 0, 0, 0, 0])) # Test the (int DIM1, type* INPLACE_ARRAY1) typemap def testZerosWrongDim(self): "Test zeros function with wrong dimensions" print(self.typeStr, "... ", end=' ', file=sys.stderr) zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones((5,5),self.typeCode) + vector = np.ones((5, 5), self.typeCode) self.assertRaises(TypeError, zeros, vector) # Test the (int DIM1, type* INPLACE_ARRAY1) typemap @@ -209,7 +209,7 @@ class VectorTestCase(unittest.TestCase): "Test zeros function with wrong type" print(self.typeStr, "... 
", end=' ', file=sys.stderr) zeros = Vector.__dict__[self.typeStr + "Zeros"] - vector = np.ones(6,'c') + vector = np.ones(6, 'c') self.assertRaises(TypeError, zeros, vector) # Test the (int DIM1, type* INPLACE_ARRAY1) typemap @@ -217,16 +217,16 @@ class VectorTestCase(unittest.TestCase): "Test zeros function with non-array" print(self.typeStr, "... ", end=' ', file=sys.stderr) zeros = Vector.__dict__[self.typeStr + "Zeros"] - self.assertRaises(TypeError, zeros, [1,3,5,7,9]) + self.assertRaises(TypeError, zeros, [1, 3, 5, 7, 9]) # Test the (type ARGOUT_ARRAY1[ANY]) typemap def testEOSplit(self): "Test eoSplit function" print(self.typeStr, "... ", end=' ', file=sys.stderr) eoSplit = Vector.__dict__[self.typeStr + "EOSplit"] - even, odd = eoSplit([1,2,3]) - self.assertEquals((even == [1,0,3]).all(), True) - self.assertEquals((odd == [0,2,0]).all(), True) + even, odd = eoSplit([1, 2, 3]) + self.assertEquals((even == [1, 0, 3]).all(), True) + self.assertEquals((odd == [0, 2, 0]).all(), True) # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap def testTwos(self): @@ -234,7 +234,7 @@ class VectorTestCase(unittest.TestCase): print(self.typeStr, "... ", end=' ', file=sys.stderr) twos = Vector.__dict__[self.typeStr + "Twos"] vector = twos(5) - self.assertEquals((vector == [2,2,2,2,2]).all(), True) + self.assertEquals((vector == [2, 2, 2, 2, 2]).all(), True) # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap def testTwosNonInt(self): @@ -249,7 +249,7 @@ class VectorTestCase(unittest.TestCase): print(self.typeStr, "... ", end=' ', file=sys.stderr) threes = Vector.__dict__[self.typeStr + "Threes"] vector = threes(6) - self.assertEquals((vector == [3,3,3,3,3,3]).all(), True) + self.assertEquals((vector == [3, 3, 3, 3, 3, 3]).all(), True) # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap def testThreesNonInt(self): diff --git a/doc/ufuncs.rst.txt b/doc/ufuncs.rst.txt index fa107cc21..d628b3f95 100644 --- a/doc/ufuncs.rst.txt +++ b/doc/ufuncs.rst.txt @@ -96,8 +96,3 @@ If there are object arrays involved then loop->obj gets set to 1. Then there ar - The buffer[i] memory receives the PyObject input after the cast. This is a new reference which will be "stolen" as it is copied over into memory. The only problem is that what is presently in memory must be DECREF'd first. 
- - - - - diff --git a/numpy/__init__.py b/numpy/__init__.py index b4be4d707..532ec14c8 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -195,4 +195,3 @@ else: import warnings warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") - diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py index ba9e021c7..3343f6c05 100644 --- a/numpy/_import_tools.py +++ b/numpy/_import_tools.py @@ -15,14 +15,14 @@ class PackageLoader(object): else: _level = 1 self.parent_frame = frame = sys._getframe(_level) - self.parent_name = eval('__name__',frame.f_globals,frame.f_locals) - parent_path = eval('__path__',frame.f_globals,frame.f_locals) + self.parent_name = eval('__name__', frame.f_globals, frame.f_locals) + parent_path = eval('__path__', frame.f_globals, frame.f_locals) if isinstance(parent_path, str): parent_path = [parent_path] self.parent_path = parent_path if '__all__' not in frame.f_locals: - exec('__all__ = []',frame.f_globals,frame.f_locals) - self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals) + exec('__all__ = []', frame.f_globals, frame.f_locals) + self.parent_export_names = eval('__all__', frame.f_globals, frame.f_locals) self.info_modules = {} self.imported_packages = [] @@ -32,17 +32,17 @@ class PackageLoader(object): """ Return list of (package name,info.py file) from parent_path subdirectories. """ from glob import glob - files = glob(os.path.join(parent_path,package_dir,'info.py')) - for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')): + files = glob(os.path.join(parent_path, package_dir, 'info.py')) + for info_file in glob(os.path.join(parent_path, package_dir, 'info.pyc')): if info_file[:-1] not in files: files.append(info_file) info_files = [] for info_file in files: package_name = os.path.dirname(info_file[len(parent_path)+1:])\ - .replace(os.sep,'.') + .replace(os.sep, '.') if parent_package: package_name = parent_package + '.' + package_name - info_files.append((package_name,info_file)) + info_files.append((package_name, info_file)) info_files.extend(self._get_info_files('*', os.path.dirname(info_file), package_name)) @@ -57,7 +57,7 @@ class PackageLoader(object): if packages is None: for path in self.parent_path: - info_files.extend(self._get_info_files('*',path)) + info_files.extend(self._get_info_files('*', path)) else: for package_name in packages: package_dir = os.path.join(*package_name.split('.')) @@ -73,30 +73,30 @@ class PackageLoader(object): except ImportError as msg: self.warn('No scipy-style subpackage %r found in %s. 
'\ 'Ignoring: %s'\ - % (package_name,':'.join(self.parent_path), msg)) + % (package_name, ':'.join(self.parent_path), msg)) - for package_name,info_file in info_files: + for package_name, info_file in info_files: if package_name in info_modules: continue fullname = self.parent_name +'.'+ package_name if info_file[-1]=='c': - filedescriptor = ('.pyc','rb',2) + filedescriptor = ('.pyc', 'rb', 2) else: - filedescriptor = ('.py','U',1) + filedescriptor = ('.py', 'U', 1) try: info_module = imp.load_module(fullname+'.info', - open(info_file,filedescriptor[1]), + open(info_file, filedescriptor[1]), info_file, filedescriptor) except Exception as msg: self.error(msg) info_module = None - if info_module is None or getattr(info_module,'ignore',False): - info_modules.pop(package_name,None) + if info_module is None or getattr(info_module, 'ignore', False): + info_modules.pop(package_name, None) else: - self._init_info_modules(getattr(info_module,'depends',[])) + self._init_info_modules(getattr(info_module, 'depends', [])) info_modules[package_name] = info_module return @@ -107,8 +107,8 @@ class PackageLoader(object): """ depend_dict = {} - for name,info_module in self.info_modules.items(): - depend_dict[name] = getattr(info_module,'depends',[]) + for name, info_module in self.info_modules.items(): + depend_dict[name] = getattr(info_module, 'depends', []) package_names = [] for name in depend_dict.keys(): @@ -165,10 +165,10 @@ class PackageLoader(object): """ frame = self.parent_frame self.info_modules = {} - if options.get('force',False): + if options.get('force', False): self.imported_packages = [] - self.verbose = verbose = options.get('verbose',-1) - postpone = options.get('postpone',None) + self.verbose = verbose = options.get('verbose', -1) + postpone = options.get('postpone', None) self._init_info_modules(packages or None) self.log('Imports to %r namespace\n----------------------------'\ @@ -178,13 +178,13 @@ class PackageLoader(object): if package_name in self.imported_packages: continue info_module = self.info_modules[package_name] - global_symbols = getattr(info_module,'global_symbols',[]) - postpone_import = getattr(info_module,'postpone_import',False) + global_symbols = getattr(info_module, 'global_symbols', []) + postpone_import = getattr(info_module, 'postpone_import', False) if (postpone and not global_symbols) \ or (postpone_import and postpone is not None): continue - old_object = frame.f_locals.get(package_name,None) + old_object = frame.f_locals.get(package_name, None) cmdstr = 'import '+package_name if self._execcmd(cmdstr): @@ -195,7 +195,7 @@ class PackageLoader(object): new_object = frame.f_locals.get(package_name) if old_object is not None and old_object is not new_object: self.warn('Overwriting %s=%s (was %s)' \ - % (package_name,self._obj2repr(new_object), + % (package_name, self._obj2repr(new_object), self._obj2repr(old_object))) if '.' 
not in package_name: @@ -205,10 +205,10 @@ class PackageLoader(object): if symbol=='*': symbols = eval('getattr(%s,"__all__",None)'\ % (package_name), - frame.f_globals,frame.f_locals) + frame.f_globals, frame.f_locals) if symbols is None: symbols = eval('dir(%s)' % (package_name), - frame.f_globals,frame.f_locals) + frame.f_globals, frame.f_locals) symbols = [s for s in symbols if not s.startswith('_')] else: symbols = [symbol] @@ -224,11 +224,11 @@ class PackageLoader(object): continue if verbose!=-1: - for s,old_object in old_objects.items(): + for s, old_object in old_objects.items(): new_object = frame.f_locals[s] if new_object is not old_object: self.warn('Overwriting %s=%s (was %s)' \ - % (s,self._obj2repr(new_object), + % (s, self._obj2repr(new_object), self._obj2repr(old_object))) if symbol=='*': @@ -238,60 +238,60 @@ class PackageLoader(object): return - def _execcmd(self,cmdstr): + def _execcmd(self, cmdstr): """ Execute command in parent_frame.""" frame = self.parent_frame try: - exec (cmdstr, frame.f_globals,frame.f_locals) + exec (cmdstr, frame.f_globals, frame.f_locals) except Exception as msg: - self.error('%s -> failed: %s' % (cmdstr,msg)) + self.error('%s -> failed: %s' % (cmdstr, msg)) return True else: self.log('%s -> success' % (cmdstr)) return - def _obj2repr(self,obj): + def _obj2repr(self, obj): """ Return repr(obj) with""" - module = getattr(obj,'__module__',None) - file = getattr(obj,'__file__',None) + module = getattr(obj, '__module__', None) + file = getattr(obj, '__file__', None) if module is not None: return repr(obj) + ' from ' + module if file is not None: return repr(obj) + ' from ' + file return repr(obj) - def log(self,mess): + def log(self, mess): if self.verbose>1: print(str(mess), file=sys.stderr) - def warn(self,mess): + def warn(self, mess): if self.verbose>=0: print(str(mess), file=sys.stderr) - def error(self,mess): + def error(self, mess): if self.verbose!=-1: print(str(mess), file=sys.stderr) def _get_doc_title(self, info_module): """ Get the title from a package info.py file. """ - title = getattr(info_module,'__doc_title__',None) + title = getattr(info_module, '__doc_title__', None) if title is not None: return title - title = getattr(info_module,'__doc__',None) + title = getattr(info_module, '__doc__', None) if title is not None: - title = title.lstrip().split('\n',1)[0] + title = title.lstrip().split('\n', 1)[0] return title return '* Not Available *' def _format_titles(self,titles,colsep='---'): display_window_width = 70 # How to determine the correct value in runtime?? 
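A possible answer to the "How to determine the correct value in runtime??" question above (an assumption, not part of this changeset): on Python 3.3+ the terminal size can be queried through shutil, keeping 70 as the fallback::

    import shutil

    # Falls back to 70 columns when no terminal is attached (e.g. piped output).
    display_window_width = shutil.get_terminal_size(fallback=(70, 24)).columns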
- lengths = [len(name)-name.find('.')-1 for (name,title) in titles]+[0] + lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0] max_length = max(lengths) lines = [] - for (name,title) in titles: + for (name, title) in titles: name = name[name.find('.')+1:] w = max_length - len(name) words = title.split() - line = '%s%s %s' % (name,w*' ',colsep) + line = '%s%s %s' % (name, w*' ', colsep) tab = len(line) * ' ' while words: word = words.pop(0) @@ -313,14 +313,14 @@ class PackageLoader(object): titles = [] symbols = [] for package_name, info_module in self.info_modules.items(): - global_symbols = getattr(info_module,'global_symbols',[]) + global_symbols = getattr(info_module, 'global_symbols', []) fullname = self.parent_name +'.'+ package_name note = '' if fullname not in sys.modules: note = ' [*]' - titles.append((fullname,self._get_doc_title(info_module) + note)) + titles.append((fullname, self._get_doc_title(info_module) + note)) if global_symbols: - symbols.append((package_name,', '.join(global_symbols))) + symbols.append((package_name, ', '.join(global_symbols))) retstr = self._format_titles(titles) +\ '\n [*] - using a package requires explicit import (see pkgload)' @@ -329,20 +329,20 @@ class PackageLoader(object): if symbols: retstr += """\n\nGlobal symbols from subpackages"""\ """\n-------------------------------\n""" +\ - self._format_titles(symbols,'-->') + self._format_titles(symbols, '-->') return retstr class PackageLoaderDebug(PackageLoader): - def _execcmd(self,cmdstr): + def _execcmd(self, cmdstr): """ Execute command in parent_frame.""" frame = self.parent_frame - print('Executing',repr(cmdstr),'...', end=' ') + print('Executing', repr(cmdstr), '...', end=' ') sys.stdout.flush() - exec (cmdstr, frame.f_globals,frame.f_locals) + exec (cmdstr, frame.f_globals, frame.f_locals) print('ok') sys.stdout.flush() return -if int(os.environ.get('NUMPY_IMPORT_DEBUG','0')): +if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')): PackageLoader = PackageLoaderDebug diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py index 65a416a9f..c0dc56f30 100644 --- a/numpy/add_newdocs.py +++ b/numpy/add_newdocs.py @@ -925,7 +925,7 @@ add_newdoc('numpy.core.multiarray', 'count_nonzero', 5 """) -add_newdoc('numpy.core.multiarray','set_typeDict', +add_newdoc('numpy.core.multiarray', 'set_typeDict', """set_typeDict(dict) Set the internal dictionary that can look up an array type using a @@ -1280,10 +1280,10 @@ add_newdoc('numpy.core', 'inner', """) -add_newdoc('numpy.core','fastCopyAndTranspose', +add_newdoc('numpy.core', 'fastCopyAndTranspose', """_fastCopyAndTranspose(a)""") -add_newdoc('numpy.core.multiarray','correlate', +add_newdoc('numpy.core.multiarray', 'correlate', """cross_correlate(a,v, mode=0)""") add_newdoc('numpy.core.multiarray', 'arange', @@ -1347,14 +1347,14 @@ add_newdoc('numpy.core.multiarray', 'arange', """) -add_newdoc('numpy.core.multiarray','_get_ndarray_c_version', +add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() Return the compile time NDARRAY_VERSION number. """) -add_newdoc('numpy.core.multiarray','_reconstruct', +add_newdoc('numpy.core.multiarray', '_reconstruct', """_reconstruct(subtype, shape, dtype) Construct an empty array. Used by Pickles. @@ -2648,7 +2648,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flags', or the ultimate owner of the memory exposes a writeable buffer interface or is a string. - Arrays can be both C-style and Fortran-style contiguous simultaneously. 
+ Arrays can be both C-style and Fortran-style contiguous simultaneously. This is clear for 1-dimensional arrays, but can also be true for higher dimensional arrays. diff --git a/numpy/build_utils/common.py b/numpy/build_utils/common.py index 3592076a1..8435c462c 100644 --- a/numpy/build_utils/common.py +++ b/numpy/build_utils/common.py @@ -73,9 +73,9 @@ def pyod(filename): else: return _pyod3() -_BEFORE_SEQ = ['000','000','000','000','000','000','000','000', - '001','043','105','147','211','253','315','357'] -_AFTER_SEQ = ['376', '334','272','230','166','124','062','020'] +_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', + '001', '043', '105', '147', '211', '253', '315', '357'] +_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] _IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] _IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] diff --git a/numpy/compat/setup.py b/numpy/compat/setup.py index ce0ad8c72..c163bcaf9 100644 --- a/numpy/compat/setup.py +++ b/numpy/compat/setup.py @@ -4,7 +4,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('compat',parent_package,top_path) + config = Configuration('compat', parent_package, top_path) return config if __name__ == '__main__': diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index 50af6e548..79bc72a8c 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -32,7 +32,7 @@ from .fromnumeric import amax as max, amin as min, \ round_ as round from .numeric import absolute as abs -__all__ = ['char','rec','memmap'] +__all__ = ['char', 'rec', 'memmap'] __all__ += numeric.__all__ __all__ += fromnumeric.__all__ __all__ += rec.__all__ @@ -58,7 +58,7 @@ def _ufunc_reconstruct(module, name): def _ufunc_reduce(func): from pickle import whichmodule name = func.__name__ - return _ufunc_reconstruct, (whichmodule(func,name), name) + return _ufunc_reconstruct, (whichmodule(func, name), name) import sys diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 8046de149..d32f59390 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -24,7 +24,7 @@ def _makenames_list(adict, align): for fname in fnames: obj = adict[fname] n = len(obj) - if not isinstance(obj, tuple) or n not in [2,3]: + if not isinstance(obj, tuple) or n not in [2, 3]: raise ValueError("entry not a 2- or 3- tuple") if (n > 2) and (obj[2] == fname): continue @@ -108,10 +108,10 @@ def _array_descr(descriptor): for field in ordered_fields: if field[1] > offset: num = field[1] - offset - result.append(('','|V%d' % num)) + result.append(('', '|V%d' % num)) offset += num if len(field) > 3: - name = (field[2],field[3]) + name = (field[2], field[3]) else: name = field[2] if field[0].subdtype: diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index 27e445a54..8f0027616 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -121,4 +121,3 @@ def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): ret = ret.dtype.type(um.sqrt(ret)) return ret - diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index ad6a5d074..db491e6f5 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -214,7 +214,7 @@ def _leading_trailing(a): l = [_leading_trailing(a[i]) for i in range( min(len(a), _summaryEdgeItems))] l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems),0,-1)]) + min(len(a), 
_summaryEdgeItems), 0, -1)]) else: l = [_leading_trailing(a[i]) for i in range(0, len(a))] b = _nc.concatenate(tuple(l)) @@ -510,7 +510,7 @@ def _formatArray(a, format_function, rank, max_line_len, s += _formatArray(a[i], format_function, rank-1, max_line_len, " " + next_line_prefix, separator, edge_items, summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) + s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) if summary_insert1: s += next_line_prefix + summary_insert1 + "\n" @@ -521,7 +521,7 @@ def _formatArray(a, format_function, rank, max_line_len, s += _formatArray(a[-i], format_function, rank-1, max_line_len, " " + next_line_prefix, separator, edge_items, summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1) + s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) if leading_items or trailing_items > 1: s += next_line_prefix s += _formatArray(a[-1], format_function, rank-1, max_line_len, @@ -750,4 +750,3 @@ class TimedeltaFormat(object): def __call__(self, x): return self.format % x.astype('i8') - diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 62e4f9fc8..ad054920a 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -72,7 +72,7 @@ def remove_whitespace(s): return ''.join(s.split()) def _repl(str): - return str.replace('Bool','npy_bool') + return str.replace('Bool', 'npy_bool') class Function(object): def __init__(self, name, return_type, args, doc=''): diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 1bc22d777..e02cb8709 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -237,7 +237,7 @@ for code in 'bhilq': # an object which expands a list of character codes into an array of # TypeDescriptions. 
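The defdict entries below are what ultimately become the per-dtype inner loops of each generated ufunc; as a runtime cross-check, the resulting signatures can be inspected through a ufunc's ``types`` attribute (the exact list depends on the NumPy build)::

    >>> import numpy as np
    >>> np.add.types[:3]
    ['??->?', 'bb->b', 'BB->B']

The authoritative specification, however, is the TD(...) lists in this file.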
defdict = { -'add' : +'add': Ufunc(2, 1, Zero, docstrings.get('numpy.core.umath.add'), 'PyUFunc_AdditionTypeResolver', @@ -248,7 +248,7 @@ defdict = { ], TD(O, f='PyNumber_Add'), ), -'subtract' : +'subtract': Ufunc(2, 1, None, # Zero is only a unit to the right, not the left docstrings.get('numpy.core.umath.subtract'), 'PyUFunc_SubtractionTypeResolver', @@ -259,7 +259,7 @@ defdict = { ], TD(O, f='PyNumber_Subtract'), ), -'multiply' : +'multiply': Ufunc(2, 1, One, docstrings.get('numpy.core.umath.multiply'), 'PyUFunc_MultiplicationTypeResolver', @@ -271,7 +271,7 @@ defdict = { ], TD(O, f='PyNumber_Multiply'), ), -'divide' : +'divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.divide'), 'PyUFunc_DivisionTypeResolver', @@ -282,7 +282,7 @@ defdict = { ], TD(O, f='PyNumber_Divide'), ), -'floor_divide' : +'floor_divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.floor_divide'), 'PyUFunc_DivisionTypeResolver', @@ -293,7 +293,7 @@ defdict = { ], TD(O, f='PyNumber_FloorDivide'), ), -'true_divide' : +'true_divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy.core.umath.true_divide'), 'PyUFunc_DivisionTypeResolver', @@ -306,14 +306,14 @@ defdict = { ], TD(O, f='PyNumber_TrueDivide'), ), -'conjugate' : +'conjugate': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.conjugate'), None, TD(ints+flts+cmplx), TD(P, f='conjugate'), ), -'fmod' : +'fmod': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.fmod'), None, @@ -321,14 +321,14 @@ defdict = { TD(flts, f='fmod', astype={'e':'f'}), TD(P, f='fmod'), ), -'square' : +'square': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.square'), None, TD(ints+inexact), TD(O, f='Py_square'), ), -'reciprocal' : +'reciprocal': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.reciprocal'), None, @@ -337,14 +337,14 @@ defdict = { ), # This is no longer used as numpy.ones_like, however it is # still used by some internal calls. 
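As the comment above notes, the public numpy.ones_like is no longer backed by this ufunc; it is a separate array-creation routine whose behaviour can be checked directly::

    >>> import numpy as np
    >>> np.ones_like(np.arange(3))
    array([1, 1, 1])

The _ones_like entry below is kept only for the internal callers mentioned in the comment.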
-'_ones_like' : +'_ones_like': Ufunc(1, 1, None, docstrings.get('numpy.core.umath._ones_like'), 'PyUFunc_OnesLikeTypeResolver', TD(noobj), TD(O, f='Py_get_one'), ), -'power' : +'power': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.power'), None, @@ -352,7 +352,7 @@ defdict = { TD(inexact, f='pow', astype={'e':'f'}), TD(O, f='npy_ObjectPower'), ), -'absolute' : +'absolute': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.absolute'), 'PyUFunc_AbsoluteTypeResolver', @@ -360,13 +360,13 @@ defdict = { TD(cmplx, out=('f', 'd', 'g')), TD(O, f='PyNumber_Absolute'), ), -'_arg' : +'_arg': Ufunc(1, 1, None, docstrings.get('numpy.core.umath._arg'), None, TD(cmplx, out=('f', 'd', 'g')), ), -'negative' : +'negative': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.negative'), 'PyUFunc_SimpleUnaryOperationTypeResolver', @@ -374,316 +374,316 @@ defdict = { TD(cmplx, f='neg'), TD(O, f='PyNumber_Negative'), ), -'sign' : +'sign': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sign'), 'PyUFunc_SimpleUnaryOperationTypeResolver', TD(nobool_or_datetime), ), -'greater' : +'greater': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.greater'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(all, out='?'), ), -'greater_equal' : +'greater_equal': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.greater_equal'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(all, out='?'), ), -'less' : +'less': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.less'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(all, out='?'), ), -'less_equal' : +'less_equal': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.less_equal'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(all, out='?'), ), -'equal' : +'equal': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.equal'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(all, out='?'), ), -'not_equal' : +'not_equal': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.not_equal'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(all, out='?'), ), -'logical_and' : +'logical_and': Ufunc(2, 1, One, docstrings.get('numpy.core.umath.logical_and'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(nodatetime_or_obj, out='?'), TD(O, f='npy_ObjectLogicalAnd'), ), -'logical_not' : +'logical_not': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.logical_not'), None, TD(nodatetime_or_obj, out='?'), TD(O, f='npy_ObjectLogicalNot'), ), -'logical_or' : +'logical_or': Ufunc(2, 1, Zero, docstrings.get('numpy.core.umath.logical_or'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(nodatetime_or_obj, out='?'), TD(O, f='npy_ObjectLogicalOr'), ), -'logical_xor' : +'logical_xor': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.logical_xor'), 'PyUFunc_SimpleBinaryComparisonTypeResolver', TD(nodatetime_or_obj, out='?'), TD(P, f='logical_xor'), ), -'maximum' : +'maximum': Ufunc(2, 1, ReorderableNone, docstrings.get('numpy.core.umath.maximum'), 'PyUFunc_SimpleBinaryOperationTypeResolver', TD(noobj), TD(O, f='npy_ObjectMax') ), -'minimum' : +'minimum': Ufunc(2, 1, ReorderableNone, docstrings.get('numpy.core.umath.minimum'), 'PyUFunc_SimpleBinaryOperationTypeResolver', TD(noobj), TD(O, f='npy_ObjectMin') ), -'fmax' : +'fmax': Ufunc(2, 1, ReorderableNone, docstrings.get('numpy.core.umath.fmax'), 'PyUFunc_SimpleBinaryOperationTypeResolver', TD(noobj), TD(O, f='npy_ObjectMax') ), -'fmin' : +'fmin': Ufunc(2, 1, ReorderableNone, docstrings.get('numpy.core.umath.fmin'), 'PyUFunc_SimpleBinaryOperationTypeResolver', TD(noobj), TD(O, f='npy_ObjectMin') ), -'logaddexp' : +'logaddexp': Ufunc(2, 
1, None, docstrings.get('numpy.core.umath.logaddexp'), None, TD(flts, f="logaddexp", astype={'e':'f'}) ), -'logaddexp2' : +'logaddexp2': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.logaddexp2'), None, TD(flts, f="logaddexp2", astype={'e':'f'}) ), -'bitwise_and' : +'bitwise_and': Ufunc(2, 1, One, docstrings.get('numpy.core.umath.bitwise_and'), None, TD(bints), TD(O, f='PyNumber_And'), ), -'bitwise_or' : +'bitwise_or': Ufunc(2, 1, Zero, docstrings.get('numpy.core.umath.bitwise_or'), None, TD(bints), TD(O, f='PyNumber_Or'), ), -'bitwise_xor' : +'bitwise_xor': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.bitwise_xor'), None, TD(bints), TD(O, f='PyNumber_Xor'), ), -'invert' : +'invert': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.invert'), None, TD(bints), TD(O, f='PyNumber_Invert'), ), -'left_shift' : +'left_shift': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.left_shift'), None, TD(ints), TD(O, f='PyNumber_Lshift'), ), -'right_shift' : +'right_shift': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.right_shift'), None, TD(ints), TD(O, f='PyNumber_Rshift'), ), -'degrees' : +'degrees': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.degrees'), None, TD(fltsP, f='degrees', astype={'e':'f'}), ), -'rad2deg' : +'rad2deg': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.rad2deg'), None, TD(fltsP, f='rad2deg', astype={'e':'f'}), ), -'radians' : +'radians': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.radians'), None, TD(fltsP, f='radians', astype={'e':'f'}), ), -'deg2rad' : +'deg2rad': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.deg2rad'), None, TD(fltsP, f='deg2rad', astype={'e':'f'}), ), -'arccos' : +'arccos': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arccos'), None, TD(inexact, f='acos', astype={'e':'f'}), TD(P, f='arccos'), ), -'arccosh' : +'arccosh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arccosh'), None, TD(inexact, f='acosh', astype={'e':'f'}), TD(P, f='arccosh'), ), -'arcsin' : +'arcsin': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arcsin'), None, TD(inexact, f='asin', astype={'e':'f'}), TD(P, f='arcsin'), ), -'arcsinh' : +'arcsinh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arcsinh'), None, TD(inexact, f='asinh', astype={'e':'f'}), TD(P, f='arcsinh'), ), -'arctan' : +'arctan': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arctan'), None, TD(inexact, f='atan', astype={'e':'f'}), TD(P, f='arctan'), ), -'arctanh' : +'arctanh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arctanh'), None, TD(inexact, f='atanh', astype={'e':'f'}), TD(P, f='arctanh'), ), -'cos' : +'cos': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.cos'), None, TD(inexact, f='cos', astype={'e':'f'}), TD(P, f='cos'), ), -'sin' : +'sin': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sin'), None, TD(inexact, f='sin', astype={'e':'f'}), TD(P, f='sin'), ), -'tan' : +'tan': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.tan'), None, TD(inexact, f='tan', astype={'e':'f'}), TD(P, f='tan'), ), -'cosh' : +'cosh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.cosh'), None, TD(inexact, f='cosh', astype={'e':'f'}), TD(P, f='cosh'), ), -'sinh' : +'sinh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sinh'), None, TD(inexact, f='sinh', astype={'e':'f'}), TD(P, f='sinh'), ), -'tanh' : +'tanh': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.tanh'), None, TD(inexact, f='tanh', astype={'e':'f'}), TD(P, f='tanh'), ), -'exp' : +'exp': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.exp'), None, TD(inexact, f='exp', 
astype={'e':'f'}), TD(P, f='exp'), ), -'exp2' : +'exp2': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.exp2'), None, TD(inexact, f='exp2', astype={'e':'f'}), TD(P, f='exp2'), ), -'expm1' : +'expm1': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.expm1'), None, TD(inexact, f='expm1', astype={'e':'f'}), TD(P, f='expm1'), ), -'log' : +'log': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log'), None, TD(inexact, f='log', astype={'e':'f'}), TD(P, f='log'), ), -'log2' : +'log2': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log2'), None, TD(inexact, f='log2', astype={'e':'f'}), TD(P, f='log2'), ), -'log10' : +'log10': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log10'), None, TD(inexact, f='log10', astype={'e':'f'}), TD(P, f='log10'), ), -'log1p' : +'log1p': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log1p'), None, TD(inexact, f='log1p', astype={'e':'f'}), TD(P, f='log1p'), ), -'sqrt' : +'sqrt': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.sqrt'), None, @@ -691,105 +691,105 @@ defdict = { TD(inexact, f='sqrt', astype={'e':'f'}), TD(P, f='sqrt'), ), -'ceil' : +'ceil': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.ceil'), None, TD(flts, f='ceil', astype={'e':'f'}), TD(P, f='ceil'), ), -'trunc' : +'trunc': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.trunc'), None, TD(flts, f='trunc', astype={'e':'f'}), TD(P, f='trunc'), ), -'fabs' : +'fabs': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.fabs'), None, TD(flts, f='fabs', astype={'e':'f'}), TD(P, f='fabs'), ), -'floor' : +'floor': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.floor'), None, TD(flts, f='floor', astype={'e':'f'}), TD(P, f='floor'), ), -'rint' : +'rint': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.rint'), None, TD(inexact, f='rint', astype={'e':'f'}), TD(P, f='rint'), ), -'arctan2' : +'arctan2': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.arctan2'), None, TD(flts, f='atan2', astype={'e':'f'}), TD(P, f='arctan2'), ), -'remainder' : +'remainder': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.remainder'), None, TD(intflt), TD(O, f='PyNumber_Remainder'), ), -'hypot' : +'hypot': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.hypot'), None, TD(flts, f='hypot', astype={'e':'f'}), TD(P, f='hypot'), ), -'isnan' : +'isnan': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.isnan'), None, TD(inexact, out='?'), ), -'isinf' : +'isinf': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.isinf'), None, TD(inexact, out='?'), ), -'isfinite' : +'isfinite': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.isfinite'), None, TD(inexact, out='?'), ), -'signbit' : +'signbit': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.signbit'), None, TD(flts, out='?'), ), -'copysign' : +'copysign': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.copysign'), None, TD(flts), ), -'nextafter' : +'nextafter': Ufunc(2, 1, None, docstrings.get('numpy.core.umath.nextafter'), None, TD(flts), ), -'spacing' : +'spacing': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.spacing'), None, TD(flts), ), -'modf' : +'modf': Ufunc(1, 2, None, docstrings.get('numpy.core.umath.modf'), None, @@ -801,11 +801,11 @@ if sys.version_info[0] >= 3: # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators del defdict['divide'] -def indent(st,spaces): +def indent(st, spaces): indention = ' '*spaces - indented = indention + st.replace('\n','\n'+indention) + indented = indention + st.replace('\n', '\n'+indention) # trim off any trailing spaces - indented = re.sub(r' +$',r'',indented) + 
indented = re.sub(r' +$', r'', indented) return indented chartotype1 = {'e': 'e_e', @@ -899,7 +899,7 @@ def make_arrays(funcdict): % (name, datanames)) code1list.append("static char %s_signatures[] = { %s };" \ % (name, signames)) - return "\n".join(code1list),"\n".join(code2list) + return "\n".join(code1list), "\n".join(code2list) def make_ufuncs(funcdict): code3list = [] @@ -939,11 +939,11 @@ r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d, return '\n'.join(code3list) -def make_code(funcdict,filename): +def make_code(funcdict, filename): code1, code2 = make_arrays(funcdict) code3 = make_ufuncs(funcdict) - code2 = indent(code2,4) - code3 = indent(code3,4) + code2 = indent(code2, 4) + code3 = indent(code3, 4) code = r""" /** Warning this file is autogenerated!!! @@ -966,7 +966,7 @@ InitOperators(PyObject *dictionary) { if __name__ == "__main__": filename = __file__ - fid = open('__umath_generated.c','w') + fid = open('__umath_generated.c', 'w') code = make_code(defdict, filename) fid.write(code) fid.close() diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 53ccfcfda..59e877750 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -2199,7 +2199,7 @@ add_newdoc('numpy.core.umath', 'maximum', The maximum value of an array along a given axis, propagating any NaNs. nanmax : The maximum value of an array along a given axis, ignoring any NaNs. - + fmin, amin, nanmin Notes diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index cb1c4ff05..ca18d64ea 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -40,7 +40,7 @@ def _wrapit(obj, method, *args, **kwds): wrap = obj.__array_wrap__ except AttributeError: wrap = None - result = getattr(asarray(obj),method)(*args, **kwds) + result = getattr(asarray(obj), method)(*args, **kwds) if wrap: if not isinstance(result, mu.ndarray): result = asarray(result) @@ -106,7 +106,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): array([4, 3, 6]) If `indices` is not one dimensional, the output also has these dimensions. - + >>> np.take(a, [[0, 1], [2, 3]]) array([[4, 3], [5, 7]]) @@ -2077,9 +2077,9 @@ def amax(a, axis=None, out=None, keepdims=False): Element-wise maximum of two arrays, propagating any NaNs. fmax : Element-wise maximum of two arrays, ignoring any NaNs. - argmax : + argmax : Return the indices of the maximum values. - + nanmin, minimum, fmin Notes @@ -2087,9 +2087,9 @@ def amax(a, axis=None, out=None, keepdims=False): NaN values are propagated, that is if at least one item is NaN, the corresponding max value will be NaN as well. To ignore NaN values (MATLAB behavior), please use nanmax. - + Don't use `amax` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than ``amax(a, axis=0)``. Examples @@ -2161,7 +2161,7 @@ def amin(a, axis=None, out=None, keepdims=False): Element-wise minimum of two arrays, propagating any NaNs. fmin : Element-wise minimum of two arrays, ignoring any NaNs. - argmin : + argmin : Return the indices of the minimum values. nanmax, maximum, fmax @@ -2171,9 +2171,9 @@ def amin(a, axis=None, out=None, keepdims=False): NaN values are propagated, that is if at least one item is NaN, the corresponding min value will be NaN as well. To ignore NaN values (MATLAB behavior), please use nanmin. 
- - Don't use `amin` for element-wise comparison of 2 arrays; when - ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than + + Don't use `amin` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than ``amin(a, axis=0)``. Examples @@ -2239,7 +2239,7 @@ def alen(a): try: return len(a) except TypeError: - return len(array(a,ndmin=1)) + return len(array(a, ndmin=1)) def prod(a, axis=None, dtype=None, out=None, keepdims=False): @@ -2913,4 +2913,3 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) - diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 686425623..f2c895608 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -164,6 +164,5 @@ def logspace(start,stop,num=50,endpoint=True,base=10.0): >>> plt.show() """ - y = linspace(start,stop,num=num,endpoint=endpoint) - return _nx.power(base,y) - + y = linspace(start, stop, num=num, endpoint=endpoint) + return _nx.power(base, y) diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py index 93210a23b..165ea6860 100644 --- a/numpy/core/getlimits.py +++ b/numpy/core/getlimits.py @@ -3,7 +3,7 @@ """ from __future__ import division, absolute_import, print_function -__all__ = ['finfo','iinfo'] +__all__ = ['finfo', 'iinfo'] from .machar import MachAr from . import numeric @@ -97,7 +97,7 @@ class finfo(object): # In case a float instance was given dtype = numeric.dtype(type(dtype)) - obj = cls._finfo_cache.get(dtype,None) + obj = cls._finfo_cache.get(dtype, None) if obj is not None: return obj dtypes = [dtype] @@ -107,7 +107,7 @@ class finfo(object): dtype = newdtype if not issubclass(dtype, numeric.inexact): raise ValueError("data type %r not inexact" % (dtype)) - obj = cls._finfo_cache.get(dtype,None) + obj = cls._finfo_cache.get(dtype, None) if obj is not None: return obj if not issubclass(dtype, numeric.floating): @@ -115,7 +115,7 @@ class finfo(object): if newdtype is not dtype: dtypes.append(newdtype) dtype = newdtype - obj = cls._finfo_cache.get(dtype,None) + obj = cls._finfo_cache.get(dtype, None) if obj is not None: return obj obj = object.__new__(cls)._init(dtype) @@ -151,11 +151,11 @@ class finfo(object): 'numpy %s precision floating point number' % precname) for word in ['precision', 'iexp', - 'maxexp','minexp','negep', + 'maxexp', 'minexp', 'negep', 'machep']: - setattr(self,word,getattr(machar, word)) - for word in ['tiny','resolution','epsneg']: - setattr(self,word,getattr(machar, word).flat[0]) + setattr(self, word, getattr(machar, word)) + for word in ['tiny', 'resolution', 'epsneg']: + setattr(self, word, getattr(machar, word).flat[0]) self.max = machar.huge.flat[0] self.min = -self.max self.eps = machar.eps.flat[0] @@ -296,11 +296,11 @@ max = %(max)s if __name__ == '__main__': f = finfo(ntypes.single) - print('single epsilon:',f.eps) - print('single tiny:',f.tiny) + print('single epsilon:', f.eps) + print('single tiny:', f.tiny) f = finfo(ntypes.float) - print('float epsilon:',f.eps) - print('float tiny:',f.tiny) + print('float epsilon:', f.eps) + print('float tiny:', f.tiny) f = finfo(ntypes.longfloat) - print('longfloat epsilon:',f.eps) - print('longfloat tiny:',f.tiny) + print('longfloat epsilon:', f.eps) + print('longfloat tiny:', f.tiny) diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h index e5fd09e91..62ffa4006 100644 --- a/numpy/core/include/numpy/npy_common.h +++ 
b/numpy/core/include/numpy/npy_common.h @@ -952,4 +952,3 @@ typedef npy_int64 npy_datetime; /* End of typedefs for numarray style bit-width names */ #endif - diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index 53662ce89..bd99a1374 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -11,7 +11,7 @@ from numpy.compat import long, basestring dtypedescr = dtype valid_filemodes = ["r", "c", "r+", "w+"] -writeable_filemodes = ["r+","w+"] +writeable_filemodes = ["r+", "w+"] mode_equivalents = { "readonly":"r", @@ -204,7 +204,7 @@ class memmap(ndarray): raise ValueError("mode must be one of %s" % (valid_filemodes + list(mode_equivalents.keys()))) - if hasattr(filename,'read'): + if hasattr(filename, 'read'): fid = filename own_file = False else: diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index e2c020ced..1b3d5d5a8 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -866,9 +866,9 @@ for NumPy 2.0. The new behavior fits the conventional definition of correlation: inputs are never swapped, and the second argument is conjugated for complex arrays.""", DeprecationWarning) - return multiarray.correlate(a,v,mode) + return multiarray.correlate(a, v, mode) else: - return multiarray.correlate2(a,v,mode) + return multiarray.correlate2(a, v, mode) def convolve(a,v,mode='full'): """ @@ -953,7 +953,7 @@ def convolve(a,v,mode='full'): array([ 2.5]) """ - a,v = array(a, ndmin=1),array(v, ndmin=1) + a, v = array(a, ndmin=1), array(v, ndmin=1) if (len(v) > len(a)): a, v = v, a if len(a) == 0 : @@ -963,7 +963,7 @@ def convolve(a,v,mode='full'): mode = _mode_from_name(mode) return multiarray.correlate(a, v[::-1], mode) -def outer(a,b): +def outer(a, b): """ Compute the outer product of two vectors. @@ -1037,7 +1037,7 @@ def outer(a,b): """ a = asarray(a) b = asarray(b) - return a.ravel()[:,newaxis]*b.ravel()[newaxis,:] + return a.ravel()[:, newaxis]*b.ravel()[newaxis,:] # try to import blas optimized dot if available try: @@ -1169,8 +1169,8 @@ def tensordot(a, b, axes=2): try: iter(axes) except: - axes_a = list(range(-axes,0)) - axes_b = list(range(0,axes)) + axes_a = list(range(-axes, 0)) + axes_b = list(range(0, axes)) else: axes_a, axes_b = axes try: @@ -1346,7 +1346,7 @@ def rollaxis(a, axis, start=0): start -= 1 if axis==start: return a - axes = list(range(0,n)) + axes = list(range(0, n)) axes.remove(axis) axes.insert(start, axis) return a.transpose(axes) @@ -1462,12 +1462,12 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): """ if axis is not None: - axisa,axisb,axisc=(axis,)*3 + axisa, axisb, axisc=(axis,)*3 a = asarray(a).swapaxes(axisa, 0) b = asarray(b).swapaxes(axisb, 0) msg = "incompatible dimensions for cross product\n"\ "(dimension must be 2 or 3)" - if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]): + if (a.shape[0] not in [2, 3]) or (b.shape[0] not in [2, 3]): raise ValueError(msg) if a.shape[0] == 2: if (b.shape[0] == 2): @@ -1489,11 +1489,11 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): x = -a[2]*b[1] y = a[2]*b[0] z = a[0]*b[1] - a[1]*b[0] - cp = array([x,y,z]) + cp = array([x, y, z]) if cp.ndim == 1: return cp else: - return cp.swapaxes(0,axisc) + return cp.swapaxes(0, axisc) #Use numarray's printing function @@ -1742,10 +1742,10 @@ def indices(dimensions, dtype=int): dimensions = tuple(dimensions) N = len(dimensions) if N == 0: - return array([],dtype=dtype) + return array([], dtype=dtype) res = empty((N,)+dimensions, dtype=dtype) for i, dim in enumerate(dimensions): - tmp = arange(dim,dtype=dtype) + tmp = 
arange(dim, dtype=dtype) tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1) newdim = dimensions[:i] + (1,)+ dimensions[i+1:] val = zeros(newdim, dtype) @@ -2019,7 +2019,7 @@ def _maketup(descr, val): if fields is None: return val else: - res = [_maketup(fields[name][0],val) for name in dt.names] + res = [_maketup(fields[name][0], val) for name in dt.names] return tuple(res) def identity(n, dtype=None): @@ -2699,7 +2699,7 @@ class errstate(object): # Note that we don't want to run the above doctests because they will fail # without a from __future__ import with_statement def __init__(self, **kwargs): - self.call = kwargs.pop('call',_Unspecified) + self.call = kwargs.pop('call', _Unspecified) self.kwargs = kwargs def __enter__(self): diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 1a5f31e4e..8dc4ca75e 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -86,7 +86,7 @@ from __future__ import division, absolute_import, print_function __all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes', 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char', 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type', - 'issubdtype', 'datetime_data','datetime_as_string', + 'issubdtype', 'datetime_data', 'datetime_as_string', 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', ] @@ -436,7 +436,7 @@ def _construct_char_code_lookup(): for name in typeinfo.keys(): tup = typeinfo[name] if isinstance(tup, tuple): - if tup[0] not in ['p','P']: + if tup[0] not in ['p', 'P']: _sctype2char_dict[tup[-1]] = tup[0] _construct_char_code_lookup() @@ -445,7 +445,7 @@ sctypes = {'int': [], 'uint':[], 'float':[], 'complex':[], - 'others':[bool,object,str,unicode,void]} + 'others':[bool, object, str, unicode, void]} def _add_array_type(typename, bits): try: @@ -541,7 +541,7 @@ except AttributeError: # Py3K buffer_type = memoryview -_python_types = {int : 'int_', +_python_types = {int: 'int_', float: 'float_', complex: 'complex_', bool: 'bool_', @@ -871,7 +871,7 @@ for key in _sctype2char_dict.keys(): if issubclass(key, allTypes['flexible']): _typestr[key] = _sctype2char_dict[key] else: - _typestr[key] = empty((1,),key).dtype.str[1:] + _typestr[key] = empty((1,), key).dtype.str[1:] # Make sure all typestrings are in sctypeDict for key, val in _typestr.items(): @@ -942,7 +942,7 @@ def _find_common_coerce(a, b): thisind = __test_types.index(a.char) except ValueError: return None - return _can_coerce_all([a,b], start=thisind) + return _can_coerce_all([a, b], start=thisind) # Find a data-type that all data-types in a list can be coerced to def _can_coerce_all(dtypelist, start=0): @@ -1030,6 +1030,6 @@ def find_common_type(array_types, scalar_types): return None if index_sc > index_a: - return _find_common_coerce(maxsc,maxa) + return _find_common_coerce(maxsc, maxa) else: return maxa diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 03d26c2b6..3937b2374 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -360,10 +360,10 @@ def check_types(config_cmd, ext, build_dir): def check_mathlib(config_cmd): # Testing the C math library mathlibs = [] - mathlibs_choices = [[],['m'],['cpml']] + mathlibs_choices = [[], ['m'], ['cpml']] mathlib = os.environ.get('MATHLIB') if mathlib: - mathlibs_choices.insert(0,mathlib.split(',')) + mathlibs_choices.insert(0, mathlib.split(',')) for libs in mathlibs_choices: if config_cmd.check_func("exp", libraries=libs, decl=True, call=True): mathlibs = libs @@ -383,12 +383,12 @@ def visibility_define(config): return '' 
def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration,dot_join + from numpy.distutils.misc_util import Configuration, dot_join from numpy.distutils.system_info import get_info, default_lib_dirs - config = Configuration('core',parent_package,top_path) + config = Configuration('core', parent_package, top_path) local_dir = config.local_path - codegen_dir = join(local_dir,'code_generators') + codegen_dir = join(local_dir, 'code_generators') if is_released(config): warnings.simplefilter('error', MismatchCAPIWarning) @@ -397,32 +397,32 @@ def configuration(parent_package='',top_path=None): # actual C API VERSION check_api_version(C_API_VERSION, codegen_dir) - generate_umath_py = join(codegen_dir,'generate_umath.py') - n = dot_join(config.name,'generate_umath') + generate_umath_py = join(codegen_dir, 'generate_umath.py') + n = dot_join(config.name, 'generate_umath') generate_umath = imp.load_module('_'.join(n.split('.')), - open(generate_umath_py,'U'),generate_umath_py, - ('.py','U',1)) + open(generate_umath_py, 'U'), generate_umath_py, + ('.py', 'U', 1)) header_dir = 'include/numpy' # this is relative to config.path_in_package cocache = CallOnceOnly() def generate_config_h(ext, build_dir): - target = join(build_dir,header_dir,'config.h') + target = join(build_dir, header_dir, 'config.h') d = os.path.dirname(target) if not os.path.exists(d): os.makedirs(d) - if newer(__file__,target): + if newer(__file__, target): config_cmd = config.get_config_cmd() - log.info('Generating %s',target) + log.info('Generating %s', target) # Check sizeof moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) # Check math library and C99 math funcs availability mathlibs = check_mathlib(config_cmd) - moredefs.append(('MATHLIB',','.join(mathlibs))) + moredefs.append(('MATHLIB', ','.join(mathlibs))) check_math_capabilities(config_cmd, moredefs, mathlibs) moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) @@ -471,10 +471,10 @@ def configuration(parent_package='',top_path=None): # Generate the config.h file from moredefs target_f = open(target, 'w') for d in moredefs: - if isinstance(d,str): + if isinstance(d, str): target_f.write('#define %s\n' % (d)) else: - target_f.write('#define %s %s\n' % (d[0],d[1])) + target_f.write('#define %s %s\n' % (d[0], d[1])) # define inline to our keyword, or nothing target_f.write('#ifndef __cplusplus\n') @@ -493,7 +493,7 @@ def configuration(parent_package='',top_path=None): """) target_f.close() - print('File:',target) + print('File:', target) target_f = open(target) print(target_f.read()) target_f.close() @@ -523,13 +523,13 @@ def configuration(parent_package='',top_path=None): def generate_numpyconfig_h(ext, build_dir): """Depends on config.h: generate_config_h has to be called before !""" - target = join(build_dir,header_dir,'_numpyconfig.h') + target = join(build_dir, header_dir, '_numpyconfig.h') d = os.path.dirname(target) if not os.path.exists(d): os.makedirs(d) - if newer(__file__,target): + if newer(__file__, target): config_cmd = config.get_config_cmd() - log.info('Generating %s',target) + log.info('Generating %s', target) # Check sizeof ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) @@ -567,10 +567,10 @@ def configuration(parent_package='',top_path=None): # Add moredefs to header target_f = open(target, 'w') for d in moredefs: - if isinstance(d,str): + if isinstance(d, str): target_f.write('#define %s\n' % (d)) else: - target_f.write('#define %s %s\n' % (d[0],d[1])) + 
target_f.write('#define %s %s\n' % (d[0], d[1])) # Define __STDC_FORMAT_MACROS target_f.write(""" @@ -621,11 +621,11 @@ def configuration(parent_package='',top_path=None): config.numpy_include_dirs.extend(config.paths('include')) - deps = [join('src','npymath','_signbit.c'), - join('include','numpy','*object.h'), + deps = [join('src', 'npymath', '_signbit.c'), + join('include', 'numpy', '*object.h'), 'include/numpy/fenv/fenv.c', 'include/numpy/fenv/fenv.h', - join(codegen_dir,'genapi.py'), + join(codegen_dir, 'genapi.py'), ] # Don't install fenv unless we need them. @@ -643,7 +643,7 @@ def configuration(parent_package='',top_path=None): # generate_numpyconfig_h as sources *before* adding npymath. config.add_extension('_dummy', - sources = [join('src','dummymodule.c'), + sources = [join('src', 'dummymodule.c'), generate_config_h, generate_numpyconfig_h, generate_numpy_api] @@ -826,7 +826,7 @@ def configuration(parent_package='',top_path=None): [generate_config_h, generate_numpyconfig_h, generate_numpy_api, - join(codegen_dir,'generate_numpy_api.py'), + join(codegen_dir, 'generate_numpy_api.py'), join('*.py')], depends = deps + multiarray_deps, libraries = ['npymath', 'npysort']) @@ -857,13 +857,13 @@ def configuration(parent_package='',top_path=None): def generate_umath_c(ext, build_dir): - target = join(build_dir,header_dir,'__umath_generated.c') + target = join(build_dir, header_dir, '__umath_generated.c') dir = os.path.dirname(target) if not os.path.exists(dir): os.makedirs(dir) script = generate_umath_py - if newer(script,target): - f = open(target,'w') + if newer(script, target): + f = open(target, 'w') f.write(generate_umath.make_code(generate_umath.defdict, generate_umath.__file__)) f.close() @@ -881,7 +881,7 @@ def configuration(parent_package='',top_path=None): umath_deps = [ generate_umath_py, join('src', 'umath', 'simd.inc.src'), - join(codegen_dir,'generate_ufunc_api.py')] + join(codegen_dir, 'generate_ufunc_api.py')] if not ENABLE_SEPARATE_COMPILATION: umath_deps.extend(umath_src) @@ -905,7 +905,7 @@ def configuration(parent_package='',top_path=None): ####################################################################### config.add_extension('scalarmath', - sources = [join('src','scalarmathmodule.c.src'), + sources = [join('src', 'scalarmathmodule.c.src'), generate_config_h, generate_numpyconfig_h, generate_numpy_api, @@ -919,19 +919,19 @@ def configuration(parent_package='',top_path=None): ####################################################################### # Configure blasdot - blas_info = get_info('blas_opt',0) + blas_info = get_info('blas_opt', 0) #blas_info = {} def get_dotblas_sources(ext, build_dir): if blas_info: - if ('NO_ATLAS_INFO',1) in blas_info.get('define_macros',[]): + if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []): return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient. 
return ext.depends[:1] return None # no extension module will be built config.add_extension('_dotblas', sources = [get_dotblas_sources], - depends = [join('blasdot','_dotblas.c'), - join('blasdot','cblas.h'), + depends = [join('blasdot', '_dotblas.c'), + join('blasdot', 'cblas.h'), ], include_dirs = ['blasdot'], extra_info = blas_info @@ -942,21 +942,21 @@ def configuration(parent_package='',top_path=None): ####################################################################### config.add_extension('umath_tests', - sources = [join('src','umath', 'umath_tests.c.src')]) + sources = [join('src', 'umath', 'umath_tests.c.src')]) ####################################################################### # custom rational dtype module # ####################################################################### config.add_extension('test_rational', - sources = [join('src','umath', 'test_rational.c.src')]) + sources = [join('src', 'umath', 'test_rational.c.src')]) ####################################################################### # struct_ufunc_test module # ####################################################################### config.add_extension('struct_ufunc_test', - sources = [join('src','umath', 'struct_ufunc_test.c.src')]) + sources = [join('src', 'umath', 'struct_ufunc_test.c.src')]) ####################################################################### # multiarray_tests module # @@ -970,7 +970,7 @@ def configuration(parent_package='',top_path=None): ####################################################################### config.add_extension('operand_flag_tests', - sources = [join('src','umath', 'operand_flag_tests.c.src')]) + sources = [join('src', 'umath', 'operand_flag_tests.c.src')]) config.add_data_dir('tests') config.add_data_dir('tests/data') diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py index 7f0ad8bfe..1f3e6b44e 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -236,9 +236,9 @@ def pyod(filename): else: return _pyod3() -_BEFORE_SEQ = ['000','000','000','000','000','000','000','000', - '001','043','105','147','211','253','315','357'] -_AFTER_SEQ = ['376', '334','272','230','166','124','062','020'] +_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000', + '001', '043', '105', '147', '211', '253', '315', '357'] +_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020'] _IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] _IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1] diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index ee0b1342e..ae684fb42 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -1,6 +1,6 @@ from __future__ import division, absolute_import, print_function -__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack'] +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack'] from . 
import numeric as _nx from .numeric import array, asanyarray, newaxis @@ -100,7 +100,7 @@ def atleast_2d(*arys): if len(ary.shape) == 0 : result = ary.reshape(1, 1) elif len(ary.shape) == 1 : - result = ary[newaxis, :] + result = ary[newaxis,:] else : result = ary res.append(result) @@ -162,11 +162,11 @@ def atleast_3d(*arys): for ary in arys: ary = asanyarray(ary) if len(ary.shape) == 0: - result = ary.reshape(1,1,1) + result = ary.reshape(1, 1, 1) elif len(ary.shape) == 1: - result = ary[newaxis,:,newaxis] + result = ary[newaxis,:, newaxis] elif len(ary.shape) == 2: - result = ary[:,:,newaxis] + result = ary[:,:, newaxis] else: result = ary res.append(result) @@ -275,4 +275,3 @@ def hstack(tup): return _nx.concatenate(arrs, 0) else: return _nx.concatenate(arrs, 1) - diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c index 6db6060c9..86a6b0137 100644 --- a/numpy/core/src/multiarray/arrayobject.c +++ b/numpy/core/src/multiarray/arrayobject.c @@ -77,7 +77,7 @@ PyArray_Size(PyObject *op) * Returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int -PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) +PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) { if (base == NULL) { PyErr_SetString(PyExc_ValueError, @@ -92,7 +92,7 @@ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) if (PyArray_FailUnlessWriteable(base, "UPDATEIFCOPY base") < 0) { goto fail; } - + /* * Any writes to 'arr' will magicaly turn into writes to 'base', so we * should warn if necessary. @@ -145,8 +145,8 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj) /* * Don't allow infinite chains of views, always set the base - * to the first owner of the data. - * That is, either the first object which isn't an array, + * to the first owner of the data. + * That is, either the first object which isn't an array, * or the first object which owns its own data. */ @@ -157,19 +157,19 @@ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj) /* Propagate WARN_ON_WRITE through views. */ if (PyArray_FLAGS(obj_arr) & NPY_ARRAY_WARN_ON_WRITE) { PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WARN_ON_WRITE); - } + } /* If this array owns its own data, stop collapsing */ if (PyArray_CHKFLAGS(obj_arr, NPY_ARRAY_OWNDATA)) { break; - } + } tmp = PyArray_BASE(obj_arr); /* If there's no base, stop collapsing */ if (tmp == NULL) { break; } - /* Stop the collapse new base when the would not be of the same + /* Stop the collapse new base when the would not be of the same * type (i.e. different subclass). 
*/ if (Py_TYPE(tmp) != Py_TYPE(arr)) { @@ -1478,7 +1478,7 @@ PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp offset, offset_bounds_from_strides(elsize, nd, dims, newstrides, &lower_offset, &upper_offset); - + if ((upper_offset > end) || (lower_offset < begin)) { return NPY_FALSE; } diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 4c374cc75..5a7a58b33 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -108,7 +108,7 @@ _array_find_python_scalar_type(PyObject *op) } } else if (PyLong_Check(op)) { - /* check to see if integer can fit into a longlong or ulonglong + /* check to see if integer can fit into a longlong or ulonglong and return that --- otherwise return object */ if ((PyLong_AsLongLong(op) == -1) && PyErr_Occurred()) { PyErr_Clear(); @@ -117,14 +117,14 @@ _array_find_python_scalar_type(PyObject *op) return PyArray_DescrFromType(NPY_LONGLONG); } - if ((PyLong_AsUnsignedLongLong(op) == (unsigned long long) -1) + if ((PyLong_AsUnsignedLongLong(op) == (unsigned long long) -1) && PyErr_Occurred()){ PyErr_Clear(); - } + } else { return PyArray_DescrFromType(NPY_ULONGLONG); - } - + } + return PyArray_DescrFromType(NPY_OBJECT); } return NULL; @@ -381,7 +381,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims, if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT|PyBUF_STRIDES) == 0 || PyObject_GetBuffer(obj, &buffer_view, PyBUF_FORMAT) == 0) { - + PyErr_Clear(); dtype = _descriptor_from_pep3118_format(buffer_view.format); PyBuffer_Release(&buffer_view); @@ -391,7 +391,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims, } else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_STRIDES) == 0 || PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) { - + PyErr_Clear(); dtype = PyArray_DescrNewFromType(NPY_VOID); dtype->elsize = buffer_view.itemsize; diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h index 82b66123c..9df644210 100644 --- a/numpy/core/src/multiarray/common.h +++ b/numpy/core/src/multiarray/common.h @@ -39,7 +39,7 @@ _array_find_python_scalar_type(PyObject *op); NPY_NO_EXPORT PyArray_Descr * _array_typedescr_fromstr(char *str); -/* +/* * Returns -1 and sets an exception if *index is an invalid index for * an array of size max_item, otherwise adjusts it in place to be * 0 <= *index < max_item, and returns 0. diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c index 8ce7bb74d..9450766ab 100644 --- a/numpy/core/src/multiarray/conversion_utils.c +++ b/numpy/core/src/multiarray/conversion_utils.c @@ -802,7 +802,7 @@ PyArray_PyIntAsIntp(PyObject *o) PyErr_Clear(); } - /* + /* * For backward compatibility check the number C-Api number protcol * This should be removed up the finish label after deprecation. */ @@ -1127,7 +1127,7 @@ PyArray_TypestrConvert(int itemsize, int gentype) } break; } - + /* * Raise deprecate warning if new type hasn't been * set yet and size char is invalid. 
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c index 19ba5d327..e327db024 100644 --- a/numpy/core/src/multiarray/convert_datatype.c +++ b/numpy/core/src/multiarray/convert_datatype.c @@ -738,9 +738,9 @@ can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, /* An aligned memory buffer large enough to hold any type */ npy_longlong value[4]; - /* - * If the two dtypes are actually references to the same object - * or if casting type is forced unsafe then always OK. + /* + * If the two dtypes are actually references to the same object + * or if casting type is forced unsafe then always OK. */ if (scal_type == to || casting == NPY_UNSAFE_CASTING ) { return 1; diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 9e84d9a32..07de463f7 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -1805,8 +1805,8 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) oldtype = PyArray_DESCR(arr); if (newtype == NULL) { - /* - * Check if object is of array with Null newtype. + /* + * Check if object is of array with Null newtype. * If so return it directly instead of checking for casting. */ if (flags == 0) { diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index 3923709ed..aeff950c6 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -4245,4 +4245,3 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape, *out_ndim = ndim; return 0; } - diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c index 6e06103a8..7f56ddb03 100644 --- a/numpy/core/src/multiarray/flagsobject.c +++ b/numpy/core/src/multiarray/flagsobject.c @@ -100,7 +100,7 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask) * * strides[0] == itemsize * strides[i] == shape[i - 1] * strides[i - 1] - * + * * According to these rules, a 0- or 1-dimensional array is either both * C- and F-contiguous, or neither; and an array with 2+ dimensions * can be C- or F- contiguous, or neither, but not both. Though there diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index 4fbdd5e54..b7fed6de7 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -975,8 +975,8 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) "Cannot delete iterator elements"); return -1; } - - if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) + + if (PyArray_FailUnlessWriteable(self->ao, "underlying array") < 0) return -1; if (ind == Py_Ellipsis) { @@ -1158,9 +1158,9 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op)) * 2) underlying array is not contiguous * -- make new 1-d contiguous array with updateifcopy flag set * to copy back to the old array - * + * * If underlying array is readonly, then we make the output array readonly - * and updateifcopy does not apply. + * and updateifcopy does not apply. 
*/ size = PyArray_SIZE(it->ao); Py_INCREF(PyArray_DESCR(it->ao)); diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 22a4d8c97..3cdde9afe 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -987,7 +987,7 @@ array_subscript_fancy(PyArrayObject *self, PyObject *op, int fancy) int oned; PyObject *other; PyArrayMapIterObject *mit; - + oned = ((PyArray_NDIM(self) == 1) && !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1)); @@ -1030,7 +1030,7 @@ array_subscript_fromobject(PyArrayObject *self, PyObject *op) { int fancy; npy_intp vals[NPY_MAXDIMS]; - + /* Integer index */ if (PyArray_IsIntegerScalar(op) || (PyIndex_Check(op) && !PySequence_Check(op))) { @@ -1212,7 +1212,7 @@ array_subscript(PyArrayObject *self, PyObject *op) "0-dimensional arrays can't be indexed"); return NULL; } - + else { fancy = fancy_indexing_check(op); if (fancy != SOBJ_NOTFANCY) { @@ -1226,8 +1226,8 @@ array_subscript(PyArrayObject *self, PyObject *op) if (ret == NULL) { return NULL; } - - if (PyArray_Check(ret) && PyArray_NDIM((PyArrayObject *)ret) == 0 + + if (PyArray_Check(ret) && PyArray_NDIM((PyArrayObject *)ret) == 0 && !_check_ellipses(op)) { return PyArray_Return((PyArrayObject *)ret); } @@ -2002,7 +2002,7 @@ PyArray_MapIterNew(PyObject *indexobj, int oned, int fancy) */ /* convert all inputs to iterators */ - if (PyArray_Check(indexobj) && PyArray_ISBOOL(indexobj) + if (PyArray_Check(indexobj) && PyArray_ISBOOL(indexobj) && !PyArray_IsZeroDim(indexobj)) { mit->numiter = _nonzero_indices(indexobj, mit->iters); if (mit->numiter < 0) { @@ -2124,10 +2124,10 @@ PyArray_MapIterArray(PyArrayObject * a, PyObject * index) int fancy = fancy_indexing_check(index); /* - * MapIterNew supports a special mode that allows more efficient 1-d iteration, - * but clients that want to make use of this need to use a different API just + * MapIterNew supports a special mode that allows more efficient 1-d iteration, + * but clients that want to make use of this need to use a different API just * for the one-d cases. For the public interface this is confusing, so we - * unconditionally disable the 1-d optimized mode, and use the generic + * unconditionally disable the 1-d optimized mode, and use the generic * implementation in all cases. 
*/ int oned = 0; diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index e4858eef7..45e322973 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -1992,7 +1992,7 @@ array_dot(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject *numpycore = NULL; char * kwords[] = {"b", "out", NULL }; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwords, &b, &out)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwords, &b, &out)) { return NULL; } diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index dd1f3c7d7..9fe406c72 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -488,7 +488,7 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, for (iarrays = 0; iarrays < narrays; ++iarrays) { shape += sizes[iarrays] = PyArray_SIZE(arrays[iarrays]); /* Check for overflow */ - if (shape < 0) { + if (shape < 0) { PyErr_SetString(PyExc_ValueError, "total number of elements " "too large to concatenate"); diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src index f9b35deed..f64b7e9f7 100644 --- a/numpy/core/src/multiarray/scalartypes.c.src +++ b/numpy/core/src/multiarray/scalartypes.c.src @@ -3051,7 +3051,7 @@ void_arrtype_hash(PyObject *obj) /* Cannot hash mutable void scalars */ if (p->flags & NPY_ARRAY_WRITEABLE) { PyErr_SetString(PyExc_TypeError, "unhashable type: 'writeable void-scalar'"); - return -1; + return -1; } len = voidtype_length(p); for (n=0; n < len; n++) { diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c index 67ee4b04b..41c670df4 100644 --- a/numpy/core/src/multiarray/shape.c +++ b/numpy/core/src/multiarray/shape.c @@ -361,7 +361,7 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims, int oi, oj, ok, ni, nj, nk; oldnd = 0; - /* + /* * Remove axes with dimension 1 from the old array. They have no effect * but would need special cases since their strides do not matter. 
*/ diff --git a/numpy/core/src/multiarray/testcalcs.py b/numpy/core/src/multiarray/testcalcs.py index 9182ae2c3..e8b7b1734 100644 --- a/numpy/core/src/multiarray/testcalcs.py +++ b/numpy/core/src/multiarray/testcalcs.py @@ -33,7 +33,7 @@ def year_offset(year): else return_val = year*365 + (year-3)/4 - (year-99)/100 + (year-399)/400; """ - return weave.inline(code,['year']) + return weave.inline(code, ['year']) def days_from_ymd(year, month, day): diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c index 883830795..34ac64287 100644 --- a/numpy/core/src/npymath/halffloat.c +++ b/numpy/core/src/npymath/halffloat.c @@ -527,4 +527,3 @@ npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) return d_sgn + (((npy_uint64)(h&0x7fffu) + 0xfc000u) << 42); } } - diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src index 90bbf5fb6..2aba8be8f 100644 --- a/numpy/core/src/npymath/ieee754.c.src +++ b/numpy/core/src/npymath/ieee754.c.src @@ -21,7 +21,7 @@ double npy_copysign(double x, double y) /* The below code is provided for compilers which do not yet provide C11 compatibility (gcc 4.5 and older) */ -#ifndef LDBL_TRUE_MIN +#ifndef LDBL_TRUE_MIN #define LDBL_TRUE_MIN __LDBL_DENORM_MIN__ #endif @@ -678,4 +678,3 @@ void npy_set_floatstatus_invalid(void) } #endif - diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src index a41aa87f1..b11753367 100644 --- a/numpy/core/src/npysort/selection.c.src +++ b/numpy/core/src/npysort/selection.c.src @@ -1,7 +1,7 @@ /* -*- c -*- */ /* - * + * * The code is loosely based on the quickselect from * Nicolas Devillard - 1998 public domain * http://ndevilla.free.fr/median/median/ diff --git a/numpy/core/src/private/npy_partition.h b/numpy/core/src/private/npy_partition.h index 9e1c8494a..f2b684d3a 100644 --- a/numpy/core/src/private/npy_partition.h +++ b/numpy/core/src/private/npy_partition.h @@ -556,4 +556,3 @@ get_argpartition_func(int type, NPY_SELECTKIND which) } #endif - diff --git a/numpy/core/src/scalarmathmodule.c.src b/numpy/core/src/scalarmathmodule.c.src index 307c7ddb0..d789a3dd4 100644 --- a/numpy/core/src/scalarmathmodule.c.src +++ b/numpy/core/src/scalarmathmodule.c.src @@ -500,7 +500,7 @@ half_ctype_remainder(npy_half a, npy_half b, npy_half *out) { static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b); static void -@name@_ctype_power(@type@ a, @type@ b, @type@ *out) +@name@_ctype_power(@type@ a, @type@ b, @type@ *out) { *out = _basic_@name@_pow(a, b); } diff --git a/numpy/core/src/umath/loops.h b/numpy/core/src/umath/loops.h index 7f49f1956..62890f370 100644 --- a/numpy/core/src/umath/loops.h +++ b/numpy/core/src/umath/loops.h @@ -2726,4 +2726,3 @@ OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED */ #endif - diff --git a/numpy/core/src/umath/operand_flag_tests.c.src b/numpy/core/src/umath/operand_flag_tests.c.src index 7eaf1fbfc..4fb428bfc 100644 --- a/numpy/core/src/umath/operand_flag_tests.c.src +++ b/numpy/core/src/umath/operand_flag_tests.c.src @@ -79,7 +79,7 @@ PyMODINIT_FUNC initoperand_flag_tests(void) PyUFunc_None, "inplace_add", "inplace_add_docstring", 0); - /* + /* * Set flags to turn off buffering for first input operand, * so that result can be written back to input operand. 
*/ diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c index 3f2b94a4a..bb4c0f44e 100644 --- a/numpy/core/src/umath/reduction.c +++ b/numpy/core/src/umath/reduction.c @@ -250,7 +250,7 @@ check_nonreorderable_axes(int ndim, npy_bool *axis_flags, const char *funcname) * PyArray_AssignOne(result, NULL, NULL), because this function raises an * exception when there are no elements to reduce (which appropriate iff the * reduction operation has no identity). - * + * * This means it copies the subarray indexed at zero along each reduction axis * into 'result', then returns a view into 'operand' excluding those copied * elements. diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c index 6a16692b0..36dbf6569 100644 --- a/numpy/core/src/umath/ufunc_type_resolution.c +++ b/numpy/core/src/umath/ufunc_type_resolution.c @@ -1613,7 +1613,7 @@ linear_search_userloop_type_resolver(PyUFuncObject *self, for (i = 0; i < nop; ++i) { int type_num; - + /* no more ufunc arguments to check */ if (op[i] == NULL) { break; @@ -2129,5 +2129,3 @@ type_tuple_type_resolver(PyUFuncObject *self, return -1; } - - diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py index 653b2eeb9..0bec38f22 100644 --- a/numpy/core/tests/test_api.py +++ b/numpy/core/tests/test_api.py @@ -167,19 +167,19 @@ def test_fastCopyAndTranspose(): assert_(b.flags.owndata) # 1D array - a = np.array([3,2,7,0]) + a = np.array([3, 2, 7, 0]) b = np.fastCopyAndTranspose(a) assert_equal(b, a.T) assert_(b.flags.owndata) # 2D array - a = np.arange(6).reshape(2,3) + a = np.arange(6).reshape(2, 3) b = np.fastCopyAndTranspose(a) assert_equal(b, a.T) assert_(b.flags.owndata) def test_array_astype(): - a = np.arange(6, dtype='f4').reshape(2,3) + a = np.arange(6, dtype='f4').reshape(2, 3) # Default behavior: allows unsafe casts, keeps memory layout, # always copies. 
b = a.astype('i4') @@ -221,7 +221,7 @@ def test_array_astype(): b = a.astype('f4', subok=0, copy=False) assert_(a is b) - a = np.matrix([[0,1,2],[3,4,5]], dtype='f4') + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') # subok=True passes through a matrix b = a.astype('f4', subok=True, copy=False) @@ -268,7 +268,7 @@ def test_array_astype(): assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) a = np.array([123456789012345678901234567890], dtype='O').astype('U') assert_array_equal(a, np.array(sixu('1234567890' * 3), dtype='U30')) - + a = np.array(123456789012345678901234567890, dtype='S') assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) a = np.array(123456789012345678901234567890, dtype='U') @@ -276,7 +276,7 @@ def test_array_astype(): def test_copyto_fromscalar(): - a = np.arange(6, dtype='f4').reshape(2,3) + a = np.arange(6, dtype='f4').reshape(2, 3) # Simple copy np.copyto(a, 1.5) @@ -285,23 +285,23 @@ def test_copyto_fromscalar(): assert_equal(a, 2.5) # Where-masked copy - mask = np.array([[0,1,0],[0,0,1]], dtype='?') + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') np.copyto(a, 3.5, where=mask) - assert_equal(a, [[2.5,3.5,2.5],[2.5,2.5,3.5]]) - mask = np.array([[0,1],[1,1],[1,0]], dtype='?') + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') np.copyto(a.T, 4.5, where=mask) - assert_equal(a, [[2.5,4.5,4.5],[4.5,4.5,3.5]]) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) def test_copyto(): - a = np.arange(6, dtype='i4').reshape(2,3) + a = np.arange(6, dtype='i4').reshape(2, 3) # Simple copy - np.copyto(a, [[3,1,5], [6,2,1]]) - assert_equal(a, [[3,1,5], [6,2,1]]) + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) # Overlapping copy should work - np.copyto(a[:,:2], a[::-1, 1::-1]) - assert_equal(a, [[2,6,5], [1,3,1]]) + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) # Defaults to 'same_kind' casting assert_raises(TypeError, np.copyto, a, 1.5) @@ -311,27 +311,27 @@ def test_copyto(): assert_equal(a, 1) # Copying with a mask - np.copyto(a, 3, where=[True,False,True]) - assert_equal(a, [[3,1,3],[3,1,3]]) + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) # Casting rule still applies with a mask - assert_raises(TypeError, np.copyto, a, 3.5, where=[True,False,True]) + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) # Lists of integer 0's and 1's is ok too - np.copyto(a, 4.0, casting='unsafe', where=[[0,1,1], [1,0,0]]) - assert_equal(a, [[3,4,4], [4,1,3]]) + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) # Overlapping copy with mask should work - np.copyto(a[:,:2], a[::-1, 1::-1], where=[[0,1],[1,1]]) - assert_equal(a, [[3,4,4], [4,3,3]]) + np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]]) + assert_equal(a, [[3, 4, 4], [4, 3, 3]]) # 'dst' must be an array - assert_raises(TypeError, np.copyto, [1,2,3], [2,3,4]) + assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) def test_copy_order(): - a = np.arange(24).reshape(2,1,3,4) + a = np.arange(24).reshape(2, 1, 3, 4) b = a.copy(order='F') - c = np.arange(24).reshape(2,1,4,3).swapaxes(2,3) + c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) def check_copy_result(x, y, ccontig, fcontig, strides=False): assert_(not (x is y)) @@ -397,10 +397,10 @@ def test_copy_order(): check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) def test_contiguous_flags(): 
- a = np.ones((4,4,1))[::2,:,:] + a = np.ones((4, 4, 1))[::2,:,:] if NPY_RELAXED_STRIDES_CHECKING: a.strides = a.strides[:2] + (-123,) - b = np.ones((2,2,1,2,2)).swapaxes(3,4) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) def check_contig(a, ccontig, fcontig): assert_(a.flags.c_contiguous == ccontig) @@ -410,13 +410,13 @@ def test_contiguous_flags(): check_contig(a, False, False) check_contig(b, False, False) if NPY_RELAXED_STRIDES_CHECKING: - check_contig(np.empty((2,2,0,2,2)), True, True) - check_contig(np.array([[[1],[2]]], order='F'), True, True) + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) else: - check_contig(np.empty((2,2,0,2,2)), True, False) - check_contig(np.array([[[1],[2]]], order='F'), False, True) - check_contig(np.empty((2,2)), True, False) - check_contig(np.empty((2,2), order='F'), False, True) + check_contig(np.empty((2, 2, 0, 2, 2)), True, False) + check_contig(np.array([[[1], [2]]], order='F'), False, True) + check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) # Check that np.array creates correct contiguous flags: check_contig(np.array(a, copy=False), False, False) @@ -426,27 +426,27 @@ def test_contiguous_flags(): if NPY_RELAXED_STRIDES_CHECKING: # Check slicing update of flags and : check_contig(a[0], True, True) - check_contig(a[None,::4,...,None], True, True) - check_contig(b[0,0,...], False, True) - check_contig(b[:,:,0:0,:,:], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:,:, 0:0,:,:], True, True) else: # Check slicing update of flags: check_contig(a[0], True, False) # Would be nice if this was C-Contiguous: - check_contig(a[None,0,...,None], False, False) - check_contig(b[0,0,0,...], False, True) + check_contig(a[None, 0, ..., None], False, False) + check_contig(b[0, 0, 0, ...], False, True) # Test ravel and squeeze. 
check_contig(a.ravel(), True, True) - check_contig(np.ones((1,3,1)).squeeze(), True, True) + check_contig(np.ones((1, 3, 1)).squeeze(), True, True) def test_broadcast_arrays(): # Test user defined dtypes - a = np.array([(1,2,3)], dtype='u4,u4,u4') - b = np.array([(1,2,3),(4,5,6),(7,8,9)], dtype='u4,u4,u4') + a = np.array([(1, 2, 3)], dtype='u4,u4,u4') + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') result = np.broadcast_arrays(a, b) - assert_equal(result[0], np.array([(1,2,3),(1,2,3),(1,2,3)], dtype='u4,u4,u4')) - assert_equal(result[1], np.array([(1,2,3),(4,5,6),(7,8,9)], dtype='u4,u4,u4')) + assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4')) + assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')) if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_blasdot.py b/numpy/core/tests/test_blasdot.py index 2e99cf5b0..624c617d3 100644 --- a/numpy/core/tests/test_blasdot.py +++ b/numpy/core/tests/test_blasdot.py @@ -50,16 +50,16 @@ def test_dot_3args(): r = np.empty((1024, 32)) for i in range(12): - np.dot(f,v,r) + np.dot(f, v, r) assert_equal(sys.getrefcount(r), 2) - r2 = np.dot(f,v,out=None) + r2 = np.dot(f, v, out=None) assert_array_equal(r2, r) - assert_(r is np.dot(f,v,out=r)) + assert_(r is np.dot(f, v, out=r)) - v = v[:,0].copy() # v.shape == (16,) - r = r[:,0].copy() # r.shape == (1024,) - r2 = np.dot(f,v) - assert_(r is np.dot(f,v,r)) + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = np.dot(f, v) + assert_(r is np.dot(f, v, r)) assert_array_equal(r2, r) def test_dot_3args_errors(): @@ -81,8 +81,8 @@ def test_dot_3args_errors(): assert_raises(ValueError, np.dot, f, v, r.T) r = np.empty((1024, 64)) - assert_raises(ValueError, np.dot, f, v, r[:,::2]) - assert_raises(ValueError, np.dot, f, v, r[:,:32]) + assert_raises(ValueError, np.dot, f, v, r[:, ::2]) + assert_raises(ValueError, np.dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, np.dot, f, v, r) diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 4ba0c048a..84efd87b3 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -159,26 +159,26 @@ class TestDateTime(TestCase): # Construction from datetime.date assert_equal(np.datetime64('1945-03-25'), - np.datetime64(datetime.date(1945,3,25))) + np.datetime64(datetime.date(1945, 3, 25))) assert_equal(np.datetime64('2045-03-25', 'D'), - np.datetime64(datetime.date(2045,3,25), 'D')) + np.datetime64(datetime.date(2045, 3, 25), 'D')) # Construction from datetime.datetime assert_equal(np.datetime64('1980-01-25T14:36:22.5Z'), - np.datetime64(datetime.datetime(1980,1,25, - 14,36,22,500000))) + np.datetime64(datetime.datetime(1980, 1, 25, + 14, 36, 22, 500000))) # Construction with time units from a date raises assert_raises(TypeError, np.datetime64, '1920-03-13', 'h') assert_raises(TypeError, np.datetime64, '1920-03', 'm') assert_raises(TypeError, np.datetime64, '1920', 's') - assert_raises(TypeError, np.datetime64, datetime.date(2045,3,25), 'ms') + assert_raises(TypeError, np.datetime64, datetime.date(2045, 3, 25), 'ms') # Construction with date units from a datetime raises assert_raises(TypeError, np.datetime64, '1920-03-13T18Z', 'D') assert_raises(TypeError, np.datetime64, '1920-03-13T18:33Z', 'W') assert_raises(TypeError, np.datetime64, '1920-03-13T18:33:12Z', 'M') assert_raises(TypeError, np.datetime64, '1920-03-13T18:33:12.5Z', 
'Y') assert_raises(TypeError, np.datetime64, - datetime.datetime(1920,4,14,13,20), 'D') + datetime.datetime(1920, 4, 14, 13, 20), 'D') def test_datetime_array_find_type(self): dt = np.datetime64('1970-01-01', 'M') @@ -351,12 +351,12 @@ class TestDateTime(TestCase): np.dtype('M8[as]')) # Python date object - assert_equal(np.datetime64(datetime.date(2010,4,16)).dtype, + assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype, np.dtype('M8[D]')) # Python datetime object assert_equal(np.datetime64( - datetime.datetime(2010,4,16,13,45,18)).dtype, + datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype, np.dtype('M8[us]')) # 'today' special value @@ -592,7 +592,7 @@ class TestDateTime(TestCase): def test_pyobject_roundtrip(self): # All datetime types should be able to roundtrip through object - a = np.array([0,0,0,0,0,0,0,0,0, + a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176], dtype=np.int64) # With date units @@ -878,11 +878,11 @@ class TestDateTime(TestCase): # M8 - M8 with different goes to higher precision assert_equal(np.subtract(dtc, dtd, casting='unsafe'), - np.timedelta64(0,'h')) + np.timedelta64(0, 'h')) assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype, np.dtype('m8[h]')) assert_equal(np.subtract(dtd, dtc, casting='unsafe'), - np.timedelta64(0,'h')) + np.timedelta64(0, 'h')) assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype, np.dtype('m8[h]')) @@ -1004,40 +1004,40 @@ class TestDateTime(TestCase): # of the operand metadata a = np.array('1999-03-12T13Z', dtype='M8[2m]') b = np.array('1999-03-12T12Z', dtype='M8[s]') - assert_equal(np.minimum(a,b), b) - assert_equal(np.minimum(a,b).dtype, np.dtype('M8[s]')) - assert_equal(np.fmin(a,b), b) - assert_equal(np.fmin(a,b).dtype, np.dtype('M8[s]')) - assert_equal(np.maximum(a,b), a) - assert_equal(np.maximum(a,b).dtype, np.dtype('M8[s]')) - assert_equal(np.fmax(a,b), a) - assert_equal(np.fmax(a,b).dtype, np.dtype('M8[s]')) + assert_equal(np.minimum(a, b), b) + assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]')) + assert_equal(np.fmin(a, b), b) + assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]')) + assert_equal(np.maximum(a, b), a) + assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]')) + assert_equal(np.fmax(a, b), a) + assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]')) # Viewed as integers, the comparison is opposite because # of the units chosen - assert_equal(np.minimum(a.view('i8'),b.view('i8')), a.view('i8')) + assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) # Interaction with NaT a = np.array('1999-03-12T13Z', dtype='M8[2m]') dtnat = np.array('NaT', dtype='M8[h]') - assert_equal(np.minimum(a,dtnat), a) - assert_equal(np.minimum(dtnat,a), a) - assert_equal(np.maximum(a,dtnat), a) - assert_equal(np.maximum(dtnat,a), a) + assert_equal(np.minimum(a, dtnat), a) + assert_equal(np.minimum(dtnat, a), a) + assert_equal(np.maximum(a, dtnat), a) + assert_equal(np.maximum(dtnat, a), a) # Also do timedelta a = np.array(3, dtype='m8[h]') b = np.array(3*3600 - 3, dtype='m8[s]') - assert_equal(np.minimum(a,b), b) - assert_equal(np.minimum(a,b).dtype, np.dtype('m8[s]')) - assert_equal(np.fmin(a,b), b) - assert_equal(np.fmin(a,b).dtype, np.dtype('m8[s]')) - assert_equal(np.maximum(a,b), a) - assert_equal(np.maximum(a,b).dtype, np.dtype('m8[s]')) - assert_equal(np.fmax(a,b), a) - assert_equal(np.fmax(a,b).dtype, np.dtype('m8[s]')) + assert_equal(np.minimum(a, b), b) + assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]')) + assert_equal(np.fmin(a, b), b) + 
assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]')) + assert_equal(np.maximum(a, b), a) + assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]')) + assert_equal(np.fmax(a, b), a) + assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]')) # Viewed as integers, the comparison is opposite because # of the units chosen - assert_equal(np.minimum(a.view('i8'),b.view('i8')), a.view('i8')) + assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8')) # should raise between datetime and timedelta # @@ -1394,18 +1394,18 @@ class TestDateTime(TestCase): assert_raises(ValueError, np.arange, np.datetime64('today'), np.datetime64('today') + 3, 0) # Promotion across nonlinear unit boundaries is disallowed - assert_raises(TypeError, np.arange, np.datetime64('2011-03-01','D'), - np.timedelta64(5,'M')) + assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'), + np.timedelta64(5, 'M')) assert_raises(TypeError, np.arange, - np.datetime64('2012-02-03T14Z','s'), - np.timedelta64(5,'Y')) + np.datetime64('2012-02-03T14Z', 's'), + np.timedelta64(5, 'Y')) def test_timedelta_arange(self): a = np.arange(3, 10, dtype='m8') assert_equal(a.dtype, np.dtype('m8')) assert_equal(a, np.timedelta64(0) + np.arange(3, 10)) - a = np.arange(np.timedelta64(3,'s'), 10, 2, dtype='m8') + a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8') assert_equal(a.dtype, np.dtype('m8[s]')) assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2)) @@ -1413,18 +1413,18 @@ class TestDateTime(TestCase): assert_raises(ValueError, np.arange, np.timedelta64(0), np.timedelta64(5), 0) # Promotion across nonlinear unit boundaries is disallowed - assert_raises(TypeError, np.arange, np.timedelta64(0,'D'), - np.timedelta64(5,'M')) - assert_raises(TypeError, np.arange, np.timedelta64(0,'Y'), - np.timedelta64(5,'D')) + assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'), + np.timedelta64(5, 'M')) + assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'), + np.timedelta64(5, 'D')) def test_datetime_maximum_reduce(self): - a = np.array(['2010-01-02','1999-03-14','1833-03'], dtype='M8[D]') + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]') assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]')) assert_equal(np.maximum.reduce(a), np.datetime64('2010-01-02')) - a = np.array([1,4,0,7,2], dtype='m8[s]') + a = np.array([1, 4, 0, 7, 2], dtype='m8[s]') assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]')) assert_equal(np.maximum.reduce(a), np.timedelta64(7, 's')) @@ -1432,14 +1432,14 @@ class TestDateTime(TestCase): def test_datetime_busday_offset(self): # First Monday in June assert_equal( - np.busday_offset('2011-06',0,roll='forward',weekmask='Mon'), + np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'), np.datetime64('2011-06-06')) # Last Monday in June assert_equal( - np.busday_offset('2011-07',-1,roll='forward',weekmask='Mon'), + np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'), np.datetime64('2011-06-27')) assert_equal( - np.busday_offset('2011-07',-1,roll='forward',weekmask='Mon'), + np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'), np.datetime64('2011-06-27')) # Default M-F business days, different roll modes @@ -1481,26 +1481,26 @@ class TestDateTime(TestCase): # and sorts the result. 
bdd = np.busdaycalendar( holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT', - '2011-12-26','2011-05-30','2011-01-17']) + '2011-12-26', '2011-05-30', '2011-01-17']) assert_equal(bdd.holidays, - np.array(['2011-01-17','2011-05-30','2011-12-26'], dtype='M8')) + np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8')) # Default M-F weekmask - assert_equal(bdd.weekmask, np.array([1,1,1,1,1,0,0], dtype='?')) + assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?')) # Check string weekmask with varying whitespace. bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri") - assert_equal(bdd.weekmask, np.array([0,1,1,1,1,0,1], dtype='?')) + assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?')) # Check length 7 0/1 string bdd = np.busdaycalendar(weekmask="0011001") - assert_equal(bdd.weekmask, np.array([0,0,1,1,0,0,1], dtype='?')) + assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?')) # Check length 7 string weekmask. bdd = np.busdaycalendar(weekmask="Mon Tue") - assert_equal(bdd.weekmask, np.array([1,1,0,0,0,0,0], dtype='?')) + assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?')) # All-zeros weekmask should raise - assert_raises(ValueError, np.busdaycalendar, weekmask=[0,0,0,0,0,0,0]) + assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0]) # weekday names must be correct case assert_raises(ValueError, np.busdaycalendar, weekmask="satsun") # All-zeros weekmask should raise diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py index 4656c842f..09fcff0d0 100644 --- a/numpy/core/tests/test_defchararray.py +++ b/numpy/core/tests/test_defchararray.py @@ -37,14 +37,14 @@ class TestBasic(TestCase): assert_array_equal(B, A) assert_equal(B.dtype, A.dtype) assert_equal(B.shape, A.shape) - B[0,0] = 'changed' - assert_(B[0,0] != A[0,0]) + B[0, 0] = 'changed' + assert_(B[0, 0] != A[0, 0]) C = np.char.asarray(A) assert_array_equal(C, A) assert_equal(C.dtype, A.dtype) - C[0,0] = 'changed again' - assert_(C[0,0] != B[0,0]) - assert_(C[0,0] == A[0,0]) + C[0, 0] = 'changed again' + assert_(C[0, 0] != B[0, 0]) + assert_(C[0, 0] == A[0, 0]) def test_from_unicode_array(self): A = np.array([['abc', sixu('Sigma \u03a3')], @@ -572,9 +572,9 @@ class TestOperations(TestCase): def test_mul(self): A = self.A - for r in (2,3,5,7,197): - Ar = np.array([[A[0,0]*r, A[0,1]*r], - [A[1,0]*r, A[1,1]*r]]).view(np.chararray) + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) assert_array_equal(Ar, (self.A * r)) @@ -588,9 +588,9 @@ class TestOperations(TestCase): def test_rmul(self): A = self.A - for r in (2,3,5,7,197): - Ar = np.array([[A[0,0]*r, A[0,1]*r], - [A[1,0]*r, A[1,1]*r]]).view(np.chararray) + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0]*r, A[0, 1]*r], + [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray) assert_array_equal(Ar, (r * self.A)) for ob in [object(), 'qrs']: @@ -603,18 +603,18 @@ class TestOperations(TestCase): def test_mod(self): """Ticket #856""" - F = np.array([['%d', '%f'],['%s','%r']]).view(np.chararray) - C = np.array([[3,7],[19,1]]) + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray) + C = np.array([[3, 7], [19, 1]]) FC = np.array([['3', '7.000000'], ['19', '1']]).view(np.chararray) assert_array_equal(FC, F % C) - A = np.array([['%.3f','%d'],['%s','%r']]).view(np.chararray) - A1 = np.array([['1.000','1'],['1','1']]).view(np.chararray) + A = np.array([['%.3f', '%d'], ['%s', 
'%r']]).view(np.chararray) + A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray) assert_array_equal(A1, (A % 1)) - A2 = np.array([['1.000','2'],['3','4']]).view(np.chararray) - assert_array_equal(A2, (A % [[1,2],[3,4]])) + A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) def test_rmod(self): assert_(("%s" % self.A) == str(self.A)) diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py index 2bedbca09..11e572ada 100644 --- a/numpy/core/tests/test_deprecations.py +++ b/numpy/core/tests/test_deprecations.py @@ -148,20 +148,20 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): assert_deprecated(lambda: a[0.0]) assert_deprecated(lambda: a[0, 0.0]) assert_deprecated(lambda: a[0.0, 0]) - assert_deprecated(lambda: a[0.0, :]) + assert_deprecated(lambda: a[0.0,:]) assert_deprecated(lambda: a[:, 0.0]) - assert_deprecated(lambda: a[:, 0.0, :]) - assert_deprecated(lambda: a[0.0, :, :], num=2) # [1] + assert_deprecated(lambda: a[:, 0.0,:]) + assert_deprecated(lambda: a[0.0,:,:], num=2) # [1] assert_deprecated(lambda: a[0, 0, 0.0]) assert_deprecated(lambda: a[0.0, 0, 0]) assert_deprecated(lambda: a[0, 0.0, 0]) assert_deprecated(lambda: a[-1.4]) assert_deprecated(lambda: a[0, -1.4]) assert_deprecated(lambda: a[-1.4, 0]) - assert_deprecated(lambda: a[-1.4, :]) + assert_deprecated(lambda: a[-1.4,:]) assert_deprecated(lambda: a[:, -1.4]) - assert_deprecated(lambda: a[:, -1.4, :]) - assert_deprecated(lambda: a[-1.4, :, :], num=2) # [1] + assert_deprecated(lambda: a[:, -1.4,:]) + assert_deprecated(lambda: a[-1.4,:,:], num=2) # [1] assert_deprecated(lambda: a[0, 0, -1.4]) assert_deprecated(lambda: a[-1.4, 0, 0]) assert_deprecated(lambda: a[0, -1.4, 0]) @@ -170,7 +170,7 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): # Test that the slice parameter deprecation warning doesn't mask # the scalar index warning. assert_deprecated(lambda: a[0.0:, 0.0], num=2) - assert_deprecated(lambda: a[0.0:, 0.0, :], num=2) + assert_deprecated(lambda: a[0.0:, 0.0,:], num=2) def test_valid_indexing(self): @@ -180,8 +180,8 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): assert_not_deprecated(lambda: a[np.array([0])]) assert_not_deprecated(lambda: a[[0, 0]]) assert_not_deprecated(lambda: a[:, [0, 0]]) - assert_not_deprecated(lambda: a[:, 0, :]) - assert_not_deprecated(lambda: a[:, :, :]) + assert_not_deprecated(lambda: a[:, 0,:]) + assert_not_deprecated(lambda: a[:,:,:]) def test_slicing(self): @@ -193,26 +193,26 @@ class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): assert_deprecated(lambda: a[0.0:]) assert_deprecated(lambda: a[0:, 0.0:2]) assert_deprecated(lambda: a[0.0::2, :0]) - assert_deprecated(lambda: a[0.0:1:2, :]) + assert_deprecated(lambda: a[0.0:1:2,:]) assert_deprecated(lambda: a[:, 0.0:]) # stop as float. assert_deprecated(lambda: a[:0.0]) assert_deprecated(lambda: a[:0, 1:2.0]) assert_deprecated(lambda: a[:0.0:2, :0]) - assert_deprecated(lambda: a[:0.0, :]) + assert_deprecated(lambda: a[:0.0,:]) assert_deprecated(lambda: a[:, 0:4.0:2]) # step as float. assert_deprecated(lambda: a[::1.0]) assert_deprecated(lambda: a[0:, :2:2.0]) assert_deprecated(lambda: a[1::4.0, :0]) - assert_deprecated(lambda: a[::5.0, :]) + assert_deprecated(lambda: a[::5.0,:]) assert_deprecated(lambda: a[:, 0:4:2.0]) # mixed. 
assert_deprecated(lambda: a[1.0:2:2.0], num=2) assert_deprecated(lambda: a[1.0::2.0], num=2) assert_deprecated(lambda: a[0:, :2.0:2.0], num=2) assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2) - assert_deprecated(lambda: a[1.0:5.0:5.0, :], num=3) + assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3) assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3) # should still get the DeprecationWarning if step = 0. assert_deprecated(lambda: a[::0.0], function_fails=True) @@ -261,8 +261,8 @@ class TestBooleanArgumentDeprecation(_DeprecationTestCase): assert_raises(TypeError, operator.index, np.array(True)) self.assert_deprecated(np.take, args=(a, [0], False)) self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3) - self.assert_deprecated(lambda: a[False,0], exceptions=IndexError) - self.assert_deprecated(lambda: a[False,0,0], exceptions=IndexError) + self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError) + self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError) class TestArrayToIndexDeprecation(_DeprecationTestCase): diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py index 7493c20e2..83017ea6a 100644 --- a/numpy/core/tests/test_dtype.py +++ b/numpy/core/tests/test_dtype.py @@ -50,8 +50,8 @@ class TestBuiltin(TestCase): def test_invalid_types(self): # Make sure invalid type strings raise a warning. - # For now, display a deprecation warning for invalid - # type sizes. In the future this should be changed + # For now, display a deprecation warning for invalid + # type sizes. In the future this should be changed # to an exception. assert_warns(DeprecationWarning, np.dtype, 'O3') @@ -85,21 +85,21 @@ class TestBuiltin(TestCase): def test_bad_param(self): # Can't give a size that's too small assert_raises(ValueError, np.dtype, - {'names':['f0','f1'], + {'names':['f0', 'f1'], 'formats':['i4', 'i1'], - 'offsets':[0,4], + 'offsets':[0, 4], 'itemsize':4}) # If alignment is enabled, the alignment (4) must divide the itemsize assert_raises(ValueError, np.dtype, - {'names':['f0','f1'], + {'names':['f0', 'f1'], 'formats':['i4', 'i1'], - 'offsets':[0,4], + 'offsets':[0, 4], 'itemsize':9}, align=True) # If alignment is enabled, the individual fields must be aligned assert_raises(ValueError, np.dtype, - {'names':['f0','f1'], - 'formats':['i1','f4'], - 'offsets':[0,2]}, align=True) + {'names':['f0', 'f1'], + 'formats':['i1', 'f4'], + 'offsets':[0, 2]}, align=True) class TestRecord(TestCase): def test_equivalent_record(self): @@ -116,10 +116,10 @@ class TestRecord(TestCase): def test_different_titles(self): # In theory, they may hash the same (collision) ? 
- a = np.dtype({'names': ['r','b'], + a = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'titles': ['Red pixel', 'Blue pixel']}) - b = np.dtype({'names': ['r','b'], + b = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'titles': ['RRed pixel', 'Blue pixel']}) assert_dtype_not_equal(a, b) @@ -139,9 +139,9 @@ class TestRecord(TestCase): assert_equal(dt.itemsize, 8) dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True) assert_equal(dt.itemsize, 8) - dt = np.dtype({'names':['f0','f1'], + dt = np.dtype({'names':['f0', 'f1'], 'formats':['i4', 'u1'], - 'offsets':[0,4]}, align=True) + 'offsets':[0, 4]}, align=True) assert_equal(dt.itemsize, 8) dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True) assert_equal(dt.itemsize, 8) @@ -150,7 +150,7 @@ class TestRecord(TestCase): ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=True) assert_equal(dt1.itemsize, 20) - dt2 = np.dtype({'names':['f0','f1','f2'], + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], @@ -167,7 +167,7 @@ class TestRecord(TestCase): ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]), ('f2', 'i1')], align=False) assert_equal(dt1.itemsize, 11) - dt2 = np.dtype({'names':['f0','f1','f2'], + dt2 = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['i4', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 'i1'], @@ -182,22 +182,22 @@ class TestRecord(TestCase): def test_union_struct(self): # Should be able to create union dtypes - dt = np.dtype({'names':['f0','f1','f2'], 'formats':['<u4', '<u2','<u2'], - 'offsets':[0,0,2]}, align=True) + dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'], + 'offsets':[0, 0, 2]}, align=True) assert_equal(dt.itemsize, 4) a = np.array([3], dtype='<u4').view(dt) a['f1'] = 10 a['f2'] = 36 assert_equal(a['f0'], 10 + 36*256*256) # Should be able to specify fields out of order - dt = np.dtype({'names':['f0','f1','f2'], 'formats':['<u4', '<u2','<u2'], - 'offsets':[4,0,2]}, align=True) + dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'], + 'offsets':[4, 0, 2]}, align=True) assert_equal(dt.itemsize, 8) - dt2 = np.dtype({'names':['f2','f0','f1'], - 'formats':['<u2', '<u4','<u2'], - 'offsets':[2,4,0]}, align=True) - vals = [(0,1,2), (3,-1,4)] - vals2 = [(2,0,1), (4,3,-1)] + dt2 = np.dtype({'names':['f2', 'f0', 'f1'], + 'formats':['<u2', '<u4', '<u2'], + 'offsets':[2, 4, 0]}, align=True) + vals = [(0, 1, 2), (3, -1, 4)] + vals2 = [(2, 0, 1), (4, 3, -1)] a = np.array(vals, dt) b = np.array(vals2, dt2) assert_equal(a.astype(dt2), b) @@ -206,23 +206,23 @@ class TestRecord(TestCase): assert_equal(b.view(dt), a) # Should not be able to overlap objects with other types assert_raises(TypeError, np.dtype, - {'names':['f0','f1'], + {'names':['f0', 'f1'], 'formats':['O', 'i1'], - 'offsets':[0,2]}) + 'offsets':[0, 2]}) assert_raises(TypeError, np.dtype, - {'names':['f0','f1'], + {'names':['f0', 'f1'], 'formats':['i4', 'O'], - 'offsets':[0,3]}) + 'offsets':[0, 3]}) assert_raises(TypeError, np.dtype, - {'names':['f0','f1'], - 'formats':[[('a','O')], 'i1'], - 'offsets':[0,2]}) + {'names':['f0', 'f1'], + 'formats':[[('a', 'O')], 'i1'], + 'offsets':[0, 2]}) assert_raises(TypeError, np.dtype, - {'names':['f0','f1'], - 'formats':['i4', [('a','O')]], - 'offsets':[0,3]}) + {'names':['f0', 'f1'], + 'formats':['i4', [('a', 'O')]], + 'offsets':[0, 3]}) # Out of order should still be ok, however - dt = np.dtype({'names':['f0','f1'], + dt = np.dtype({'names':['f0', 'f1'], 'formats':['i1', 'O'], 
'offsets':[np.dtype('intp').itemsize, 0]}) @@ -263,38 +263,38 @@ class TestSubarray(TestCase): def test_shape_equal(self): """Test some data types that are equal""" - assert_dtype_equal(np.dtype('f8'), np.dtype(('f8',tuple()))) - assert_dtype_equal(np.dtype('f8'), np.dtype(('f8',1))) - assert_dtype_equal(np.dtype((np.int,2)), np.dtype((np.int,(2,)))) - assert_dtype_equal(np.dtype(('<f4',(3,2))), np.dtype(('<f4',(3,2)))) - d = ([('a','f4',(1,2)),('b','f8',(3,1))],(3,2)) + assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple()))) + assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1))) + assert_dtype_equal(np.dtype((np.int, 2)), np.dtype((np.int, (2,)))) + assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2)))) + d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2)) assert_dtype_equal(np.dtype(d), np.dtype(d)) def test_shape_simple(self): """Test some simple cases that shouldn't be equal""" - assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8',(1,)))) - assert_dtype_not_equal(np.dtype(('f8',(1,))), np.dtype(('f8',(1,1)))) - assert_dtype_not_equal(np.dtype(('f4',(3,2))), np.dtype(('f4',(2,3)))) + assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,)))) + assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1)))) + assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3)))) def test_shape_monster(self): """Test some more complicated cases that shouldn't be equal""" assert_dtype_not_equal( - np.dtype(([('a','f4',(2,1)), ('b','f8',(1,3))],(2,2))), - np.dtype(([('a','f4',(1,2)), ('b','f8',(1,3))],(2,2)))) + np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2)))) assert_dtype_not_equal( - np.dtype(([('a','f4',(2,1)), ('b','f8',(1,3))],(2,2))), - np.dtype(([('a','f4',(2,1)), ('b','i8',(1,3))],(2,2)))) + np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2)))) assert_dtype_not_equal( - np.dtype(([('a','f4',(2,1)), ('b','f8',(1,3))],(2,2))), - np.dtype(([('e','f8',(1,3)), ('d','f4',(2,1))],(2,2)))) + np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2)))) assert_dtype_not_equal( - np.dtype(([('a',[('a','i4',6)],(2,1)), ('b','f8',(1,3))],(2,2))), - np.dtype(([('a',[('a','u4',6)],(2,1)), ('b','f8',(1,3))],(2,2)))) + np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))), + np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2)))) class TestMonsterType(TestCase): """Test deeply nested subtypes.""" def test1(self): - simple1 = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], + simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'titles': ['Red pixel', 'Blue pixel']}) a = np.dtype([('yo', np.int), ('ye', simple1), ('yi', np.dtype((np.int, (3, 2))))]) @@ -357,7 +357,7 @@ class TestString(TestCase): "'aligned':True}") assert_equal(np.dtype(eval(str(dt))), dt) - dt = np.dtype({'names': ['r','g','b'], 'formats': ['u1', 'u1', 'u1'], + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], 'offsets': [0, 1, 2], 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) assert_equal(str(dt), @@ -365,7 +365,7 @@ class TestString(TestCase): "(('Green pixel', 'g'), 'u1'), " "(('Blue pixel', 'b'), 'u1')]") - dt = np.dtype({'names': ['rgba', 'r','g','b'], + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], 'formats': ['<u4', 'u1', 'u1', 'u1'], 'offsets': [0, 0, 1, 2], 
'titles': ['Color', 'Red pixel', @@ -378,7 +378,7 @@ class TestString(TestCase): "'Green pixel','Blue pixel']," " 'itemsize':4}") - dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], + dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 2], 'titles': ['Red pixel', 'Blue pixel']}) assert_equal(str(dt), @@ -403,7 +403,7 @@ class TestString(TestCase): "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " "('bright', '>f4', (8, 36))])])") - dt = np.dtype({'names': ['r','g','b'], 'formats': ['u1', 'u1', 'u1'], + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], 'offsets': [0, 1, 2], 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, align=True) @@ -412,7 +412,7 @@ class TestString(TestCase): "(('Green pixel', 'g'), 'u1'), " "(('Blue pixel', 'b'), 'u1')], align=True)") - dt = np.dtype({'names': ['rgba', 'r','g','b'], + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], 'formats': ['<u4', 'u1', 'u1', 'u1'], 'offsets': [0, 0, 1, 2], 'titles': ['Color', 'Red pixel', @@ -425,7 +425,7 @@ class TestString(TestCase): "'Green pixel','Blue pixel']," " 'itemsize':4}, align=True)") - dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], + dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], 'offsets': [0, 2], 'titles': ['Red pixel', 'Blue pixel'], 'itemsize': 4}) diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py index e7480a269..31f94bf07 100644 --- a/numpy/core/tests/test_einsum.py +++ b/numpy/core/tests/test_einsum.py @@ -38,42 +38,42 @@ class TestEinSum(TestCase): # can't have more subscripts than dimensions in the operand assert_raises(ValueError, np.einsum, "i", 0) - assert_raises(ValueError, np.einsum, "ij", [0,0]) + assert_raises(ValueError, np.einsum, "ij", [0, 0]) assert_raises(ValueError, np.einsum, "...i", 0) - assert_raises(ValueError, np.einsum, "i...j", [0,0]) + assert_raises(ValueError, np.einsum, "i...j", [0, 0]) assert_raises(ValueError, np.einsum, "i...", 0) - assert_raises(ValueError, np.einsum, "ij...", [0,0]) + assert_raises(ValueError, np.einsum, "ij...", [0, 0]) # invalid ellipsis - assert_raises(ValueError, np.einsum, "i..", [0,0]) - assert_raises(ValueError, np.einsum, ".i...", [0,0]) - assert_raises(ValueError, np.einsum, "j->..j", [0,0]) - assert_raises(ValueError, np.einsum, "j->.j...", [0,0]) + assert_raises(ValueError, np.einsum, "i..", [0, 0]) + assert_raises(ValueError, np.einsum, ".i...", [0, 0]) + assert_raises(ValueError, np.einsum, "j->..j", [0, 0]) + assert_raises(ValueError, np.einsum, "j->.j...", [0, 0]) # invalid subscript character - assert_raises(ValueError, np.einsum, "i%...", [0,0]) - assert_raises(ValueError, np.einsum, "...j$", [0,0]) - assert_raises(ValueError, np.einsum, "i->&", [0,0]) + assert_raises(ValueError, np.einsum, "i%...", [0, 0]) + assert_raises(ValueError, np.einsum, "...j$", [0, 0]) + assert_raises(ValueError, np.einsum, "i->&", [0, 0]) # output subscripts must appear in input - assert_raises(ValueError, np.einsum, "i->ij", [0,0]) + assert_raises(ValueError, np.einsum, "i->ij", [0, 0]) # output subscripts may only be specified once - assert_raises(ValueError, np.einsum, "ij->jij", [[0,0],[0,0]]) + assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]]) # dimensions much match when being collapsed - assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2,3)) - assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2,3)) + assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3)) + assert_raises(ValueError, 
np.einsum, "ii->i", np.arange(6).reshape(2, 3)) # broadcasting to new dimensions must be enabled explicitly - assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2,3)) - assert_raises(ValueError, np.einsum, "i->i", [[0,1],[0,1]], - out=np.arange(4).reshape(2,2)) + assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3)) + assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], + out=np.arange(4).reshape(2, 2)) def test_einsum_views(self): # pass-through a = np.arange(6) - a.shape = (2,3) + a.shape = (2, 3) b = np.einsum("...", a) assert_(b.base is a) @@ -85,186 +85,186 @@ class TestEinSum(TestCase): assert_(b.base is a) assert_equal(b, a) - b = np.einsum(a, [0,1]) + b = np.einsum(a, [0, 1]) assert_(b.base is a) assert_equal(b, a) # transpose a = np.arange(6) - a.shape = (2,3) + a.shape = (2, 3) b = np.einsum("ji", a) assert_(b.base is a) assert_equal(b, a.T) - b = np.einsum(a, [1,0]) + b = np.einsum(a, [1, 0]) assert_(b.base is a) assert_equal(b, a.T) # diagonal a = np.arange(9) - a.shape = (3,3) + a.shape = (3, 3) b = np.einsum("ii->i", a) assert_(b.base is a) - assert_equal(b, [a[i,i] for i in range(3)]) + assert_equal(b, [a[i, i] for i in range(3)]) - b = np.einsum(a, [0,0], [0]) + b = np.einsum(a, [0, 0], [0]) assert_(b.base is a) - assert_equal(b, [a[i,i] for i in range(3)]) + assert_equal(b, [a[i, i] for i in range(3)]) # diagonal with various ways of broadcasting an additional dimension a = np.arange(27) - a.shape = (3,3,3) + a.shape = (3, 3, 3) b = np.einsum("...ii->...i", a) assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] for x in a]) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) - b = np.einsum(a, [Ellipsis,0,0], [Ellipsis,0]) + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0]) assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] for x in a]) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) b = np.einsum("ii...->...i", a) assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(2,0,1)]) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) - b = np.einsum(a, [0,0,Ellipsis], [Ellipsis,0]) + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0]) assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(2,0,1)]) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) b = np.einsum("...ii->i...", a) assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) + assert_equal(b, [a[:, i, i] for i in range(3)]) - b = np.einsum(a, [Ellipsis,0,0], [0,Ellipsis]) + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis]) assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) + assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("jii->ij", a) assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) + assert_equal(b, [a[:, i, i] for i in range(3)]) - b = np.einsum(a, [1,0,0], [0,1]) + b = np.einsum(a, [1, 0, 0], [0, 1]) assert_(b.base is a) - assert_equal(b, [a[:,i,i] for i in range(3)]) + assert_equal(b, [a[:, i, i] for i in range(3)]) b = np.einsum("ii...->i...", a) assert_(b.base is a) - assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)]) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) - b = np.einsum(a, [0,0,Ellipsis], [0,Ellipsis]) + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis]) assert_(b.base is a) - assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)]) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i 
in range(3)]) b = np.einsum("i...i->i...", a) assert_(b.base is a) - assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)]) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) - b = np.einsum(a, [0,Ellipsis,0], [0,Ellipsis]) + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis]) assert_(b.base is a) - assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)]) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) b = np.einsum("i...i->...i", a) assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(1,0,2)]) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) - b = np.einsum(a, [0,Ellipsis,0], [Ellipsis,0]) + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0]) assert_(b.base is a) - assert_equal(b, [[x[i,i] for i in range(3)] - for x in a.transpose(1,0,2)]) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) # triple diagonal a = np.arange(27) - a.shape = (3,3,3) + a.shape = (3, 3, 3) b = np.einsum("iii->i", a) assert_(b.base is a) - assert_equal(b, [a[i,i,i] for i in range(3)]) + assert_equal(b, [a[i, i, i] for i in range(3)]) - b = np.einsum(a, [0,0,0], [0]) + b = np.einsum(a, [0, 0, 0], [0]) assert_(b.base is a) - assert_equal(b, [a[i,i,i] for i in range(3)]) + assert_equal(b, [a[i, i, i] for i in range(3)]) # swap axes a = np.arange(24) - a.shape = (2,3,4) + a.shape = (2, 3, 4) b = np.einsum("ijk->jik", a) assert_(b.base is a) - assert_equal(b, a.swapaxes(0,1)) + assert_equal(b, a.swapaxes(0, 1)) - b = np.einsum(a, [0,1,2], [1,0,2]) + b = np.einsum(a, [0, 1, 2], [1, 0, 2]) assert_(b.base is a) - assert_equal(b, a.swapaxes(0,1)) + assert_equal(b, a.swapaxes(0, 1)) def check_einsum_sums(self, dtype): # Check various sums. Does many sizes to exercise unrolled loops. 
# sum(a, axis=-1) - for n in range(1,17): + for n in range(1, 17): a = np.arange(n, dtype=dtype) assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype)) assert_equal(np.einsum(a, [0], []), np.sum(a, axis=-1).astype(dtype)) - for n in range(1,17): - a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) assert_equal(np.einsum("...i->...", a), np.sum(a, axis=-1).astype(dtype)) - assert_equal(np.einsum(a, [Ellipsis,0], [Ellipsis]), + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis]), np.sum(a, axis=-1).astype(dtype)) # sum(a, axis=0) - for n in range(1,17): - a = np.arange(2*n, dtype=dtype).reshape(2,n) + for n in range(1, 17): + a = np.arange(2*n, dtype=dtype).reshape(2, n) assert_equal(np.einsum("i...->...", a), np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]), + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]), np.sum(a, axis=0).astype(dtype)) - for n in range(1,17): - a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) assert_equal(np.einsum("i...->...", a), np.sum(a, axis=0).astype(dtype)) - assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]), + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]), np.sum(a, axis=0).astype(dtype)) # trace(a) - for n in range(1,17): - a = np.arange(n*n, dtype=dtype).reshape(n,n) + for n in range(1, 17): + a = np.arange(n*n, dtype=dtype).reshape(n, n) assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype)) - assert_equal(np.einsum(a, [0,0]), np.trace(a).astype(dtype)) + assert_equal(np.einsum(a, [0, 0]), np.trace(a).astype(dtype)) # multiply(a, b) assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case - for n in range(1,17): - a = np.arange(3*n, dtype=dtype).reshape(3,n) - b = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) + for n in range(1, 17): + a = np.arange(3*n, dtype=dtype).reshape(3, n) + b = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b)) assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]), np.multiply(a, b)) # inner(a,b) - for n in range(1,17): - a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n) + for n in range(1, 17): + a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b)) - assert_equal(np.einsum(a, [Ellipsis,0], b, [Ellipsis,0]), + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0]), np.inner(a, b)) - for n in range(1,11): - a = np.arange(n*3*2, dtype=dtype).reshape(n,3,2) + for n in range(1, 11): + a = np.arange(n*3*2, dtype=dtype).reshape(n, 3, 2) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T) - assert_equal(np.einsum(a, [0,Ellipsis], b, [0,Ellipsis]), + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis]), np.inner(a.T, b.T).T) # outer(a,b) - for n in range(1,17): + for n in range(1, 17): a = np.arange(3, dtype=dtype)+1 b = np.arange(n, dtype=dtype)+1 assert_equal(np.einsum("i,j", a, b), np.outer(a, b)) @@ -275,11 +275,11 @@ class TestEinSum(TestCase): warnings.simplefilter('ignore', np.ComplexWarning) # matvec(a,b) / a.dot(b) where a is matrix, b is vector - for n in range(1,17): - a = np.arange(4*n, dtype=dtype).reshape(4,n) + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ij, j", a, b), np.dot(a, b)) - assert_equal(np.einsum(a, [0,1], b, [1]), 
np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1]), np.dot(a, b)) c = np.arange(4, dtype=dtype) np.einsum("ij,j", a, b, out=c, @@ -288,17 +288,17 @@ class TestEinSum(TestCase): np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) c[...] = 0 - np.einsum(a, [0,1], b, [1], out=c, + np.einsum(a, [0, 1], b, [1], out=c, dtype='f8', casting='unsafe') assert_equal(c, np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) - for n in range(1,17): - a = np.arange(4*n, dtype=dtype).reshape(4,n) + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) b = np.arange(n, dtype=dtype) assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T)) - assert_equal(np.einsum(a.T, [1,0], b.T, [1]), np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1]), np.dot(b.T, a.T)) c = np.arange(4, dtype=dtype) np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe') @@ -306,30 +306,30 @@ class TestEinSum(TestCase): np.dot(b.T.astype('f8'), a.T.astype('f8')).astype(dtype)) c[...] = 0 - np.einsum(a.T, [1,0], b.T, [1], out=c, + np.einsum(a.T, [1, 0], b.T, [1], out=c, dtype='f8', casting='unsafe') assert_equal(c, np.dot(b.T.astype('f8'), a.T.astype('f8')).astype(dtype)) # matmat(a,b) / a.dot(b) where a is matrix, b is matrix - for n in range(1,17): + for n in range(1, 17): if n < 8 or dtype != 'f2': - a = np.arange(4*n, dtype=dtype).reshape(4,n) - b = np.arange(n*6, dtype=dtype).reshape(n,6) + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b)) - assert_equal(np.einsum(a, [0,1], b, [1,2]), np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2]), np.dot(a, b)) - for n in range(1,17): - a = np.arange(4*n, dtype=dtype).reshape(4,n) - b = np.arange(n*6, dtype=dtype).reshape(n,6) - c = np.arange(24, dtype=dtype).reshape(4,6) + for n in range(1, 17): + a = np.arange(4*n, dtype=dtype).reshape(4, n) + b = np.arange(n*6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe') assert_equal(c, np.dot(a.astype('f8'), b.astype('f8')).astype(dtype)) c[...] = 0 - np.einsum(a, [0,1], b, [1,2], out=c, + np.einsum(a, [0, 1], b, [1, 2], out=c, dtype='f8', casting='unsafe') assert_equal(c, np.dot(a.astype('f8'), @@ -337,50 +337,50 @@ class TestEinSum(TestCase): # matrix triple product (note this is not currently an efficient # way to multiply 3 matrices) - a = np.arange(12, dtype=dtype).reshape(3,4) - b = np.arange(20, dtype=dtype).reshape(4,5) - c = np.arange(30, dtype=dtype).reshape(5,6) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) if dtype != 'f2': assert_equal(np.einsum("ij,jk,kl", a, b, c), a.dot(b).dot(c)) - assert_equal(np.einsum(a, [0,1], b, [1,2], c, [2,3]), + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3]), a.dot(b).dot(c)) - d = np.arange(18, dtype=dtype).reshape(3,6) + d = np.arange(18, dtype=dtype).reshape(3, 6) np.einsum("ij,jk,kl", a, b, c, out=d, dtype='f8', casting='unsafe') assert_equal(d, a.astype('f8').dot(b.astype('f8') ).dot(c.astype('f8')).astype(dtype)) d[...] 
= 0 - np.einsum(a, [0,1], b, [1,2], c, [2,3], out=d, + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, dtype='f8', casting='unsafe') assert_equal(d, a.astype('f8').dot(b.astype('f8') ).dot(c.astype('f8')).astype(dtype)) # tensordot(a, b) if np.dtype(dtype) != np.dtype('f2'): - a = np.arange(60, dtype=dtype).reshape(3,4,5) - b = np.arange(24, dtype=dtype).reshape(4,3,2) + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) assert_equal(np.einsum("ijk, jil -> kl", a, b), - np.tensordot(a,b, axes=([1,0],[0,1]))) - assert_equal(np.einsum(a, [0,1,2], b, [1,0,3], [2,3]), - np.tensordot(a,b, axes=([1,0],[0,1]))) + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) - c = np.arange(10, dtype=dtype).reshape(5,2) + c = np.arange(10, dtype=dtype).reshape(5, 2) np.einsum("ijk,jil->kl", a, b, out=c, dtype='f8', casting='unsafe') assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1,0],[0,1])).astype(dtype)) + axes=([1, 0], [0, 1])).astype(dtype)) c[...] = 0 - np.einsum(a, [0,1,2], b, [1,0,3], [2,3], out=c, + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, dtype='f8', casting='unsafe') assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), - axes=([1,0],[0,1])).astype(dtype)) + axes=([1, 0], [0, 1])).astype(dtype)) # logical_and(logical_and(a!=0, b!=0), c!=0) a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype) b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype) - c = np.array([True,True,False,True,True,False,True,True]) + c = np.array([True, True, False, True, True, False, True, True]) assert_equal(np.einsum("i,i,i->i", a, b, c, dtype='?', casting='unsafe'), np.logical_and(np.logical_and(a!=0, b!=0), c!=0)) @@ -395,20 +395,20 @@ class TestEinSum(TestCase): assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a)) # Various stride0, contiguous, and SSE aligned variants - for n in range(1,25): + for n in range(1, 25): a = np.arange(n, dtype=dtype) if np.dtype(dtype).itemsize > 1: - assert_equal(np.einsum("...,...",a,a), np.multiply(a,a)) - assert_equal(np.einsum("i,i", a, a), np.dot(a,a)) + assert_equal(np.einsum("...,...", a, a), np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a), np.dot(a, a)) assert_equal(np.einsum("i,->i", a, 2), 2*a) assert_equal(np.einsum(",i->i", 2, a), 2*a) assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a)) assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a)) - assert_equal(np.einsum("...,...",a[1:],a[:-1]), - np.multiply(a[1:],a[:-1])) + assert_equal(np.einsum("...,...", a[1:], a[:-1]), + np.multiply(a[1:], a[:-1])) assert_equal(np.einsum("i,i", a[1:], a[:-1]), - np.dot(a[1:],a[:-1])) + np.dot(a[1:], a[:-1])) assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:]) assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:]) assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:])) @@ -427,8 +427,8 @@ class TestEinSum(TestCase): # A case which was failing (ticket #1885) p = np.arange(2) + 1 - q = np.arange(4).reshape(2,2) + 3 - r = np.arange(4).reshape(2,2) + 7 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) def test_einsum_sums_int8(self): @@ -479,15 +479,15 @@ class TestEinSum(TestCase): def test_einsum_misc(self): # This call used to crash because of a bug in # PyArray_AssignZero - a = np.ones((1,2)) - b = np.ones((2,2,1)) - assert_equal(np.einsum('ij...,j...->i...',a,b), [[[2],[2]]]) + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + 
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) # The iterator had an issue with buffering this reduction a = np.ones((5, 12, 4, 2, 3), np.int64) b = np.ones((5, 12, 11), np.int64) - assert_equal(np.einsum('ijklm,ijn,ijn->',a,b,b), - np.einsum('ijklm,ijn->',a,b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) # Issue #2027, was a problem in the contiguous 3-argument # inner loop implementation diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py index 5de733d4c..efca9ef8a 100644 --- a/numpy/core/tests/test_function_base.py +++ b/numpy/core/tests/test_function_base.py @@ -5,34 +5,33 @@ from numpy import logspace, linspace class TestLogspace(TestCase): def test_basic(self): - y = logspace(0,6) + y = logspace(0, 6) assert_(len(y)==50) - y = logspace(0,6,num=100) + y = logspace(0, 6, num=100) assert_(y[-1] == 10**6) - y = logspace(0,6,endpoint=0) + y = logspace(0, 6, endpoint=0) assert_(y[-1] < 10**6) - y = logspace(0,6,num=7) - assert_array_equal(y,[1,10,100,1e3,1e4,1e5,1e6]) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) class TestLinspace(TestCase): def test_basic(self): - y = linspace(0,10) + y = linspace(0, 10) assert_(len(y)==50) - y = linspace(2,10,num=100) + y = linspace(2, 10, num=100) assert_(y[-1] == 10) - y = linspace(2,10,endpoint=0) + y = linspace(2, 10, endpoint=0) assert_(y[-1] < 10) def test_corner(self): - y = list(linspace(0,1,1)) + y = list(linspace(0, 1, 1)) assert_(y == [0.0], y) - y = list(linspace(0,1,2.5)) + y = list(linspace(0, 1, 2.5)) assert_(y == [0.0, 1.0]) def test_type(self): - t1 = linspace(0,1,0).dtype - t2 = linspace(0,1,1).dtype - t3 = linspace(0,1,2).dtype + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype assert_equal(t1, t2) assert_equal(t2, t3) - diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py index 96ca66b10..6ccdbd5de 100644 --- a/numpy/core/tests/test_getlimits.py +++ b/numpy/core/tests/test_getlimits.py @@ -15,31 +15,31 @@ class TestPythonFloat(TestCase): def test_singleton(self): ftype = finfo(float) ftype2 = finfo(float) - assert_equal(id(ftype),id(ftype2)) + assert_equal(id(ftype), id(ftype2)) class TestHalf(TestCase): def test_singleton(self): ftype = finfo(half) ftype2 = finfo(half) - assert_equal(id(ftype),id(ftype2)) + assert_equal(id(ftype), id(ftype2)) class TestSingle(TestCase): def test_singleton(self): ftype = finfo(single) ftype2 = finfo(single) - assert_equal(id(ftype),id(ftype2)) + assert_equal(id(ftype), id(ftype2)) class TestDouble(TestCase): def test_singleton(self): ftype = finfo(double) ftype2 = finfo(double) - assert_equal(id(ftype),id(ftype2)) + assert_equal(id(ftype), id(ftype2)) class TestLongdouble(TestCase): def test_singleton(self,level=2): ftype = finfo(longdouble) ftype2 = finfo(longdouble) - assert_equal(id(ftype),id(ftype2)) + assert_equal(id(ftype), id(ftype2)) class TestIinfo(TestCase): def test_basic(self): diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py index e5f2eaf9b..928db48b7 100644 --- a/numpy/core/tests/test_half.py +++ b/numpy/core/tests/test_half.py @@ -28,8 +28,8 @@ class TestHalf(TestCase): # An array of all non-NaN float16 values, in sorted order self.nonan_f16 = np.concatenate( - (np.arange(0xfc00,0x7fff,-1, dtype=uint16), - np.arange(0x0000,0x7c01,1, dtype=uint16)) + (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), + np.arange(0x0000, 0x7c01, 1, dtype=uint16)) ) 
self.nonan_f16.dtype = float16 self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) @@ -65,10 +65,10 @@ class TestHalf(TestCase): b.view(dtype=uint16)) # Check the range for which all integers can be represented - i_int = np.arange(-2048,2049) + i_int = np.arange(-2048, 2049) i_f16 = np.array(i_int, dtype=float16) j = np.array(i_f16, dtype=np.int) - assert_equal(i_int,j) + assert_equal(i_int, j) def test_nans_infs(self): with np.errstate(all='ignore'): @@ -223,17 +223,17 @@ class TestHalf(TestCase): assert_equal(a, np.ones((5,), dtype=float16)) # nonzero and copyswap - a = np.array([0,0,-1,-1/1e20,0,2.0**-24, 7.629e-6], dtype=float16) + a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) assert_equal(a.nonzero()[0], - [2,5,6]) + [2, 5, 6]) a = a.byteswap().newbyteorder() assert_equal(a.nonzero()[0], - [2,5,6]) + [2, 5, 6]) # dot a = np.arange(0, 10, 0.5, dtype=float16) b = np.ones((20,), dtype=float16) - assert_equal(np.dot(a,b), + assert_equal(np.dot(a, b), 95) # argmax @@ -247,7 +247,7 @@ class TestHalf(TestCase): # getitem a = np.arange(10, dtype=float16) for i in range(10): - assert_equal(a.item(i),i) + assert_equal(a.item(i), i) def test_spacing_nextafter(self): """Test np.spacing and np.nextafter""" @@ -276,127 +276,127 @@ class TestHalf(TestCase): def test_half_ufuncs(self): """Test the various ufuncs""" - a = np.array([0,1,2,4,2], dtype=float16) - b = np.array([-2,5,1,4,3], dtype=float16) - c = np.array([0,-1,-np.inf,np.nan,6], dtype=float16) - - assert_equal(np.add(a,b), [-2,6,3,8,5]) - assert_equal(np.subtract(a,b), [2,-4,1,0,-1]) - assert_equal(np.multiply(a,b), [0,5,2,16,6]) - assert_equal(np.divide(a,b), [0,0.199951171875,2,1,0.66650390625]) - - assert_equal(np.equal(a,b), [False,False,False,True,False]) - assert_equal(np.not_equal(a,b), [True,True,True,False,True]) - assert_equal(np.less(a,b), [False,True,False,False,True]) - assert_equal(np.less_equal(a,b), [False,True,False,True,True]) - assert_equal(np.greater(a,b), [True,False,True,False,False]) - assert_equal(np.greater_equal(a,b), [True,False,True,True,False]) - assert_equal(np.logical_and(a,b), [False,True,True,True,True]) - assert_equal(np.logical_or(a,b), [True,True,True,True,True]) - assert_equal(np.logical_xor(a,b), [True,False,False,False,False]) - assert_equal(np.logical_not(a), [True,False,False,False,False]) - - assert_equal(np.isnan(c), [False,False,False,True,False]) - assert_equal(np.isinf(c), [False,False,True,False,False]) - assert_equal(np.isfinite(c), [True,True,False,False,True]) - assert_equal(np.signbit(b), [True,False,False,False,False]) - - assert_equal(np.copysign(b,a), [2,5,1,4,3]) - - assert_equal(np.maximum(a,b), [0,5,2,4,3]) - x = np.maximum(b,c) + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, 
True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + x = np.maximum(b, c) assert_(np.isnan(x[3])) x[3] = 0 - assert_equal(x, [0,5,1,0,6]) - assert_equal(np.minimum(a,b), [-2,1,1,4,2]) - x = np.minimum(b,c) + assert_equal(x, [0, 5, 1, 0, 6]) + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + x = np.minimum(b, c) assert_(np.isnan(x[3])) x[3] = 0 - assert_equal(x, [-2,-1,-np.inf,0,3]) - assert_equal(np.fmax(a,b), [0,5,2,4,3]) - assert_equal(np.fmax(b,c), [0,5,1,4,6]) - assert_equal(np.fmin(a,b), [-2,1,1,4,2]) - assert_equal(np.fmin(b,c), [-2,-1,-np.inf,4,3]) - - assert_equal(np.floor_divide(a,b), [0,0,2,1,0]) - assert_equal(np.remainder(a,b), [0,1,0,0,2]) - assert_equal(np.square(b), [4,25,1,16,9]) - assert_equal(np.reciprocal(b), [-0.5,0.199951171875,1,0.25,0.333251953125]) - assert_equal(np.ones_like(b), [1,1,1,1,1]) + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) assert_equal(np.conjugate(b), b) - assert_equal(np.absolute(b), [2,5,1,4,3]) - assert_equal(np.negative(b), [2,-5,-1,-4,-3]) - assert_equal(np.sign(b), [-1,1,1,1,1]) - assert_equal(np.modf(b), ([0,0,0,0,0],b)) - assert_equal(np.frexp(b), ([-0.5,0.625,0.5,0.5,0.75],[2,3,1,3,2])) - assert_equal(np.ldexp(b,[0,1,2,4,2]), [-2,10,4,64,12]) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) def test_half_coercion(self): """Test that half gets coerced properly with the other types""" - a16 = np.array((1,),dtype=float16) - a32 = np.array((1,),dtype=float32) + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) b16 = float16(1) b32 = float32(1) - assert_equal(np.power(a16,2).dtype, float16) - assert_equal(np.power(a16,2.0).dtype, float16) - assert_equal(np.power(a16,b16).dtype, float16) - assert_equal(np.power(a16,b32).dtype, float16) - assert_equal(np.power(a16,a16).dtype, float16) - assert_equal(np.power(a16,a32).dtype, float32) - - assert_equal(np.power(b16,2).dtype, float64) - assert_equal(np.power(b16,2.0).dtype, float64) - assert_equal(np.power(b16,b16).dtype, float16) - assert_equal(np.power(b16,b32).dtype, float32) - assert_equal(np.power(b16,a16).dtype, float16) - assert_equal(np.power(b16,a32).dtype, float32) - - assert_equal(np.power(a32,a16).dtype, float32) - assert_equal(np.power(a32,b16).dtype, float32) - 
assert_equal(np.power(b32,a16).dtype, float16) - assert_equal(np.power(b32,b16).dtype, float32) + assert_equal(np.power(a16, 2).dtype, float16) + assert_equal(np.power(a16, 2.0).dtype, float16) + assert_equal(np.power(a16, b16).dtype, float16) + assert_equal(np.power(a16, b32).dtype, float16) + assert_equal(np.power(a16, a16).dtype, float16) + assert_equal(np.power(a16, a32).dtype, float32) + + assert_equal(np.power(b16, 2).dtype, float64) + assert_equal(np.power(b16, 2.0).dtype, float64) + assert_equal(np.power(b16, b16).dtype, float16) + assert_equal(np.power(b16, b32).dtype, float32) + assert_equal(np.power(b16, a16).dtype, float16) + assert_equal(np.power(b16, a32).dtype, float32) + + assert_equal(np.power(a32, a16).dtype, float32) + assert_equal(np.power(a32, b16).dtype, float32) + assert_equal(np.power(b32, a16).dtype, float16) + assert_equal(np.power(b32, b16).dtype, float32) @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") def test_half_fpe(self): with np.errstate(all='raise'): - sx16 = np.array((1e-4,),dtype=float16) - bx16 = np.array((1e4,),dtype=float16) + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) sy16 = float16(1e-4) by16 = float16(1e4) # Underflow errors - assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sx16) - assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sy16) - assert_raises_fpe('underflow', lambda a,b:a*b, sy16, sx16) - assert_raises_fpe('underflow', lambda a,b:a*b, sy16, sy16) - assert_raises_fpe('underflow', lambda a,b:a/b, sx16, bx16) - assert_raises_fpe('underflow', lambda a,b:a/b, sx16, by16) - assert_raises_fpe('underflow', lambda a,b:a/b, sy16, bx16) - assert_raises_fpe('underflow', lambda a,b:a/b, sy16, by16) - assert_raises_fpe('underflow', lambda a,b:a/b, + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b:a/b, float16(2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a,b:a/b, + assert_raises_fpe('underflow', lambda a, b:a/b, float16(-2.**-14), float16(2**11)) - assert_raises_fpe('underflow', lambda a,b:a/b, + assert_raises_fpe('underflow', lambda a, b:a/b, float16(2.**-14+2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a,b:a/b, + assert_raises_fpe('underflow', lambda a, b:a/b, float16(-2.**-14-2**-24), float16(2)) - assert_raises_fpe('underflow', lambda a,b:a/b, + assert_raises_fpe('underflow', lambda a, b:a/b, float16(2.**-14+2**-23), float16(4)) # Overflow errors - assert_raises_fpe('overflow', lambda a,b:a*b, bx16, bx16) - assert_raises_fpe('overflow', lambda a,b:a*b, bx16, by16) - assert_raises_fpe('overflow', lambda a,b:a*b, by16, bx16) - assert_raises_fpe('overflow', lambda a,b:a*b, by16, by16) - assert_raises_fpe('overflow', lambda a,b:a/b, bx16, sx16) - assert_raises_fpe('overflow', lambda a,b:a/b, bx16, sy16) - assert_raises_fpe('overflow', lambda a,b:a/b, by16, sx16) - assert_raises_fpe('overflow', lambda a,b:a/b, by16, sy16) - assert_raises_fpe('overflow', lambda a,b:a+b, + assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, 
bx16, by16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b:a+b, float16(65504), float16(17)) - assert_raises_fpe('overflow', lambda a,b:a-b, + assert_raises_fpe('overflow', lambda a, b:a-b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py index 263c505de..e5dc9dbab 100644 --- a/numpy/core/tests/test_indexerrors.py +++ b/numpy/core/tests/test_indexerrors.py @@ -54,34 +54,34 @@ class TestIndexErrors(TestCase): def assign(obj, ind, val): obj[ind] = val - a = np.zeros([1,2,3]) - assert_raises(IndexError, lambda: a[0,5,None,2]) - assert_raises(IndexError, lambda: a[0,5,0,2]) - assert_raises(IndexError, lambda: assign(a, (0,5,None,2), 1)) - assert_raises(IndexError, lambda: assign(a, (0,5,0,2), 1)) + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) - a = np.zeros([1,0,3]) - assert_raises(IndexError, lambda: a[0,0,None,2]) - assert_raises(IndexError, lambda: assign(a, (0,0,None,2), 1)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) - a = np.zeros([1,2,3]) + a = np.zeros([1, 2, 3]) assert_raises(IndexError, lambda: a.flat[10]) assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - a = np.zeros([1,0,3]) + a = np.zeros([1, 0, 3]) assert_raises(IndexError, lambda: a.flat[10]) assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) - a = np.zeros([1,2,3]) + a = np.zeros([1, 2, 3]) assert_raises(IndexError, lambda: a.flat[np.array(10)]) assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - a = np.zeros([1,0,3]) + a = np.zeros([1, 0, 3]) assert_raises(IndexError, lambda: a.flat[np.array(10)]) assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) - a = np.zeros([1,2,3]) + a = np.zeros([1, 2, 3]) assert_raises(IndexError, lambda: a.flat[np.array([10])]) assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) - a = np.zeros([1,0,3]) + a = np.zeros([1, 0, 3]) assert_raises(IndexError, lambda: a.flat[np.array([10])]) assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) @@ -94,10 +94,10 @@ class TestIndexErrors(TestCase): a = np.zeros((0, 10)) assert_raises(IndexError, lambda: a[12]) - a = np.zeros((3,5)) + a = np.zeros((3, 5)) assert_raises(IndexError, lambda: a[(10, 20)]) assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) - a = np.zeros((3,0)) + a = np.zeros((3, 0)) assert_raises(IndexError, lambda: a[(1, 0)]) assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) @@ -106,10 +106,10 @@ class TestIndexErrors(TestCase): a = np.zeros((0,)) assert_raises(IndexError, lambda: assign(a, 10, 1)) - a = np.zeros((3,5)) + a = np.zeros((3, 5)) assert_raises(IndexError, lambda: a[(1, [1, 20])]) assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) - a = 
np.zeros((3,0)) + a = np.zeros((3, 0)) assert_raises(IndexError, lambda: a[(1, [0, 1])]) assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py index ecd93ec71..89e28af78 100644 --- a/numpy/core/tests/test_indexing.py +++ b/numpy/core/tests/test_indexing.py @@ -39,7 +39,7 @@ class TestIndexing(TestCase): def test_ellipsis_index(self): # Ellipsis index does not create a view a = np.array([[1, 2, 3], - [4 ,5, 6], + [4, 5, 6], [7, 8, 9]]) assert_equal(a[...], a) assert_(a[...] is a) @@ -47,7 +47,7 @@ class TestIndexing(TestCase): # Slicing with ellipsis can skip an # arbitrary number of dimensions assert_equal(a[0, ...], a[0]) - assert_equal(a[0, ...], a[0, :]) + assert_equal(a[0, ...], a[0,:]) assert_equal(a[..., 0], a[:, 0]) # Slicing with ellipsis always results @@ -57,7 +57,7 @@ class TestIndexing(TestCase): def test_single_int_index(self): # Single integer index selects one row a = np.array([[1, 2, 3], - [4 ,5, 6], + [4, 5, 6], [7, 8, 9]]) assert_equal(a[0], [1, 2, 3]) @@ -71,7 +71,7 @@ class TestIndexing(TestCase): def test_single_bool_index(self): # Single boolean index a = np.array([[1, 2, 3], - [4 ,5, 6], + [4, 5, 6], [7, 8, 9]]) # Python boolean converts to integer @@ -108,7 +108,7 @@ class TestIndexing(TestCase): # Indexing a 2-dimensional array with # 2-dimensional boolean array a = np.array([[1, 2, 3], - [4 ,5, 6], + [4, 5, 6], [7, 8, 9]]) b = np.array([[ True, False, True], [False, True, False], @@ -140,8 +140,8 @@ class TestMultiIndexingAutomated(TestCase): will usually not be the same one. They are *not* tested. """ def setUp(self): - self.a = np.arange(np.prod([3,1,5,6])).reshape(3,1,5,6) - self.b = np.empty((3,0,5,6)) + self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + self.b = np.empty((3, 0, 5, 6)) self.complex_indices = ['skip', Ellipsis, 0, # Boolean indices, up to 3-d for some special cases of eating up @@ -154,19 +154,19 @@ class TestMultiIndexingAutomated(TestCase): slice(-5, 5, 2), slice(1, 1, 100), slice(4, -1, -2), - slice(None,None,-3), + slice(None, None, -3), # Some Fancy indexes: - np.empty((0,1,1), dtype=np.intp), # empty broadcastable - np.array([0,1,-2]), - np.array([[2],[0],[1]]), - np.array([[0,-1], [0,1]]), - np.array([2,-1]), + np.empty((0, 1, 1), dtype=np.intp), # empty broadcastable + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]]), + np.array([2, -1]), np.zeros([1]*31, dtype=int), # trigger too large array. 
np.array([0., 1.])] # invalid datatype # Some simpler indices that still cover a bit more self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip'] # Very simple ones to fill the rest: - self.fill_indices = [slice(None,None), 0] + self.fill_indices = [slice(None, None), 0] def _get_multi_index(self, arr, indices): @@ -226,7 +226,7 @@ class TestMultiIndexingAutomated(TestCase): if ellipsis_pos is None: ellipsis_pos = i continue # do not increment ndim counter - in_indices[i] = slice(None,None) + in_indices[i] = slice(None, None) ndim += 1 continue if isinstance(indx, slice): @@ -258,7 +258,7 @@ class TestMultiIndexingAutomated(TestCase): return arr.copy(), no_copy if ellipsis_pos is not None: - in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None,None)] * (arr.ndim - ndim) + in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None, None)] * (arr.ndim - ndim) for ax, indx in enumerate(in_indices): if isinstance(indx, slice): @@ -481,8 +481,8 @@ class TestMultiIndexingAutomated(TestCase): # consistency with arr[boolean_array,] also no broadcasting # is done at all self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),)) - self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[...,0],)) - self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None,...],)) + self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],)) + self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],)) def test_multidim(self): @@ -492,7 +492,7 @@ class TestMultiIndexingAutomated(TestCase): # This is so that np.array(True) is not accepted in a full integer # index, when running the file seperatly. warnings.filterwarnings('error', '', DeprecationWarning) - for simple_pos in [0,2,3]: + for simple_pos in [0, 2, 3]: tocheck = [self.fill_indices, self.complex_indices, self.fill_indices, self.fill_indices] tocheck[simple_pos] = self.simple_indices @@ -502,13 +502,13 @@ class TestMultiIndexingAutomated(TestCase): self._check_multi_index(self.b, index) # Check very simple item getting: - self._check_multi_index(self.a, (0,0,0,0)) - self._check_multi_index(self.b, (0,0,0,0)) + self._check_multi_index(self.a, (0, 0, 0, 0)) + self._check_multi_index(self.b, (0, 0, 0, 0)) # Also check (simple cases of) too many indices: - assert_raises(IndexError, self.a.__getitem__, (0,0,0,0,0)) - assert_raises(IndexError, self.a.__setitem__, (0,0,0,0,0), 0) - assert_raises(IndexError, self.a.__getitem__, (0,0,[1],0,0)) - assert_raises(IndexError, self.a.__setitem__, (0,0,[1],0,0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0) + assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0)) + assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0) def test_1d(self): diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py index 425b1d893..e501588c9 100644 --- a/numpy/core/tests/test_item_selection.py +++ b/numpy/core/tests/test_item_selection.py @@ -8,12 +8,12 @@ import sys, warnings class TestTake(TestCase): def test_simple(self): a = [[1, 2], [3, 4]] - a_str = [[b'1', b'2'],[b'3', b'4']] + a_str = [[b'1', b'2'], [b'3', b'4']] modes = ['raise', 'wrap', 'clip'] indices = [-1, 4] index_arrays = [np.empty(0, dtype=np.intp), np.empty(tuple(), dtype=np.intp), - np.empty((1,1), dtype=np.intp)] + np.empty((1, 1), dtype=np.intp)] real_indices = {} real_indices['raise'] = {-1:1, 4:IndexError} real_indices['wrap'] = {-1:1, 4:0} 
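
The test_half.py hunks above lean on a few IEEE 754 binary16 boundary values: every integer of magnitude up to 2**11 round-trips exactly, 2**-24 is the smallest positive subnormal, 2**-14 the smallest positive normal, and 65504 the largest finite value. The following is a minimal standalone sketch of those same constants, illustrative only and not part of the patch (the variable names are made up)::

    import numpy as np

    # Every integer with |n| <= 2**11 is exactly representable in float16,
    # which is why the test uses arange(-2048, 2049).
    ints = np.arange(-2048, 2049)
    roundtrip = ints.astype(np.float16).astype(np.int64)
    assert np.array_equal(ints, roundtrip)

    print(np.float16(2.0 ** -24))   # smallest positive subnormal, ~5.96e-08
    print(np.float16(2.0 ** -14))   # smallest positive normal,    ~6.10e-05
    print(np.float16(65504))        # largest finite float16 value

These are the same values the underflow/overflow assertions above probe with assert_raises_fpe under np.errstate(all='raise').
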
@@ -59,7 +59,7 @@ class TestTake(TestCase): a.take(b, out=a[:6]) del a assert_(all(sys.getrefcount(o) == 3 for o in objects)) - + if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py index 97afb68a2..6de6319ef 100644 --- a/numpy/core/tests/test_memmap.py +++ b/numpy/core/tests/test_memmap.py @@ -11,7 +11,7 @@ from numpy.testing import * class TestMemmap(TestCase): def setUp(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') - self.shape = (3,4) + self.shape = (3, 4) self.dtype = 'float32' self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) @@ -33,7 +33,7 @@ class TestMemmap(TestCase): assert_array_equal(self.data, newfp) def test_open_with_filename(self): - tmpname = mktemp('','mmap') + tmpname = mktemp('', 'mmap') fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) fp[:] = self.data[:] @@ -55,7 +55,7 @@ class TestMemmap(TestCase): del fp def test_filename(self): - tmpname = mktemp('','mmap') + tmpname = mktemp('', 'mmap') fp = memmap(tmpname, dtype=self.dtype, mode='w+', shape=self.shape) abspath = os.path.abspath(tmpname) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index cdb278dfc..12d1e8ea9 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -63,16 +63,16 @@ class TestFlags(TestCase): class TestAttributes(TestCase): def setUp(self): self.one = arange(10) - self.two = arange(20).reshape(4,5) - self.three = arange(60,dtype=float64).reshape(2,5,6) + self.two = arange(20).reshape(4, 5) + self.three = arange(60, dtype=float64).reshape(2, 5, 6) def test_attributes(self): assert_equal(self.one.shape, (10,)) - assert_equal(self.two.shape, (4,5)) - assert_equal(self.three.shape, (2,5,6)) - self.three.shape = (10,3,2) - assert_equal(self.three.shape, (10,3,2)) - self.three.shape = (2,5,6) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) assert_equal(self.one.strides, (self.one.itemsize,)) num = self.two.itemsize assert_equal(self.two.strides, (5*num, num)) @@ -137,13 +137,13 @@ class TestAttributes(TestCase): r.strides = strides=strides*x.itemsize return r assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) - assert_equal(make_array(7,3,1), array([3, 4, 5, 6, 7, 8, 9])) + assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. - x = np.lib.stride_tricks.as_strided(arange(1), (10,10), (0,0)) + x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): arr.strides = strides self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) @@ -158,15 +158,15 @@ class TestAttributes(TestCase): def test_fill(self): for t in "?bhilqpBHILQPfdgFDGO": - x = empty((3,2,1), t) - y = empty((3,2,1), t) + x = empty((3, 2, 1), t) + y = empty((3, 2, 1), t) x.fill(1) y[...] 
= 1 - assert_equal(x,y) + assert_equal(x, y) def test_fill_struct_array(self): # Filling from a scalar - x = array([(0,0.0), (1,1.0)], dtype='i4,f8') + x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8') x.fill(x[0]) assert_equal(x['f1'][1], x['f1'][0]) # Filling from a tuple that can be converted @@ -178,13 +178,13 @@ class TestAttributes(TestCase): class TestAssignment(TestCase): def test_assignment_broadcasting(self): - a = np.arange(6).reshape(2,3) + a = np.arange(6).reshape(2, 3) # Broadcasting the input to the output a[...] = np.arange(3) - assert_equal(a, [[0,1,2],[0,1,2]]) - a[...] = np.arange(2).reshape(2,1) - assert_equal(a, [[0,0,0],[1,1,1]]) + assert_equal(a, [[0, 1, 2], [0, 1, 2]]) + a[...] = np.arange(2).reshape(2, 1) + assert_equal(a, [[0, 0, 0], [1, 1, 1]]) # For compatibility with <= 1.5, a limited version of broadcasting # the output to the input. @@ -194,12 +194,12 @@ class TestAssignment(TestCase): # rules (adding a new "1" dimension to the left of the shape), # applied to the output instead of an input. In NumPy 2.0, this kind # of broadcasting assignment will likely be disallowed. - a[...] = np.arange(6)[::-1].reshape(1,2,3) - assert_equal(a, [[5,4,3],[2,1,0]]) + a[...] = np.arange(6)[::-1].reshape(1, 2, 3) + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) # The other type of broadcasting would require a reduction operation. - def assign(a,b): + def assign(a, b): a[...] = b - assert_raises(ValueError, assign, a, np.arange(12).reshape(2,2,3)) + assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) class TestDtypedescr(TestCase): def test_construction(self): @@ -213,42 +213,42 @@ class TestZeroRank(TestCase): self.d = array(0), array('x', object) def test_ellipsis_subscript(self): - a,b = self.d + a, b = self.d self.assertEqual(a[...], 0) self.assertEqual(b[...], 'x') self.assertTrue(a[...] is a) self.assertTrue(b[...] is b) def test_empty_subscript(self): - a,b = self.d + a, b = self.d self.assertEqual(a[()], 0) self.assertEqual(b[()], 'x') self.assertTrue(type(a[()]) is a.dtype.type) self.assertTrue(type(b[()]) is str) def test_invalid_subscript(self): - a,b = self.d + a, b = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[0], b) self.assertRaises(IndexError, lambda x: x[array([], int)], a) self.assertRaises(IndexError, lambda x: x[array([], int)], b) def test_ellipsis_subscript_assignment(self): - a,b = self.d + a, b = self.d a[...] = 42 self.assertEqual(a, 42) b[...] 
= '' self.assertEqual(b.item(), '') def test_empty_subscript_assignment(self): - a,b = self.d + a, b = self.d a[()] = 42 self.assertEqual(a, 42) b[()] = '' self.assertEqual(b.item(), '') def test_invalid_subscript_assignment(self): - a,b = self.d + a, b = self.d def assign(x, i, v): x[i] = v self.assertRaises(IndexError, assign, a, 0, 42) @@ -256,18 +256,18 @@ class TestZeroRank(TestCase): self.assertRaises(ValueError, assign, a, (), '') def test_newaxis(self): - a,b = self.d + a, b = self.d self.assertEqual(a[newaxis].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) self.assertEqual(a[newaxis, ...].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1)) - self.assertEqual(a[..., newaxis, newaxis].shape, (1,1)) - self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1)) + self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) + self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) + self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): - a,b = self.d + a, b = self.d def subscript(x, i): x[i] self.assertRaises(IndexError, subscript, a, (newaxis, 0)) self.assertRaises(IndexError, subscript, a, (newaxis,)*50) @@ -276,7 +276,7 @@ class TestZeroRank(TestCase): x = ndarray(()) x[()] = 5 self.assertEqual(x[()], 5) - y = ndarray((),buffer=x) + y = ndarray((), buffer=x) y[()] = 6 self.assertEqual(x[()], 6) @@ -287,17 +287,17 @@ class TestZeroRank(TestCase): class TestScalarIndexing(TestCase): def setUp(self): - self.d = array([0,1])[0] + self.d = array([0, 1])[0] def test_ellipsis_subscript(self): a = self.d self.assertEqual(a[...], 0) - self.assertEqual(a[...].shape,()) + self.assertEqual(a[...].shape, ()) def test_empty_subscript(self): a = self.d self.assertEqual(a[()], 0) - self.assertEqual(a[()].shape,()) + self.assertEqual(a[()].shape, ()) def test_invalid_subscript(self): a = self.d @@ -316,9 +316,9 @@ class TestScalarIndexing(TestCase): self.assertEqual(a[..., newaxis].shape, (1,)) self.assertEqual(a[newaxis, ...].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) - self.assertEqual(a[newaxis, ..., newaxis].shape, (1,1)) - self.assertEqual(a[..., newaxis, newaxis].shape, (1,1)) - self.assertEqual(a[newaxis, newaxis, ...].shape, (1,1)) + self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) + self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) + self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): @@ -331,49 +331,49 @@ class TestScalarIndexing(TestCase): # With positive strides a = np.arange(4) a[:-1] = a[1:] - assert_equal(a, [1,2,3,3]) + assert_equal(a, [1, 2, 3, 3]) a = np.arange(4) a[1:] = a[:-1] - assert_equal(a, [0,0,1,2]) + assert_equal(a, [0, 0, 1, 2]) # With positive and negative strides a = np.arange(4) a[:] = a[::-1] - assert_equal(a, [3,2,1,0]) + assert_equal(a, [3, 2, 1, 0]) - a = np.arange(6).reshape(2,3) - a[::-1,:] = a[:,::-1] - assert_equal(a, [[5,4,3],[2,1,0]]) + a = np.arange(6).reshape(2, 3) + a[::-1,:] = a[:, ::-1] + assert_equal(a, [[5, 4, 3], [2, 1, 0]]) - a = np.arange(6).reshape(2,3) - a[::-1,::-1] = a[:,::-1] - assert_equal(a, [[3,4,5],[0,1,2]]) + a = np.arange(6).reshape(2, 3) + a[::-1, ::-1] = a[:, ::-1] + assert_equal(a, [[3, 4, 5], [0, 1, 2]]) # With just one element overlapping a = np.arange(5) a[:3] = a[2:] - assert_equal(a, [2,3,4,3,4]) + assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[2:] = a[:3] - 
assert_equal(a, [0,1,0,1,2]) + assert_equal(a, [0, 1, 0, 1, 2]) a = np.arange(5) a[2::-1] = a[2:] - assert_equal(a, [4,3,2,3,4]) + assert_equal(a, [4, 3, 2, 3, 4]) a = np.arange(5) a[2:] = a[2::-1] - assert_equal(a, [0,1,2,1,0]) + assert_equal(a, [0, 1, 2, 1, 0]) a = np.arange(5) a[2::-1] = a[:1:-1] - assert_equal(a, [2,3,4,3,4]) + assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[:1:-1] = a[2::-1] - assert_equal(a, [0,1,0,1,2]) + assert_equal(a, [0, 1, 0, 1, 2]) class TestCreation(TestCase): def test_from_attribute(self): @@ -384,7 +384,7 @@ class TestCreation(TestCase): def test_from_string(self) : types = np.typecodes['AllInteger'] + np.typecodes['Float'] - nstr = ['123','123'] + nstr = ['123', '123'] result = array([123, 123], dtype=int) for type in types : msg = 'String conversion for %s' % type @@ -459,52 +459,52 @@ class TestStructured(TestCase): # Check that comparisons between record arrays with # multi-dimensional field types work properly a = np.rec.fromrecords( - [([1,2,3],'a', [[1,2],[3,4]]),([3,3,3],'b',[[0,0],[0,0]])], - dtype=[('a', ('f4',3)), ('b', np.object), ('c', ('i4',(2,2)))]) + [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], + dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))]) b = a.copy() - assert_equal(a==b, [True,True]) - assert_equal(a!=b, [False,False]) + assert_equal(a==b, [True, True]) + assert_equal(a!=b, [False, False]) b[1].b = 'c' - assert_equal(a==b, [True,False]) - assert_equal(a!=b, [False,True]) + assert_equal(a==b, [True, False]) + assert_equal(a!=b, [False, True]) for i in range(3): b[0].a = a[0].a b[0].a[i] = 5 - assert_equal(a==b, [False,False]) - assert_equal(a!=b, [True,True]) + assert_equal(a==b, [False, False]) + assert_equal(a!=b, [True, True]) for i in range(2): for j in range(2): b = a.copy() - b[0].c[i,j] = 10 - assert_equal(a==b, [False,True]) - assert_equal(a!=b, [True,False]) + b[0].c[i, j] = 10 + assert_equal(a==b, [False, True]) + assert_equal(a!=b, [True, False]) # Check that broadcasting with a subarray works - a = np.array([[(0,)],[(1,)]],dtype=[('a','f8')]) - b = np.array([(0,),(0,),(1,)],dtype=[('a','f8')]) + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) assert_equal(a==b, [[True, True, False], [False, False, True]]) assert_equal(b==a, [[True, True, False], [False, False, True]]) - a = np.array([[(0,)],[(1,)]],dtype=[('a','f8',(1,))]) - b = np.array([(0,),(0,),(1,)],dtype=[('a','f8',(1,))]) + a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) + b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) assert_equal(a==b, [[True, True, False], [False, False, True]]) assert_equal(b==a, [[True, True, False], [False, False, True]]) - a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))]) - b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))]) + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a==b, [[True, False, False], [False, False, True]]) assert_equal(b==a, [[True, False, False], [False, False, True]]) # Check that broadcasting Fortran-style arrays with a subarray work - a = np.array([[([0,0],)],[([1,1],)]],dtype=[('a','f8',(2,))], order='F') - b = np.array([([0,0],),([0,1],),([1,1],)],dtype=[('a','f8',(2,))]) + a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') + b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a==b, [[True, 
False, False], [False, False, True]]) assert_equal(b==a, [[True, False, False], [False, False, True]]) # Check that incompatible sub-array shapes don't result to broadcasting - x = np.zeros((1,), dtype=[('a', ('f4', (1,2))), ('b', 'i1')]) + x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) assert_equal(x == y, False) - x = np.zeros((1,), dtype=[('a', ('f4', (2,1))), ('b', 'i1')]) + x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) assert_equal(x == y, False) @@ -523,17 +523,17 @@ class TestBool(TestCase): class TestMethods(TestCase): def test_test_round(self): - assert_equal(array([1.2,1.5]).round(), [1,2]) + assert_equal(array([1.2, 1.5]).round(), [1, 2]) assert_equal(array(1.5).round(), 2) - assert_equal(array([12.2,15.5]).round(-1), [10,20]) - assert_equal(array([12.15,15.51]).round(1), [12.2,15.5]) + assert_equal(array([12.2, 15.5]).round(-1), [10, 20]) + assert_equal(array([12.15, 15.51]).round(1), [12.2, 15.5]) def test_transpose(self): - a = array([[1,2],[3,4]]) - assert_equal(a.transpose(), [[1,3],[2,4]]) + a = array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) self.assertRaises(ValueError, lambda: a.transpose(0)) - self.assertRaises(ValueError, lambda: a.transpose(0,0)) - self.assertRaises(ValueError, lambda: a.transpose(0,1,2)) + self.assertRaises(ValueError, lambda: a.transpose(0, 0)) + self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2)) def test_sort(self): # test ordering for floats and complex containing nans. It is only @@ -561,7 +561,7 @@ class TestMethods(TestCase): # sort for small arrays. a = np.arange(101) b = a[::-1].copy() - for kind in ['q','m','h'] : + for kind in ['q', 'm', 'h'] : msg = "scalar sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) @@ -574,7 +574,7 @@ class TestMethods(TestCase): # but the compare fuction differs. ai = a*1j + 1 bi = b*1j + 1 - for kind in ['q','m','h'] : + for kind in ['q', 'm', 'h'] : msg = "complex sort, real part == 1, kind=%s" % kind c = ai.copy(); c.sort(kind=kind) @@ -584,7 +584,7 @@ class TestMethods(TestCase): assert_equal(c, ai, msg) ai = a + 1j bi = b + 1j - for kind in ['q','m','h'] : + for kind in ['q', 'm', 'h'] : msg = "complex sort, imag part == 1, kind=%s" % kind c = ai.copy(); c.sort(kind=kind) @@ -633,8 +633,8 @@ class TestMethods(TestCase): assert_equal(c, a, msg) # test record array sorts. - dt = np.dtype([('f',float),('i',int)]) - a = array([(i,i) for i in range(101)], dtype = dt) + dt = np.dtype([('f', float), ('i', int)]) + a = array([(i, i) for i in range(101)], dtype = dt) b = a[::-1] for kind in ['q', 'h', 'm'] : msg = "object sort, kind=%s" % kind @@ -671,9 +671,9 @@ class TestMethods(TestCase): # check axis handling. 
This should be the same for all type # specific sorts, so we only check it for one type and one kind - a = np.array([[3,2],[1,0]]) - b = np.array([[1,0],[3,2]]) - c = np.array([[2,3],[0,1]]) + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) d = a.copy() d.sort(axis=0) assert_equal(d, b, "test sort with axis=0") @@ -711,32 +711,32 @@ class TestMethods(TestCase): def test_sort_order(self): # Test sorting an array with fields - x1=np.array([21,32,14]) - x2=np.array(['my','first','name']) - x3=np.array([3.1,4.5,6.2]) - r=np.rec.fromarrays([x1,x2,x3],names='id,word,number') + x1=np.array([21, 32, 14]) + x2=np.array(['my', 'first', 'name']) + x3=np.array([3.1, 4.5, 6.2]) + r=np.rec.fromarrays([x1, x2, x3], names='id,word,number') r.sort(order=['id']) - assert_equal(r.id, array([14,21,32])) - assert_equal(r.word, array(['name','my','first'])) - assert_equal(r.number, array([6.2,3.1,4.5])) + assert_equal(r.id, array([14, 21, 32])) + assert_equal(r.word, array(['name', 'my', 'first'])) + assert_equal(r.number, array([6.2, 3.1, 4.5])) r.sort(order=['word']) - assert_equal(r.id, array([32,21,14])) - assert_equal(r.word, array(['first','my','name'])) - assert_equal(r.number, array([4.5,3.1,6.2])) + assert_equal(r.id, array([32, 21, 14])) + assert_equal(r.word, array(['first', 'my', 'name'])) + assert_equal(r.number, array([4.5, 3.1, 6.2])) r.sort(order=['number']) - assert_equal(r.id, array([21,32,14])) - assert_equal(r.word, array(['my','first','name'])) - assert_equal(r.number, array([3.1,4.5,6.2])) + assert_equal(r.id, array([21, 32, 14])) + assert_equal(r.word, array(['my', 'first', 'name'])) + assert_equal(r.number, array([3.1, 4.5, 6.2])) if sys.byteorder == 'little': strtype = '>i2' else: strtype = '<i2' - mydtype = [('name', strchar + '5'),('col2',strtype)] - r = np.array([('a', 1),('b', 255), ('c', 3), ('d', 258)], + mydtype = [('name', strchar + '5'), ('col2', strtype)] + r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)], dtype= mydtype) r.sort(order='col2') assert_equal(r['col2'], [1, 3, 255, 258]) @@ -751,7 +751,7 @@ class TestMethods(TestCase): # sort for small arrays. a = np.arange(101) b = a[::-1].copy() - for kind in ['q','m','h'] : + for kind in ['q', 'm', 'h'] : msg = "scalar argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), a, msg) assert_equal(b.copy().argsort(kind=kind), b, msg) @@ -760,13 +760,13 @@ class TestMethods(TestCase): # but the compare fuction differs. ai = a*1j + 1 bi = b*1j + 1 - for kind in ['q','m','h'] : + for kind in ['q', 'm', 'h'] : msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) ai = a + 1j bi = b + 1j - for kind in ['q','m','h'] : + for kind in ['q', 'm', 'h'] : msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) @@ -805,8 +805,8 @@ class TestMethods(TestCase): assert_equal(b.copy().argsort(kind=kind), rr, msg) # test structured array argsorts. - dt = np.dtype([('f',float),('i',int)]) - a = array([(i,i) for i in range(101)], dtype = dt) + dt = np.dtype([('f', float), ('i', int)]) + a = array([(i, i) for i in range(101)], dtype = dt) b = a[::-1] r = np.arange(101) rr = r[::-1] @@ -838,9 +838,9 @@ class TestMethods(TestCase): # check axis handling. 
This should be the same for all type # specific argsorts, so we only check it for one type and one kind - a = np.array([[3,2],[1,0]]) - b = np.array([[1,1],[0,0]]) - c = np.array([[1,0],[1,0]]) + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) assert_equal(a.copy().argsort(axis=0), b) assert_equal(a.copy().argsort(axis=1), c) assert_equal(a.copy().argsort(), c) @@ -876,7 +876,7 @@ class TestMethods(TestCase): assert_equal(b, np.arange(3), msg) msg = "Test real searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') - assert_equal(b, np.arange(1,4), msg) + assert_equal(b, np.arange(1, 4), msg) # check double complex a = np.zeros(9, dtype=np.complex128) a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] @@ -886,14 +886,14 @@ class TestMethods(TestCase): assert_equal(b, np.arange(9), msg) msg = "Test complex searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') - assert_equal(b, np.arange(1,10), msg) + assert_equal(b, np.arange(1, 10), msg) msg = "Test searchsorted with little endian, side='l'" - a = np.array([0,128],dtype='<i4') - b = a.searchsorted(np.array(128,dtype='<i4')) + a = np.array([0, 128], dtype='<i4') + b = a.searchsorted(np.array(128, dtype='<i4')) assert_equal(b, 1, msg) msg = "Test searchsorted with big endian, side='l'" - a = np.array([0,128],dtype='>i4') - b = a.searchsorted(np.array(128,dtype='>i4')) + a = np.array([0, 128], dtype='>i4') + b = a.searchsorted(np.array(128, dtype='>i4')) assert_equal(b, 1, msg) # Check 0 elements @@ -959,17 +959,17 @@ class TestMethods(TestCase): assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1) def test_searchsorted_with_sorter(self): - a = np.array([5,2,1,3,4]) + a = np.array([5, 2, 1, 3, 4]) s = np.argsort(a) - assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1,(2,3))) + assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3))) assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1]) - assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1,2,3,4]) - assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1,2,3,4,5,6]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6]) # bounds check - assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0,1,2,3,5]) - assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1,0,1,2,3]) - assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4,0,-1,2,3]) + assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3]) + assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3]) a = np.random.rand(300) s = a.argsort() @@ -1156,23 +1156,23 @@ class TestMethods(TestCase): aae(p, d[np.argpartition(d, i, kind=k)]) p = np.partition(d1, i, axis=1, kind=k) - aae(p[:,i], np.array([i] * d1.shape[0], dtype=dt)) + aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) at((p[:, i + 1:].T > p[:, i]).all(), msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) - aae(p, d1[np.arange(d1.shape[0])[:,None], + aae(p, d1[np.arange(d1.shape[0])[:, None], np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) - aae(p[i, :], np.array([i] * d1.shape[0], + aae(p[i,:], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right - at((p[:i, :] <= p[i, 
:]).all(), - msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) - at((p[i + 1:, :] > p[i, :]).all(), - msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + at((p[:i,:] <= p[i,:]).all(), + msg="%d: %r <= %r" % (i, p[i,:], p[:i,:])) + at((p[i + 1:,:] > p[i,:]).all(), + msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), np.arange(d0.shape[1])[None,:]]) @@ -1253,11 +1253,11 @@ class TestMethods(TestCase): kth = (1, 6, 7, -1) p = np.partition(d1, kth, axis=1) - pa = d1[np.arange(d1.shape[0])[:,None], + pa = d1[np.arange(d1.shape[0])[:, None], d1.argpartition(kth, axis=1)] assert_array_equal(p, pa) for i in range(d1.shape[0]): - self.assert_partitioned(p[i, :], kth) + self.assert_partitioned(p[i,:], kth) p = np.partition(d0, kth, axis=0) pa = d0[np.argpartition(d0, kth, axis=0), np.arange(d0.shape[1])[None,:]] @@ -1293,12 +1293,12 @@ class TestMethods(TestCase): def test_flatten(self): - x0 = np.array([[1,2,3],[4,5,6]], np.int32) - x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32) - y0 = np.array([1,2,3,4,5,6], np.int32) - y0f = np.array([1,4,2,5,3,6], np.int32) - y1 = np.array([1,2,3,4,5,6,7,8], np.int32) - y1f = np.array([1,5,3,7,2,6,4,8], np.int32) + x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) + x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32) + y0 = np.array([1, 2, 3, 4, 5, 6], np.int32) + y0f = np.array([1, 4, 2, 5, 3, 6], np.int32) + y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32) + y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32) assert_equal(x0.flatten(), y0) assert_equal(x0.flatten('F'), y0f) assert_equal(x0.flatten('F'), x0.T.flatten()) @@ -1314,15 +1314,15 @@ class TestMethods(TestCase): assert_equal(np.dot(a, b), a.dot(b)) assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) - # test passing in an output array + # test passing in an output array c = np.zeros_like(a) - a.dot(b,c) - assert_equal(c, np.dot(a,b)) - + a.dot(b, c) + assert_equal(c, np.dot(a, b)) + # test keyword args c = np.zeros_like(a) - a.dot(b=b,out=c) - assert_equal(c, np.dot(a,b)) + a.dot(b=b, out=c) + assert_equal(c, np.dot(a, b)) def test_diagonal(self): a = np.arange(12).reshape((3, 4)) @@ -1470,39 +1470,39 @@ class TestMethods(TestCase): assert_(sys.getrefcount(a) < 50) def test_ravel(self): - a = np.array([[0,1],[2,3]]) - assert_equal(a.ravel(), [0,1,2,3]) + a = np.array([[0, 1], [2, 3]]) + assert_equal(a.ravel(), [0, 1, 2, 3]) assert_(not a.ravel().flags.owndata) - assert_equal(a.ravel('F'), [0,2,1,3]) - assert_equal(a.ravel(order='C'), [0,1,2,3]) - assert_equal(a.ravel(order='F'), [0,2,1,3]) - assert_equal(a.ravel(order='A'), [0,1,2,3]) + assert_equal(a.ravel('F'), [0, 2, 1, 3]) + assert_equal(a.ravel(order='C'), [0, 1, 2, 3]) + assert_equal(a.ravel(order='F'), [0, 2, 1, 3]) + assert_equal(a.ravel(order='A'), [0, 1, 2, 3]) assert_(not a.ravel(order='A').flags.owndata) - assert_equal(a.ravel(order='K'), [0,1,2,3]) + assert_equal(a.ravel(order='K'), [0, 1, 2, 3]) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) - a = np.array([[0,1],[2,3]], order='F') - assert_equal(a.ravel(), [0,1,2,3]) - assert_equal(a.ravel(order='A'), [0,2,1,3]) - assert_equal(a.ravel(order='K'), [0,2,1,3]) + a = np.array([[0, 1], [2, 3]], order='F') + assert_equal(a.ravel(), [0, 1, 2, 3]) + assert_equal(a.ravel(order='A'), [0, 2, 1, 3]) + assert_equal(a.ravel(order='K'), [0, 2, 1, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) 
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) - a = np.array([[0,1],[2,3]])[::-1,:] - assert_equal(a.ravel(), [2,3,0,1]) - assert_equal(a.ravel(order='C'), [2,3,0,1]) - assert_equal(a.ravel(order='F'), [2,0,3,1]) - assert_equal(a.ravel(order='A'), [2,3,0,1]) + a = np.array([[0, 1], [2, 3]])[::-1,:] + assert_equal(a.ravel(), [2, 3, 0, 1]) + assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) + assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) + assert_equal(a.ravel(order='A'), [2, 3, 0, 1]) # 'K' doesn't reverse the axes of negative strides - assert_equal(a.ravel(order='K'), [2,3,0,1]) + assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) class TestSubscripting(TestCase): def test_test_zero_rank(self): - x = array([1,2,3]) + x = array([1, 2, 3]) self.assertTrue(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: self.assertTrue(isinstance(x[0], int)) @@ -1512,11 +1512,11 @@ class TestSubscripting(TestCase): class TestPickling(TestCase): def test_roundtrip(self): import pickle - carray = array([[2,9],[7,0],[3,8]]) + carray = array([[2, 9], [7, 0], [3, 8]]) DATA = [ carray, transpose(carray), - array([('xxx', 1, 2.0)], dtype=[('a', (str,3)), ('b', int), + array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), ('c', float)]) ] @@ -1533,7 +1533,7 @@ class TestPickling(TestCase): # version 0 doesn't have a version field def test_version0_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' - a = array([1,2,3,4], dtype=int8) + a = array([1, 2, 3, 4], dtype=int8) p = self._loads(asbytes(s)) assert_equal(a, p) @@ -1552,7 +1552,7 @@ class TestPickling(TestCase): # version 1 pickles, using protocol=2 to pickle def test_version1_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' - a = array([1,2,3,4], dtype=int8) + a = array([1, 2, 3, 4], dtype=int8) p = self._loads(asbytes(s)) assert_equal(a, p) @@ -1570,78 +1570,78 @@ class TestPickling(TestCase): def test_subarray_int_shape(self): s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb." 
- a = np.array([(1,(1,2))], dtype=[('a', 'i1', (2,2)), ('b', 'i1', 2)]) + a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)]) p = self._loads(asbytes(s)) assert_equal(a, p) class TestFancyIndexing(TestCase): def test_list(self): - x = ones((1,1)) - x[:,[0]] = 2.0 + x = ones((1, 1)) + x[:, [0]] = 2.0 assert_array_equal(x, array([[2.0]])) - x = ones((1,1,1)) - x[:,:,[0]] = 2.0 + x = ones((1, 1, 1)) + x[:,:, [0]] = 2.0 assert_array_equal(x, array([[[2.0]]])) def test_tuple(self): - x = ones((1,1)) - x[:,(0,)] = 2.0 + x = ones((1, 1)) + x[:, (0,)] = 2.0 assert_array_equal(x, array([[2.0]])) - x = ones((1,1,1)) - x[:,:,(0,)] = 2.0 + x = ones((1, 1, 1)) + x[:,:, (0,)] = 2.0 assert_array_equal(x, array([[[2.0]]])) def test_mask(self): - x = array([1,2,3,4]) - m = array([0,1],bool) + x = array([1, 2, 3, 4]) + m = array([0, 1], bool) assert_array_equal(x[m], array([2])) def test_mask2(self): - x = array([[1,2,3,4],[5,6,7,8]]) - m = array([0,1],bool) - m2 = array([[0,1],[1,0]], bool) - m3 = array([[0,1]], bool) - assert_array_equal(x[m], array([[5,6,7,8]])) - assert_array_equal(x[m2], array([2,5])) + x = array([[1, 2, 3, 4], [5, 6, 7, 8]]) + m = array([0, 1], bool) + m2 = array([[0, 1], [1, 0]], bool) + m3 = array([[0, 1]], bool) + assert_array_equal(x[m], array([[5, 6, 7, 8]])) + assert_array_equal(x[m2], array([2, 5])) assert_array_equal(x[m3], array([2])) def test_assign_mask(self): - x = array([1,2,3,4]) - m = array([0,1],bool) + x = array([1, 2, 3, 4]) + m = array([0, 1], bool) x[m] = 5 - assert_array_equal(x, array([1,5,3,4])) + assert_array_equal(x, array([1, 5, 3, 4])) def test_assign_mask2(self): - xorig = array([[1,2,3,4],[5,6,7,8]]) - m = array([0,1],bool) - m2 = array([[0,1],[1,0]],bool) - m3 = array([[0,1]], bool) + xorig = array([[1, 2, 3, 4], [5, 6, 7, 8]]) + m = array([0, 1], bool) + m2 = array([[0, 1], [1, 0]], bool) + m3 = array([[0, 1]], bool) x = xorig.copy() x[m] = 10 - assert_array_equal(x, array([[1,2,3,4],[10,10,10,10]])) + assert_array_equal(x, array([[1, 2, 3, 4], [10, 10, 10, 10]])) x = xorig.copy() x[m2] = 10 - assert_array_equal(x, array([[1,10,3,4],[10,6,7,8]])) + assert_array_equal(x, array([[1, 10, 3, 4], [10, 6, 7, 8]])) x = xorig.copy() x[m3] = 10 - assert_array_equal(x, array([[1,10,3,4],[5,6,7,8]])) + assert_array_equal(x, array([[1, 10, 3, 4], [5, 6, 7, 8]])) class TestStringCompare(TestCase): def test_string(self): - g1 = array(["This","is","example"]) - g2 = array(["This","was","example"]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) + g1 = array(["This", "is", "example"]) + g2 = array(["This", "was", "example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) def test_mixed(self): - g1 = array(["spam","spa","spammer","and eggs"]) + g1 = array(["spam", "spa", "spammer", "and eggs"]) g2 = "spam" assert_array_equal(g1 == g2, [x == g2 for x in g1]) 
assert_array_equal(g1 != g2, [x != g2 for x in g1]) @@ -1652,14 +1652,14 @@ class TestStringCompare(TestCase): def test_unicode(self): - g1 = array([sixu("This"),sixu("is"),sixu("example")]) - g2 = array([sixu("This"),sixu("was"),sixu("example")]) - assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0,1,2]]) - assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0,1,2]]) - assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0,1,2]]) - assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) - assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) + g1 = array([sixu("This"), sixu("is"), sixu("example")]) + g2 = array([sixu("This"), sixu("was"), sixu("example")]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) class TestArgmax(TestCase): @@ -1669,11 +1669,11 @@ class TestArgmax(TestCase): ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0,np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan,0)], 4), - ([0, 1, 2, complex(np.nan,0), 3], 3), - ([0, 1, 2, complex(0,np.nan), 3], 3), - ([complex(0,np.nan), 0, 1, 2, 3], 0), + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), @@ -1716,7 +1716,7 @@ class TestArgmax(TestCase): ] def test_all(self): - a = np.random.normal(0,1,(4,5,6,7,8)) + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amax = a.max(i) aargmax = a.argmax(i) @@ -1737,11 +1737,11 @@ class TestArgmin(TestCase): ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), - ([0, 1, 2, 3, complex(0,np.nan)], 4), - ([0, 1, 2, 3, complex(np.nan,0)], 4), - ([0, 1, 2, complex(np.nan,0), 3], 3), - ([0, 1, 2, complex(0,np.nan), 3], 3), - ([complex(0,np.nan), 0, 1, 2, 3], 0), + ([0, 1, 2, 3, complex(0, np.nan)], 4), + ([0, 1, 2, 3, complex(np.nan, 0)], 4), + ([0, 1, 2, complex(np.nan, 0), 3], 3), + ([0, 1, 2, complex(0, np.nan), 3], 3), + ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), @@ -1784,7 +1784,7 @@ class TestArgmin(TestCase): ] def test_all(self): - a = np.random.normal(0,1,(4,5,6,7,8)) + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amin = a.min(i) aargmin = a.argmin(i) @@ -1823,18 +1823,18 @@ class TestMinMax(TestCase): assert_equal(np.amin(1, axis=None), 1) def test_axis(self): - assert_raises(ValueError, np.amax, [1,2,3], 1000) - assert_equal(np.amax([[1,2,3]], axis=1), 3) + assert_raises(ValueError, np.amax, [1, 2, 3], 1000) + assert_equal(np.amax([[1, 2, 3]], axis=1), 3) class TestNewaxis(TestCase): def test_basic(self): - sk = array([0,-0.1,0.1]) - res = 250*sk[:,newaxis] - assert_almost_equal(res.ravel(),250*sk) + sk = 
array([0, -0.1, 0.1]) + res = 250*sk[:, newaxis] + assert_almost_equal(res.ravel(), 250*sk) class TestClip(TestCase): - def _check_range(self,x,cmin,cmax): + def _check_range(self, x, cmin, cmax): assert_(np.all(x >= cmin)) assert_(np.all(x <= cmax)) @@ -1848,45 +1848,45 @@ class TestClip(TestCase): for T in np.sctypes[type_group]: if sys.byteorder == 'little': - byte_orders = ['=','>'] + byte_orders = ['=', '>'] else: - byte_orders = ['<','='] + byte_orders = ['<', '='] for byteorder in byte_orders: dtype = np.dtype(T).newbyteorder(byteorder) x = (np.random.random(1000) * array_max).astype(dtype) if inplace: - x.clip(clip_min,clip_max,x) + x.clip(clip_min, clip_max, x) else: - x = x.clip(clip_min,clip_max) + x = x.clip(clip_min, clip_max) byteorder = '=' if x.dtype.byteorder == '|': byteorder = '|' - assert_equal(x.dtype.byteorder,byteorder) - self._check_range(x,expected_min,expected_max) + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) return x def test_basic(self): for inplace in [False, True]: - self._clip_type('float',1024,-12.8,100.2, inplace=inplace) - self._clip_type('float',1024,0,0, inplace=inplace) + self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace) + self._clip_type('float', 1024, 0, 0, inplace=inplace) - self._clip_type('int',1024,-120,100.5, inplace=inplace) - self._clip_type('int',1024,0,0, inplace=inplace) + self._clip_type('int', 1024, -120, 100.5, inplace=inplace) + self._clip_type('int', 1024, 0, 0, inplace=inplace) - x = self._clip_type('uint',1024,-120,100,expected_min=0, + x = self._clip_type('uint', 1024, -120, 100, expected_min=0, inplace=inplace) - x = self._clip_type('uint',1024,0,0, inplace=inplace) + x = self._clip_type('uint', 1024, 0, 0, inplace=inplace) def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')]) - y = rec['x'].clip(-0.3,0.5) - self._check_range(y,-0.3,0.5) + y = rec['x'].clip(-0.3, 0.5) + self._check_range(y, -0.3, 0.5) def test_max_or_min(self): - val = np.array([0,1,2,3,4,5,6,7]) + val = np.array([0, 1, 2, 3, 4, 5, 6, 7]) x = val.clip(3) assert_(np.all(x >= 3)) x = val.clip(min=3) @@ -1907,36 +1907,36 @@ class TestPutmask(object): x = np.random.random(1000)*100 mask = x < 40 - for val in [-100,0,15]: + for val in [-100, 0, 15]: for types in np.sctypes.values(): for T in types: if T not in unchecked_types: - yield self.tst_basic,x.copy().astype(T),T,mask,val + yield self.tst_basic, x.copy().astype(T), T, mask, val def test_mask_size(self): - assert_raises(ValueError, np.putmask, np.array([1,2,3]), [True], 5) + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) - def tst_byteorder(self,dtype): - x = np.array([1,2,3],dtype) - np.putmask(x,[True,False,True],-1) - assert_array_equal(x,[-1,2,-1]) + def tst_byteorder(self, dtype): + x = np.array([1, 2, 3], dtype) + np.putmask(x, [True, False, True], -1) + assert_array_equal(x, [-1, 2, -1]) def test_ip_byteorder(self): - for dtype in ('>i4','<i4'): - yield self.tst_byteorder,dtype + for dtype in ('>i4', '<i4'): + yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. 
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) - np.putmask(rec['x'],[True,False],10) - assert_array_equal(rec['x'],[10,5]) - assert_array_equal(rec['y'],[2,4]) - assert_array_equal(rec['z'],[3,3]) - np.putmask(rec['y'],[True,False],11) - assert_array_equal(rec['x'],[10,5]) - assert_array_equal(rec['y'],[11,4]) - assert_array_equal(rec['z'],[3,3]) + np.putmask(rec['x'], [True, False], 10) + assert_array_equal(rec['x'], [10, 5]) + assert_array_equal(rec['y'], [2, 4]) + assert_array_equal(rec['z'], [3, 3]) + np.putmask(rec['y'], [True, False], 11) + assert_array_equal(rec['x'], [10, 5]) + assert_array_equal(rec['y'], [11, 4]) + assert_array_equal(rec['z'], [3, 3]) def test_masked_array(self): ## x = np.array([1,2,3]) @@ -1946,7 +1946,7 @@ class TestPutmask(object): class TestTake(object): - def tst_basic(self,x): + def tst_basic(self, x): ind = list(range(x.shape[0])) assert_array_equal(x.take(ind, axis=0), x) @@ -1954,39 +1954,39 @@ class TestTake(object): unchecked_types = [str, unicode, np.void, object] x = np.random.random(24)*100 - x.shape = 2,3,4 + x.shape = 2, 3, 4 for types in np.sctypes.values(): for T in types: if T not in unchecked_types: - yield self.tst_basic,x.copy().astype(T) + yield self.tst_basic, x.copy().astype(T) def test_raise(self): x = np.random.random(24)*100 - x.shape = 2,3,4 - assert_raises(IndexError, x.take, [0,1,2], axis=0) + x.shape = 2, 3, 4 + assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24)*100 - x.shape = 2,3,4 + x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24)*100 - x.shape = 2,3,4 + x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) - def tst_byteorder(self,dtype): - x = np.array([1,2,3],dtype) - assert_array_equal(x.take([0,2,1]),[1,3,2]) + def tst_byteorder(self, dtype): + x = np.array([1, 2, 3], dtype) + assert_array_equal(x.take([0, 2, 1]), [1, 3, 2]) def test_ip_byteorder(self): - for dtype in ('>i4','<i4'): - yield self.tst_byteorder,dtype + for dtype in ('>i4', '<i4'): + yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. 
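
The TestPutmask hunks above repeat one pattern: build an array (often with a non-native byte order or a structured dtype), call np.putmask in place, and compare element-wise. A short illustrative sketch of that pattern, using values that mirror the test data rather than code taken from the patch::

    import numpy as np

    # putmask works in place and honours the dtype's byte order.
    x = np.array([1, 2, 3], dtype='>i4')        # big-endian on purpose
    np.putmask(x, [True, False, True], -1)
    assert list(x) == [-1, 2, -1]

    # Indexing a field of a structured array returns a view, so putmask on
    # rec['x'] modifies rec itself and leaves the other fields untouched.
    rec = np.array([(-5, 2.0), (5.0, 4.0)], dtype=[('x', '<f8'), ('y', '>f8')])
    np.putmask(rec['x'], [True, False], 10)
    assert list(rec['x']) == [10.0, 5.0] and list(rec['y']) == [2.0, 4.0]
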
@@ -1998,27 +1998,27 @@ class TestTake(object): class TestLexsort(TestCase): def test_basic(self): - a = [1,2,1,3,1,5] - b = [0,4,5,6,2,3] - idx = np.lexsort((b,a)) - expected_idx = np.array([0,4,2,1,3,5]) - assert_array_equal(idx,expected_idx) + a = [1, 2, 1, 3, 1, 5] + b = [0, 4, 5, 6, 2, 3] + idx = np.lexsort((b, a)) + expected_idx = np.array([0, 4, 2, 1, 3, 5]) + assert_array_equal(idx, expected_idx) - x = np.vstack((b,a)) + x = np.vstack((b, a)) idx = np.lexsort(x) - assert_array_equal(idx,expected_idx) + assert_array_equal(idx, expected_idx) - assert_array_equal(x[1][idx],np.sort(x[1])) + assert_array_equal(x[1][idx], np.sort(x[1])) class TestIO(object): """Test tofile, fromfile, tostring, and fromstring""" def setUp(self): - shape = (2,4,3) + shape = (2, 4, 3) rand = np.random.random self.x = rand(shape) + rand(shape).astype(np.complex)*1j - self.x[0,:,1] = [nan, inf, -inf, nan] + self.x[0,:, 1] = [nan, inf, -inf, nan] self.dtype = self.x.dtype self.filename = tempfile.mktemp() @@ -2028,7 +2028,7 @@ class TestIO(object): #tmp_file.close() def test_bool_fromstring(self): - v = np.array([True,False,True,False], dtype=np.bool_) + v = np.array([True, False, True, False], dtype=np.bool_) y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_) assert_array_equal(v, y) @@ -2109,7 +2109,7 @@ class TestIO(object): def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', - array([1,2,3,4]), + array([1, 2, 3, 4]), dtype='<f4') @dec.slow # takes > 1 minute on mechanical hard drive @@ -2149,21 +2149,21 @@ class TestIO(object): self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') def test_counted_string_with_ws(self): - self._check_from('1 2 3 4 ', [1,2,3], count=3, dtype=int, + self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int, sep=' ') def test_ascii(self): - self._check_from('1 , 2 , 3 , 4', [1.,2.,3.,4.], sep=',') - self._check_from('1,2,3,4', [1.,2.,3.,4.], dtype=float, sep=',') + self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') + self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): self._check_from('1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): - self._check_from('1_x_3_x_4_x_5', [1,3,4,5], sep='_x_') + self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') def test_dtype(self): - v = np.array([1,2,3,4], dtype=np.int_) + v = np.array([1, 2, 3, 4], dtype=np.int_) self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) def test_dtype_bool(self): @@ -2210,14 +2210,14 @@ class TestIO(object): class TestFromBuffer(object): - def tst_basic(self,buffer,expected,kwargs): - assert_array_equal(np.frombuffer(buffer,**kwargs),expected) + def tst_basic(self, buffer, expected, kwargs): + assert_array_equal(np.frombuffer(buffer,**kwargs), expected) def test_ip_basic(self): - for byteorder in ['<','>']: - for dtype in [float,int,np.complex]: + for byteorder in ['<', '>']: + for dtype in [float, int, np.complex]: dt = np.dtype(dtype).newbyteorder(byteorder) - x = (np.random.random((4,7))*5).astype(dt) + x = (np.random.random((4, 7))*5).astype(dt) buf = x.tostring() yield self.tst_basic, buf, x.flat, {'dtype':dt} @@ -2228,13 +2228,13 @@ class TestFromBuffer(object): class TestFlat(TestCase): def setUp(self): a0 = arange(20.0) - a = a0.reshape(4,5) - a0.shape = (4,5) + a = a0.reshape(4, 5) + a0.shape = (4, 5) a.flags.writeable = False self.a = a - self.b = a[::2,::2] + self.b = a[::2, ::2] self.a0 = a0 - self.b0 = a0[::2,::2] + self.b0 = a0[::2, ::2] def test_contiguous(self): 
testpassed = False @@ -2274,15 +2274,15 @@ class TestFlat(TestCase): class TestResize(TestCase): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - x.resize((5,5)) + x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) - assert_array_equal(x[9:].flat,0) + assert_array_equal(x[9:].flat, 0) def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x - self.assertRaises(ValueError,x.resize,(5,1)) + self.assertRaises(ValueError, x.resize, (5, 1)) def test_int_shape(self): x = np.eye(3) @@ -2304,21 +2304,21 @@ class TestResize(TestCase): def test_freeform_shape(self): x = np.eye(3) - x.resize(3,2,1) - assert_(x.shape == (3,2,1)) + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) def test_zeros_appended(self): x = np.eye(3) - x.resize(2,3,3) + x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) - assert_array_equal(x[1], np.zeros((3,3))) + assert_array_equal(x[1], np.zeros((3, 3))) class TestRecord(TestCase): def test_field_rename(self): - dt = np.dtype([('f',float),('i',int)]) - dt.names = ['p','q'] - assert_equal(dt.names,['p','q']) + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) if sys.version_info[0] >= 3: def test_bytes_fields(self): @@ -2393,14 +2393,14 @@ class TestRecord(TestCase): # multiple Subfields fn2 = func('f2') b[fn2] = 3 - assert_equal(b[['f1','f2']][0].tolist(), (2, 3)) - assert_equal(b[['f2','f1']][0].tolist(), (3, 2)) - assert_equal(b[['f1','f3']][0].tolist(), (2, (1,))) + assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) + assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) # view of subfield view/copy - assert_equal(b[['f1','f2']][0].view(('i4',2)).tolist(), (2, 3)) - assert_equal(b[['f2','f1']][0].view(('i4',2)).tolist(), (3, 2)) - view_dtype=[('f1', 'i4'),('f3', [('', 'i4')])] - assert_equal(b[['f1','f3']][0].view(view_dtype).tolist(), (2, (1,))) + assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3)) + assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2)) + view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])] + assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) # non-ascii unicode field indexing is well behaved if not is_py3: raise SkipTest('non ascii unicode field indexing skipped; ' @@ -2432,11 +2432,11 @@ class TestRecord(TestCase): # All the different functions raise a warning, but not an error, and # 'a' is not modified: - assert_equal(collect_warning_types(a[['f1','f2']].__setitem__, 0, (10,20)), + assert_equal(collect_warning_types(a[['f1', 'f2']].__setitem__, 0, (10, 20)), [FutureWarning]) assert_equal(a, b) # Views also warn - subset = a[['f1','f2']] + subset = a[['f1', 'f2']] subset_view = subset.view() assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10), [FutureWarning]) @@ -2448,11 +2448,11 @@ class TestRecord(TestCase): []) def test_record_hash(self): - a = np.array([(1,2),(1,2)], dtype='i1,i2') + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') a.flags.writeable = False - b = np.array([(1,2),(3,4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) + b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) b.flags.writeable = False - c = np.array([(1,2),(3,4)], dtype='i1,i2') + c = np.array([(1, 2), (3, 4)], dtype='i1,i2') c.flags.writeable = False self.assertTrue(hash(a[0]) == hash(a[1])) self.assertTrue(hash(a[0]) == hash(b[0])) @@ -2460,13 +2460,13 @@ class TestRecord(TestCase): 
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0]) def test_record_no_hash(self): - a = np.array([(1,2),(1,2)], dtype='i1,i2') + a = np.array([(1, 2), (1, 2)], dtype='i1,i2') self.assertRaises(TypeError, hash, a[0]) class TestView(TestCase): def test_basic(self): - x = np.array([(1,2,3,4),(5,6,7,8)],dtype=[('r',np.int8),('g',np.int8), - ('b',np.int8),('a',np.int8)]) + x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), + ('b', np.int8), ('a', np.int8)]) # We must be specific about the endianness here: y = x.view(dtype='<i4') # ... and again without the keyword. @@ -2605,7 +2605,7 @@ class TestStats(TestCase): assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): - A = np.zeros((0,3)) + A = np.zeros((0, 3)) for f in self.funcs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: @@ -2656,7 +2656,7 @@ class TestStats(TestCase): def __array_finalize__(self, obj): self.info = getattr(obj, "info", '') - dat = TestArray([[1,2,3,4],[5,6,7,8]], 'jubba') + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') res = dat.mean(1) assert_(res.info == dat.info) res = dat.std(1) @@ -2684,16 +2684,16 @@ class TestDot(TestCase): r = np.empty((1024, 32)) for i in range(12): - dot(f,v,r) + dot(f, v, r) assert_equal(sys.getrefcount(r), 2) - r2 = dot(f,v,out=None) + r2 = dot(f, v, out=None) assert_array_equal(r2, r) - assert_(r is dot(f,v,out=r)) + assert_(r is dot(f, v, out=r)) - v = v[:,0].copy() # v.shape == (16,) - r = r[:,0].copy() # r.shape == (1024,) - r2 = dot(f,v) - assert_(r is dot(f,v,r)) + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) assert_array_equal(r2, r) def test_dot_3args_errors(self): @@ -2717,8 +2717,8 @@ class TestDot(TestCase): assert_raises(ValueError, dot, f, v, r.T) r = np.empty((1024, 64)) - assert_raises(ValueError, dot, f, v, r[:,::2]) - assert_raises(ValueError, dot, f, v, r[:,:32]) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, dot, f, v, r) @@ -2737,7 +2737,7 @@ class TestSummarization(TestCase): assert_(repr(A) == reprA) def test_2d(self): - A = np.arange(1002).reshape(2,501) + A = np.arange(1002).reshape(2, 501) strA = '[[ 0 1 2 ..., 498 499 500]\n' \ ' [ 501 502 503 ..., 999 1000 1001]]' assert_(str(A) == strA) @@ -2749,23 +2749,23 @@ class TestSummarization(TestCase): class TestChoose(TestCase): def setUp(self): - self.x = 2*ones((3,),dtype=int) - self.y = 3*ones((3,),dtype=int) - self.x2 = 2*ones((2,3), dtype=int) - self.y2 = 3*ones((2,3), dtype=int) - self.ind = [0,0,1] + self.x = 2*ones((3,), dtype=int) + self.y = 3*ones((3,), dtype=int) + self.x2 = 2*ones((2, 3), dtype=int) + self.y2 = 3*ones((2, 3), dtype=int) + self.ind = [0, 0, 1] def test_basic(self): A = np.choose(self.ind, (self.x, self.y)) - assert_equal(A, [2,2,3]) + assert_equal(A, [2, 2, 3]) def test_broadcast1(self): A = np.choose(self.ind, (self.x2, self.y2)) - assert_equal(A, [[2,2,3],[2,2,3]]) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): A = np.choose(self.ind, (self.x, self.y2)) - assert_equal(A, [[2,2,3],[2,2,3]]) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def can_use_decimal(): try: @@ -3052,13 +3052,13 @@ class TestStackedNeighborhoodIter(TestCase): class TestWarnings(object): def test_complex_warning(self): - x = np.array([1,2]) - y = np.array([1-2j,1+2j]) + x = np.array([1, 2]) + y = np.array([1-2j, 
1+2j]) with warnings.catch_warnings(): warnings.simplefilter("error", np.ComplexWarning) assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) - assert_equal(x, [1,2]) + assert_equal(x, [1, 2]) class TestMinScalarType(object): @@ -3187,13 +3187,13 @@ class TestNewBufferProtocol(object): assert_array_equal(obj, y2) def test_roundtrip(self): - x = np.array([1,2,3,4,5], dtype='i4') + x = np.array([1, 2, 3, 4, 5], dtype='i4') self._check_roundtrip(x) - x = np.array([[1,2],[3,4]], dtype=np.float64) + x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) - x = np.zeros((3,3,3), dtype=np.float32)[:,0,:] + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] self._check_roundtrip(x) dt = [('a', 'b'), @@ -3224,32 +3224,32 @@ class TestNewBufferProtocol(object): dtype=dt) self._check_roundtrip(x) - x = np.array(([[1,2],[3,4]],), dtype=[('a', (int, (2,2)))]) + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) self._check_roundtrip(x) - x = np.array([1,2,3], dtype='>i2') + x = np.array([1, 2, 3], dtype='>i2') self._check_roundtrip(x) - x = np.array([1,2,3], dtype='<i2') + x = np.array([1, 2, 3], dtype='<i2') self._check_roundtrip(x) - x = np.array([1,2,3], dtype='>i4') + x = np.array([1, 2, 3], dtype='>i4') self._check_roundtrip(x) - x = np.array([1,2,3], dtype='<i4') + x = np.array([1, 2, 3], dtype='<i4') self._check_roundtrip(x) # Native-only data types can be passed through the buffer interface # only in native byte order if sys.byteorder == 'little': - x = np.array([1,2,3], dtype='>q') + x = np.array([1, 2, 3], dtype='>q') assert_raises(ValueError, self._check_roundtrip, x) - x = np.array([1,2,3], dtype='<q') + x = np.array([1, 2, 3], dtype='<q') self._check_roundtrip(x) else: - x = np.array([1,2,3], dtype='>q') + x = np.array([1, 2, 3], dtype='>q') self._check_roundtrip(x) - x = np.array([1,2,3], dtype='<q') + x = np.array([1, 2, 3], dtype='<q') assert_raises(ValueError, self._check_roundtrip, x) def test_roundtrip_half(self): @@ -3272,7 +3272,7 @@ class TestNewBufferProtocol(object): self._check_roundtrip(x) def test_export_simple_1d(self): - x = np.array([1,2,3,4,5], dtype='i') + x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) assert_equal(y.format, 'i') assert_equal(y.shape, (5,)) @@ -3282,7 +3282,7 @@ class TestNewBufferProtocol(object): assert_equal(y.itemsize, 4) def test_export_simple_nd(self): - x = np.array([[1,2],[3,4]], dtype=np.float64) + x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = memoryview(x) assert_equal(y.format, 'd') assert_equal(y.shape, (2, 2)) @@ -3292,7 +3292,7 @@ class TestNewBufferProtocol(object): assert_equal(y.itemsize, 8) def test_export_discontiguous(self): - x = np.zeros((3,3,3), dtype=np.float32)[:,0,:] + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) @@ -3344,7 +3344,7 @@ class TestNewBufferProtocol(object): assert_equal(y.itemsize, sz) def test_export_subarray(self): - x = np.array(([[1,2],[3,4]],), dtype=[('a', ('i', (2,2)))]) + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') assert_equal(y.shape, EMPTY) @@ -3354,14 +3354,14 @@ class TestNewBufferProtocol(object): assert_equal(y.itemsize, 16) def test_export_endian(self): - x = np.array([1,2,3], dtype='>i') + x = np.array([1, 2, 3], dtype='>i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, '>i') else: assert_equal(y.format, 'i') - x = np.array([1,2,3], dtype='<i') + x = 
np.array([1, 2, 3], dtype='<i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, 'i') @@ -3374,7 +3374,7 @@ class TestNewBufferProtocol(object): def test_padding(self): for j in range(8): - x = np.array([(1,),(2,)], dtype={'f0': (int, j)}) + x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) def test_reference_leak(self): @@ -3460,7 +3460,7 @@ def test_array_interface(): assert_equal(np.array(f), 0.5) f.iface['shape'] = None assert_raises(TypeError, np.array, f) - f.iface['shape'] = (1,1) + f.iface['shape'] = (1, 1) assert_equal(np.array(f), [[0.5]]) f.iface['shape'] = (2,) assert_raises(ValueError, np.array, f) @@ -3497,19 +3497,19 @@ class TestMapIter(TestCase): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src - a = arange(12).reshape((3,4)).astype(float) - index = ([1,1,2,0], - [0,0,2,3]) - vals = [50,50, 30,16] + a = arange(12).reshape((3, 4)).astype(float) + index = ([1, 1, 2, 0], + [0, 0, 2, 3]) + vals = [50, 50, 30, 16] test_inplace_increment(a, index, vals) - assert_equal(a, [[ 0. , 1., 2., 19.,], + assert_equal(a, [[ 0., 1., 2., 19.,], [ 104., 5., 6., 7.,], [ 8., 9., 40., 11.,]]) b = arange(6).astype(float) - index = (array([1,2,0]),) - vals = [50,4,100.1] + index = (array([1, 2, 0]),) + vals = [50, 4, 100.1] test_inplace_increment(b, index, vals) assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. ]) diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py index 76d85618f..bc20b900e 100644 --- a/numpy/core/tests/test_nditer.py +++ b/numpy/core/tests/test_nditer.py @@ -39,7 +39,7 @@ def test_iter_refcount(): rc_a = sys.getrefcount(a) rc_dt = sys.getrefcount(dt) it = nditer(a, [], - [['readwrite','updateifcopy']], + [['readwrite', 'updateifcopy']], casting='unsafe', op_dtypes=[dt]) assert_(not it.iterationneedsapi) @@ -74,14 +74,14 @@ def test_iter_best_order(): # with increasing memory addresses # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) + dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] @@ -93,21 +93,21 @@ def test_iter_best_order(): assert_equal([x for x in i], a) # Other order if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), [], [['readonly']]) + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) assert_equal([x for x in i], a) def test_iter_c_order(): # Test forcing C order # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) + dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] @@ -119,22 +119,22 @@ def test_iter_c_order(): assert_equal([x for x in i], aview.T.ravel(order='C')) # Other order if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), order='C') + i = nditer(aview.swapaxes(0, 1), order='C') assert_equal([x for x in i], 
- aview.swapaxes(0,1).ravel(order='C')) + aview.swapaxes(0, 1).ravel(order='C')) def test_iter_f_order(): # Test forcing F order # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) + dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] @@ -146,22 +146,22 @@ def test_iter_f_order(): assert_equal([x for x in i], aview.T.ravel(order='F')) # Other order if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), order='F') + i = nditer(aview.swapaxes(0, 1), order='F') assert_equal([x for x in i], - aview.swapaxes(0,1).ravel(order='F')) + aview.swapaxes(0, 1).ravel(order='F')) def test_iter_c_or_f_order(): # Test forcing any contiguous (C or F) order # Test the ordering for 1-D to 5-D shapes - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: a = arange(np.prod(shape)) # Test each combination of positive and negative strides for dirs in range(2**len(shape)): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) + dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] @@ -173,250 +173,250 @@ def test_iter_c_or_f_order(): assert_equal([x for x in i], aview.T.ravel(order='A')) # Other order if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), order='A') + i = nditer(aview.swapaxes(0, 1), order='A') assert_equal([x for x in i], - aview.swapaxes(0,1).ravel(order='A')) + aview.swapaxes(0, 1).ravel(order='A')) def test_iter_best_order_multi_index_1d(): # The multi-indices should be correct with any reordering a = arange(4) # 1D order - i = nditer(a,['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,),(1,),(2,),(3,)]) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) # 1D reversed order - i = nditer(a[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(3,),(2,),(1,),(0,)]) + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) def test_iter_best_order_multi_index_2d(): # The multi-indices should be correct with any reordering a = arange(6) # 2D C-order - i = nditer(a.reshape(2,3),['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2)]) + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) # 2D Fortran-order - i = nditer(a.reshape(2,3).copy(order='F'),['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,0),(1,0),(0,1),(1,1),(0,2),(1,2)]) + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) # 2D reversed C-order - i = nditer(a.reshape(2,3)[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,0),(1,1),(1,2),(0,0),(0,1),(0,2)]) - i = nditer(a.reshape(2,3)[:,::-1],['multi_index'],[['readonly']]) - 
assert_equal(iter_multi_index(i), [(0,2),(0,1),(0,0),(1,2),(1,1),(1,0)]) - i = nditer(a.reshape(2,3)[::-1,::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,2),(1,1),(1,0),(0,2),(0,1),(0,0)]) + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) # 2D reversed Fortran-order - i = nditer(a.reshape(2,3).copy(order='F')[::-1],['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,0),(0,0),(1,1),(0,1),(1,2),(0,2)]) - i = nditer(a.reshape(2,3).copy(order='F')[:,::-1], - ['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(0,2),(1,2),(0,1),(1,1),(0,0),(1,0)]) - i = nditer(a.reshape(2,3).copy(order='F')[::-1,::-1], - ['multi_index'],[['readonly']]) - assert_equal(iter_multi_index(i), [(1,2),(0,2),(1,1),(0,1),(1,0),(0,0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) def test_iter_best_order_multi_index_3d(): # The multi-indices should be correct with any reordering a = arange(12) # 3D C-order - i = nditer(a.reshape(2,3,2),['multi_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(0,0,0),(0,0,1),(0,1,0),(0,1,1),(0,2,0),(0,2,1), - (1,0,0),(1,0,1),(1,1,0),(1,1,1),(1,2,0),(1,2,1)]) + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) # 3D Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F'),['multi_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(0,0,0),(1,0,0),(0,1,0),(1,1,0),(0,2,0),(1,2,0), - (0,0,1),(1,0,1),(0,1,1),(1,1,1),(0,2,1),(1,2,1)]) + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) # 3D reversed C-order - i = nditer(a.reshape(2,3,2)[::-1],['multi_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(1,0,0),(1,0,1),(1,1,0),(1,1,1),(1,2,0),(1,2,1), - (0,0,0),(0,0,1),(0,1,0),(0,1,1),(0,2,0),(0,2,1)]) - i = nditer(a.reshape(2,3,2)[:,::-1],['multi_index'],[['readonly']]) + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(0,2,0),(0,2,1),(0,1,0),(0,1,1),(0,0,0),(0,0,1), - (1,2,0),(1,2,1),(1,1,0),(1,1,1),(1,0,0),(1,0,1)]) - i = nditer(a.reshape(2,3,2)[:,:,::-1],['multi_index'],[['readonly']]) + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 
1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(0,0,1),(0,0,0),(0,1,1),(0,1,0),(0,2,1),(0,2,0), - (1,0,1),(1,0,0),(1,1,1),(1,1,0),(1,2,1),(1,2,0)]) + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) # 3D reversed Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F')[::-1], - ['multi_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(1,0,0),(0,0,0),(1,1,0),(0,1,0),(1,2,0),(0,2,0), - (1,0,1),(0,0,1),(1,1,1),(0,1,1),(1,2,1),(0,2,1)]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,::-1], - ['multi_index'],[['readonly']]) + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(0,2,0),(1,2,0),(0,1,0),(1,1,0),(0,0,0),(1,0,0), - (0,2,1),(1,2,1),(0,1,1),(1,1,1),(0,0,1),(1,0,1)]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,:,::-1], - ['multi_index'],[['readonly']]) + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), - [(0,0,1),(1,0,1),(0,1,1),(1,1,1),(0,2,1),(1,2,1), - (0,0,0),(1,0,0),(0,1,0),(1,1,0),(0,2,0),(1,2,0)]) + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) def test_iter_best_order_c_index_1d(): # The C index should be correct with any reordering a = arange(4) # 1D order - i = nditer(a,['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3]) + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order - i = nditer(a[::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,2,1,0]) + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_c_index_2d(): # The C index should be correct with any reordering a = arange(6) # 2D C-order - i = nditer(a.reshape(2,3),['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3,4,5]) + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D Fortran-order - i = nditer(a.reshape(2,3).copy(order='F'), - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,3,1,4,2,5]) + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) # 2D reversed C-order - i = nditer(a.reshape(2,3)[::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,4,5,0,1,2]) - i = nditer(a.reshape(2,3)[:,::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [2,1,0,5,4,3]) - i = nditer(a.reshape(2,3)[::-1,::-1],['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,4,3,2,1,0]) + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = 
nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) # 2D reversed Fortran-order - i = nditer(a.reshape(2,3).copy(order='F')[::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [3,0,4,1,5,2]) - i = nditer(a.reshape(2,3).copy(order='F')[:,::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [2,5,1,4,0,3]) - i = nditer(a.reshape(2,3).copy(order='F')[::-1,::-1], - ['c_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,2,4,1,3,0]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) def test_iter_best_order_c_index_3d(): # The C index should be correct with any reordering a = arange(12) # 3D C-order - i = nditer(a.reshape(2,3,2),['c_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [0,1,2,3,4,5,6,7,8,9,10,11]) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F'), - ['c_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [0,6,2,8,4,10,1,7,3,9,5,11]) + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D reversed C-order - i = nditer(a.reshape(2,3,2)[::-1],['c_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [6,7,8,9,10,11,0,1,2,3,4,5]) - i = nditer(a.reshape(2,3,2)[:,::-1],['c_index'],[['readonly']]) + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [4,5,2,3,0,1,10,11,8,9,6,7]) - i = nditer(a.reshape(2,3,2)[:,:,::-1],['c_index'],[['readonly']]) + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [1,0,3,2,5,4,7,6,9,8,11,10]) + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) # 3D reversed Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F')[::-1], - ['c_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [6,0,8,2,10,4,7,1,9,3,11,5]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,::-1], - ['c_index'],[['readonly']]) + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [4,10,2,8,0,6,5,11,3,9,1,7]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,:,::-1], - ['c_index'],[['readonly']]) + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['c_index'], [['readonly']]) assert_equal(iter_indices(i), - [1,7,3,9,5,11,0,6,2,8,4,10]) + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) def test_iter_best_order_f_index_1d(): # The Fortran index should be correct with any reordering a = arange(4) # 1D order - i = nditer(a,['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3]) + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) # 1D reversed order - i = nditer(a[::-1],['f_index'],[['readonly']]) - 
assert_equal(iter_indices(i), [3,2,1,0]) + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) def test_iter_best_order_f_index_2d(): # The Fortran index should be correct with any reordering a = arange(6) # 2D C-order - i = nditer(a.reshape(2,3),['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,2,4,1,3,5]) + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) # 2D Fortran-order - i = nditer(a.reshape(2,3).copy(order='F'), - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [0,1,2,3,4,5]) + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) # 2D reversed C-order - i = nditer(a.reshape(2,3)[::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [1,3,5,0,2,4]) - i = nditer(a.reshape(2,3)[:,::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [4,2,0,5,3,1]) - i = nditer(a.reshape(2,3)[::-1,::-1],['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,3,1,4,2,0]) + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) # 2D reversed Fortran-order - i = nditer(a.reshape(2,3).copy(order='F')[::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [1,0,3,2,5,4]) - i = nditer(a.reshape(2,3).copy(order='F')[:,::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [4,5,2,3,0,1]) - i = nditer(a.reshape(2,3).copy(order='F')[::-1,::-1], - ['f_index'],[['readonly']]) - assert_equal(iter_indices(i), [5,4,3,2,1,0]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) def test_iter_best_order_f_index_3d(): # The Fortran index should be correct with any reordering a = arange(12) # 3D C-order - i = nditer(a.reshape(2,3,2),['f_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [0,6,2,8,4,10,1,7,3,9,5,11]) + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) # 3D Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F'), - ['f_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [0,1,2,3,4,5,6,7,8,9,10,11]) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) # 3D reversed C-order - i = nditer(a.reshape(2,3,2)[::-1],['f_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [1,7,3,9,5,11,0,6,2,8,4,10]) - i = nditer(a.reshape(2,3,2)[:,::-1],['f_index'],[['readonly']]) + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [4,10,2,8,0,6,5,11,3,9,1,7]) - i = nditer(a.reshape(2,3,2)[:,:,::-1],['f_index'],[['readonly']]) + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], 
[['readonly']]) assert_equal(iter_indices(i), - [6,0,8,2,10,4,7,1,9,3,11,5]) + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) # 3D reversed Fortran-order - i = nditer(a.reshape(2,3,2).copy(order='F')[::-1], - ['f_index'],[['readonly']]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [1,0,3,2,5,4,7,6,9,8,11,10]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,::-1], - ['f_index'],[['readonly']]) + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [4,5,2,3,0,1,10,11,8,9,6,7]) - i = nditer(a.reshape(2,3,2).copy(order='F')[:,:,::-1], - ['f_index'],[['readonly']]) + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + ['f_index'], [['readonly']]) assert_equal(iter_indices(i), - [6,7,8,9,10,11,0,1,2,3,4,5]) + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) def test_iter_no_inner_full_coalesce(): # Check no_inner iterators which coalesce into a single inner loop - for shape in [(5,), (3,4), (2,3,4), (2,3,4,3), (2,3,2,2,3)]: + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: size = np.prod(shape) a = arange(size) # Test each combination of forward and backwards indexing @@ -424,7 +424,7 @@ def test_iter_no_inner_full_coalesce(): dirs_index = [slice(None)]*len(shape) for bit in range(len(shape)): if ((2**bit)&dirs): - dirs_index[bit] = slice(None,None,-1) + dirs_index[bit] = slice(None, None, -1) dirs_index = tuple(dirs_index) aview = a.reshape(shape)[dirs_index] @@ -438,7 +438,7 @@ def test_iter_no_inner_full_coalesce(): assert_equal(i[0].shape, (size,)) # Other order if len(shape) > 2: - i = nditer(aview.swapaxes(0,1), + i = nditer(aview.swapaxes(0, 1), ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (size,)) @@ -448,21 +448,21 @@ def test_iter_no_inner_dim_coalescing(): # Skipping the last element in a dimension prevents coalescing # with the next-bigger dimension - a = arange(24).reshape(2,3,4)[:,:,:-1] + a = arange(24).reshape(2, 3, 4)[:,:, :-1] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2,3,4)[:,:-1,:] + a = arange(24).reshape(2, 3, 4)[:, :-1,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2,3,4)[:-1,:,:] + a = arange(24).reshape(2, 3, 4)[:-1,:,:] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (12,)) # Even with lots of 1-sized dimensions, should still coalesce - a = arange(24).reshape(1,1,2,1,1,3,1,1,4,1,1) + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (24,)) @@ -471,25 +471,25 @@ def test_iter_dim_coalescing(): # Check that the correct number of dimensions are coalesced # Tracking a multi-index disables coalescing - a = arange(24).reshape(2,3,4) + a = arange(24).reshape(2, 3, 4) i = nditer(a, ['multi_index'], [['readonly']]) assert_equal(i.ndim, 3) # A tracked index can allow coalescing if it's compatible with the array - a3d = arange(24).reshape(2,3,4) + a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, ['c_index'], [['readonly']]) assert_equal(i.ndim, 1) - i = nditer(a3d.swapaxes(0,1), ['c_index'], [['readonly']]) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) assert_equal(i.ndim, 
3) i = nditer(a3d.T, ['c_index'], [['readonly']]) assert_equal(i.ndim, 3) i = nditer(a3d.T, ['f_index'], [['readonly']]) assert_equal(i.ndim, 1) - i = nditer(a3d.T.swapaxes(0,1), ['f_index'], [['readonly']]) + i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) assert_equal(i.ndim, 3) # When C or F order is forced, coalescing may still occur - a3d = arange(24).reshape(2,3,4) + a3d = arange(24).reshape(2, 3, 4) i = nditer(a3d, order='C') assert_equal(i.ndim, 1) i = nditer(a3d.T, order='C') @@ -512,95 +512,95 @@ def test_iter_broadcasting(): assert_equal(i.shape, (6,)) # 2D with scalar - i = nditer([arange(6).reshape(2,3), np.int32(2)], + i = nditer([arange(6).reshape(2, 3), np.int32(2)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) + assert_equal(i.shape, (2, 3)) # 2D with 1D - i = nditer([arange(6).reshape(2,3), arange(3)], + i = nditer([arange(6).reshape(2, 3), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) - i = nditer([arange(2).reshape(2,1), arange(3)], + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) + assert_equal(i.shape, (2, 3)) # 2D with 2D - i = nditer([arange(2).reshape(2,1), arange(3).reshape(1,3)], + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 6) - assert_equal(i.shape, (2,3)) + assert_equal(i.shape, (2, 3)) # 3D with scalar - i = nditer([np.int32(2), arange(24).reshape(4,2,3)], + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) + assert_equal(i.shape, (4, 2, 3)) # 3D with 1D - i = nditer([arange(3), arange(24).reshape(4,2,3)], + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(3), arange(8).reshape(4,2,1)], + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) + assert_equal(i.shape, (4, 2, 3)) # 3D with 2D - i = nditer([arange(6).reshape(2,3), arange(24).reshape(4,2,3)], + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(2).reshape(2,1), arange(24).reshape(4,2,3)], + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(3).reshape(1,3), arange(8).reshape(4,2,1)], + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) + assert_equal(i.shape, (4, 2, 3)) # 3D with 3D - i = nditer([arange(2).reshape(1,2,1), arange(3).reshape(1,1,3), - arange(4).reshape(4,1,1)], + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*3) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(6).reshape(1,2,3), arange(4).reshape(4,1,1)], + assert_equal(i.shape, (4, 2, 3)) + i = 
nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) - i = nditer([arange(24).reshape(4,2,3), arange(12).reshape(4,1,3)], + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], ['multi_index'], [['readonly']]*2) assert_equal(i.itersize, 24) - assert_equal(i.shape, (4,2,3)) + assert_equal(i.shape, (4, 2, 3)) def test_iter_itershape(): # Check that allocated outputs work with a specified shape - a = np.arange(6, dtype='i2').reshape(2,3) - i = nditer([a, None], [], [['readonly'], ['writeonly','allocate']], - op_axes=[[0,1,None], None], - itershape=(-1,-1,4)) - assert_equal(i.operands[1].shape, (2,3,4)) - assert_equal(i.operands[1].strides, (24,8,2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']], - op_axes=[[0,1,None], None], - itershape=(-1,-1,4)) - assert_equal(i.operands[1].shape, (3,2,4)) - assert_equal(i.operands[1].strides, (8,24,2)) - - i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']], + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], order='F', - op_axes=[[0,1,None], None], - itershape=(-1,-1,4)) - assert_equal(i.operands[1].shape, (3,2,4)) - assert_equal(i.operands[1].strides, (2,6,12)) + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) # If we specify 1 in the itershape, it shouldn't allow broadcasting # of that dimension to a bigger value assert_raises(ValueError, nditer, [a, None], [], - [['readonly'], ['writeonly','allocate']], - op_axes=[[0,1,None], None], - itershape=(-1,1,4)) + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) # Test bug that for no op_axes but itershape, they are NULLed correctly i = np.nditer([np.ones(2), None, None], itershape=(2,)) @@ -612,30 +612,30 @@ def test_iter_broadcasting_errors(): [], [['readonly']]*2) # 2D with 1D assert_raises(ValueError, nditer, - [arange(6).reshape(2,3), arange(2)], + [arange(6).reshape(2, 3), arange(2)], [], [['readonly']]*2) # 2D with 2D assert_raises(ValueError, nditer, - [arange(6).reshape(2,3), arange(9).reshape(3,3)], + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], [], [['readonly']]*2) assert_raises(ValueError, nditer, - [arange(6).reshape(2,3), arange(4).reshape(2,2)], + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], [], [['readonly']]*2) # 3D with 3D assert_raises(ValueError, nditer, - [arange(36).reshape(3,3,4), arange(24).reshape(2,3,4)], + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) assert_raises(ValueError, nditer, - [arange(8).reshape(2,4,1), arange(24).reshape(2,3,4)], + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], [], [['readonly']]*2) # Verify that the error message mentions the right shapes try: - i = nditer([arange(2).reshape(1,2,1), - arange(3).reshape(1,3), - 
arange(6).reshape(2,3)], + i = nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], [], - [['readonly'], ['readonly'], ['writeonly','no_broadcast']]) + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) assert_(False, 'Should have raised a broadcast error') except ValueError as e: msg = str(e) @@ -647,10 +647,10 @@ def test_iter_broadcasting_errors(): 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg) try: - i = nditer([arange(6).reshape(2,3), arange(2)], [], - [['readonly'],['readonly']], - op_axes=[[0,1], [0,np.newaxis]], - itershape=(4,3)) + i = nditer([arange(6).reshape(2, 3), arange(2)], [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) assert_(False, 'Should have raised a broadcast error') except ValueError as e: msg = str(e) @@ -665,9 +665,9 @@ def test_iter_broadcasting_errors(): 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg) try: - i = nditer([np.zeros((2,1,1)), np.zeros((2,))], + i = nditer([np.zeros((2, 1, 1)), np.zeros((2,))], [], - [['writeonly','no_broadcast'], ['readonly']]) + [['writeonly', 'no_broadcast'], ['readonly']]) assert_(False, 'Should have raised a broadcast error') except ValueError as e: msg = str(e) @@ -690,7 +690,7 @@ def test_iter_flags_errors(): # Bad global flag assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) # Bad op flag - assert_raises(ValueError, nditer, [a], [], [['readonly','bad flag']]) + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) # Bad order parameter assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') # Bad casting parameter @@ -699,21 +699,21 @@ def test_iter_flags_errors(): assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2) # Cannot track both a C and an F index assert_raises(ValueError, nditer, a, - ['c_index','f_index'], [['readonly']]) + ['c_index', 'f_index'], [['readonly']]) # Inner iteration and multi-indices/indices are incompatible assert_raises(ValueError, nditer, a, - ['external_loop','multi_index'], [['readonly']]) + ['external_loop', 'multi_index'], [['readonly']]) assert_raises(ValueError, nditer, a, - ['external_loop','c_index'], [['readonly']]) + ['external_loop', 'c_index'], [['readonly']]) assert_raises(ValueError, nditer, a, - ['external_loop','f_index'], [['readonly']]) + ['external_loop', 'f_index'], [['readonly']]) # Must specify exactly one of readwrite/readonly/writeonly per operand assert_raises(ValueError, nditer, a, [], [[]]) - assert_raises(ValueError, nditer, a, [], [['readonly','writeonly']]) - assert_raises(ValueError, nditer, a, [], [['readonly','readwrite']]) - assert_raises(ValueError, nditer, a, [], [['writeonly','readwrite']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) assert_raises(ValueError, nditer, a, - [], [['readonly','writeonly','readwrite']]) + [], [['readonly', 'writeonly', 'readwrite']]) # Python scalars are always readonly assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) @@ -738,7 +738,7 @@ def test_iter_flags_errors(): def assign_iterindex(i): i.iterindex = 0; def assign_iterrange(i): - i.iterrange = (0,1); + i.iterrange = (0, 1); i = nditer(arange(6), ['external_loop']) assert_raises(ValueError, assign_multi_index, i) assert_raises(ValueError, assign_index, i) @@ 
-753,13 +753,13 @@ def test_iter_flags_errors(): def test_iter_slice(): a, b, c = np.arange(3), np.arange(3), np.arange(3.) - i = nditer([a,b,c], [], ['readwrite']) - i[0:2] = (3,3) - assert_equal(a, [3,1,2]) - assert_equal(b, [3,1,2]) - assert_equal(c, [0,1,2]) + i = nditer([a, b, c], [], ['readwrite']) + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) i[1] = 12 - assert_equal(i[0:2], [3,12]) + assert_equal(i[0:2], [3, 12]) def test_iter_nbo_align_contig(): # Check that byte order, alignment, and contig changes work @@ -768,7 +768,7 @@ def test_iter_nbo_align_contig(): a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) - i = nditer(au, [], [['readwrite','updateifcopy']], + i = nditer(au, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) @@ -782,7 +782,7 @@ def test_iter_nbo_align_contig(): a = np.arange(6, dtype='f4') au = a.byteswap().newbyteorder() assert_(a.dtype.byteorder != au.dtype.byteorder) - i = nditer(au, [], [['readwrite','updateifcopy','nbo']], casting='equiv') + i = nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], casting='equiv') assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) assert_equal(i.operands[0], a) @@ -800,7 +800,7 @@ def test_iter_nbo_align_contig(): assert_(not i.operands[0].flags.aligned) assert_equal(i.operands[0], a); # With 'aligned', should make a copy - i = nditer(a, [], [['readwrite','updateifcopy','aligned']]) + i = nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) assert_(i.operands[0].flags.aligned) assert_equal(i.operands[0], a); i.operands[0][:] = 3 @@ -814,8 +814,8 @@ def test_iter_nbo_align_contig(): assert_(i.operands[0].flags.contiguous) assert_equal(i.operands[0], a[:6]); # If it isn't contiguous, should buffer - i = nditer(a[::2], ['buffered','external_loop'], - [['readonly','contig']], + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], buffersize=10) assert_(i[0].flags.contiguous) assert_equal(i[0], a[::2]) @@ -824,106 +824,106 @@ def test_iter_array_cast(): # Check that arrays are cast as requested # No cast 'f4' -> 'f4' - a = np.arange(6, dtype='f4').reshape(2,3) + a = np.arange(6, dtype='f4').reshape(2, 3) i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f4')) # Byte-order cast '<f4' -> '>f4' - a = np.arange(6, dtype='<f4').reshape(2,3) - i = nditer(a, [], [['readwrite','updateifcopy']], + a = np.arange(6, dtype='<f4').reshape(2, 3) + i = nditer(a, [], [['readwrite', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('>f4')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('>f4')) # Safe case 'f4' -> 'f8' - a = np.arange(24, dtype='f4').reshape(2,3,4).swapaxes(1,2) - i = nditer(a, [], [['readonly','copy']], + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f8')) # The memory layout of the temporary should match a (a is (48,4,16)) # except negative strides get flipped to positive strides. 
- assert_equal(i.operands[0].strides, (96,8,32)) - a = a[::-1,:,::-1] - i = nditer(a, [], [['readonly','copy']], + assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1,:, ::-1] + i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f8')) - assert_equal(i.operands[0].strides, (96,8,32)) + assert_equal(i.operands[0].strides, (96, 8, 32)) # Same-kind cast 'f8' -> 'f4' -> 'f8' - a = np.arange(24, dtype='f8').reshape(2,3,4).T + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T i = nditer(a, [], - [['readwrite','updateifcopy']], + [['readwrite', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) assert_equal(i.operands[0], a) assert_equal(i.operands[0].dtype, np.dtype('f4')) assert_equal(i.operands[0].strides, (4, 16, 48)) # Check that UPDATEIFCOPY is activated - i.operands[0][2,1,1] = -12.5 - assert_(a[2,1,1] != -12.5) + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) i = None - assert_equal(a[2,1,1], -12.5) + assert_equal(a[2, 1, 1], -12.5) a = np.arange(6, dtype='i4')[::-2] i = nditer(a, [], - [['writeonly','updateifcopy']], + [['writeonly', 'updateifcopy']], casting='unsafe', op_dtypes=[np.dtype('f4')]) assert_equal(i.operands[0].dtype, np.dtype('f4')) # Even though the stride was negative in 'a', it # becomes positive in the temporary assert_equal(i.operands[0].strides, (4,)) - i.operands[0][:] = [1,2,3] + i.operands[0][:] = [1, 2, 3] i = None - assert_equal(a, [1,2,3]) + assert_equal(a, [1, 2, 3]) def test_iter_array_cast_errors(): # Check that invalid casts are caught # Need to enable copying for casts to occur - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], [['readonly']], op_dtypes=[np.dtype('f8')]) # Also need to allow casting for casts to occur - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly','copy']], casting='no', + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly','copy']], casting='equiv', + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2,dtype='f8'), [], - [['writeonly','updateifcopy']], + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], casting='no', op_dtypes=[np.dtype('f4')]) - assert_raises(TypeError, nditer, arange(2,dtype='f8'), [], - [['writeonly','updateifcopy']], + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], casting='equiv', op_dtypes=[np.dtype('f4')]) # '<f4' -> '>f4' should not work with casting='no' - assert_raises(TypeError, nditer, arange(2,dtype='<f4'), [], - [['readonly','copy']], casting='no', + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', op_dtypes=[np.dtype('>f4')]) # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readwrite','updateifcopy']], + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], casting='safe', op_dtypes=[np.dtype('f8')]) - assert_raises(TypeError, nditer, arange(2,dtype='f8'), [], - [['readwrite','updateifcopy']], + assert_raises(TypeError, nditer, arange(2, 
dtype='f8'), [], + [['readwrite', 'updateifcopy']], casting='safe', op_dtypes=[np.dtype('f4')]) # 'f4' -> 'i4' is neither a safe nor a same-kind cast - assert_raises(TypeError, nditer, arange(2,dtype='f4'), [], - [['readonly','copy']], + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='same_kind', op_dtypes=[np.dtype('i4')]) - assert_raises(TypeError, nditer, arange(2,dtype='i4'), [], - [['writeonly','updateifcopy']], + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) @@ -938,7 +938,7 @@ def test_iter_scalar_cast(): assert_equal(i.value, 2.5) # Safe cast 'f4' -> 'f8' i = nditer(np.float32(2.5), [], - [['readonly','copy']], + [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) assert_equal(i.dtypes[0], np.dtype('f8')) @@ -946,7 +946,7 @@ def test_iter_scalar_cast(): assert_equal(i.value, 2.5) # Same-kind cast 'f8' -> 'f4' i = nditer(np.float64(2.5), [], - [['readonly','copy']], + [['readonly', 'copy']], casting='same_kind', op_dtypes=[np.dtype('f4')]) assert_equal(i.dtypes[0], np.dtype('f4')) @@ -954,7 +954,7 @@ def test_iter_scalar_cast(): assert_equal(i.value, 2.5) # Unsafe cast 'f8' -> 'i4' i = nditer(np.float64(3.0), [], - [['readonly','copy']], + [['readonly', 'copy']], casting='unsafe', op_dtypes=[np.dtype('i4')]) assert_equal(i.dtypes[0], np.dtype('i4')) @@ -988,7 +988,7 @@ def test_iter_object_arrays_basic(): # Check that object arrays work obj = {'a':3,'b':'d'} - a = np.array([[1,2,3], None, obj, None], dtype='O') + a = np.array([[1, 2, 3], None, obj, None], dtype='O') rc = sys.getrefcount(obj) # Need to allow references for object arrays @@ -1001,15 +1001,15 @@ def test_iter_object_arrays_basic(): vals, i, x = [None]*3 assert_equal(sys.getrefcount(obj), rc) - i = nditer(a.reshape(2,2).T, ['refs_ok','buffered'], + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readonly'], order='C') assert_(i.iterationneedsapi) vals = [x[()] for x in i] - assert_equal(np.array(vals, dtype='O'), a.reshape(2,2).ravel(order='F')) + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) vals, i, x = [None]*3 assert_equal(sys.getrefcount(obj), rc) - i = nditer(a.reshape(2,2).T, ['refs_ok','buffered'], + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], ['readwrite'], order='C') for x in i: x[...] = None @@ -1020,34 +1020,34 @@ def test_iter_object_arrays_basic(): def test_iter_object_arrays_conversions(): # Conversions to/from objects a = np.arange(6, dtype='O') - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='i4') for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) a = np.arange(6, dtype='i4') - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') for x in i: x[...] += 1 assert_equal(a, np.arange(6)+1) # Non-contiguous object array - a = np.zeros((6,), dtype=[('p','i1'),('a','O')]) + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) a = a['a'] a[:] = np.arange(6) - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='i4') for x in i: x[...] 
+= 1 assert_equal(a, np.arange(6)+1) #Non-contiguous value array - a = np.zeros((6,), dtype=[('p','i1'),('a','i4')]) + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) a = a['a'] a[:] = np.arange(6) + 98172488 - i = nditer(a, ['refs_ok','buffered'], ['readwrite'], + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], casting='unsafe', op_dtypes='O') ob = i[0][()] rc = sys.getrefcount(ob) @@ -1059,61 +1059,61 @@ def test_iter_object_arrays_conversions(): def test_iter_common_dtype(): # Check that the iterator finds a common data type correctly - i = nditer([array([3],dtype='f4'),array([0],dtype='f8')], + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], ['common_dtype'], - [['readonly','copy']]*2, + [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')); assert_equal(i.dtypes[1], np.dtype('f8')); - i = nditer([array([3],dtype='i4'),array([0],dtype='f4')], + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], ['common_dtype'], - [['readonly','copy']]*2, + [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('f8')); assert_equal(i.dtypes[1], np.dtype('f8')); - i = nditer([array([3],dtype='f4'),array(0,dtype='f8')], + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], ['common_dtype'], - [['readonly','copy']]*2, + [['readonly', 'copy']]*2, casting='same_kind') assert_equal(i.dtypes[0], np.dtype('f4')); assert_equal(i.dtypes[1], np.dtype('f4')); - i = nditer([array([3],dtype='u4'),array(0,dtype='i4')], + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], ['common_dtype'], - [['readonly','copy']]*2, + [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('u4')); assert_equal(i.dtypes[1], np.dtype('u4')); - i = nditer([array([3],dtype='u4'),array(-12,dtype='i4')], + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], ['common_dtype'], - [['readonly','copy']]*2, + [['readonly', 'copy']]*2, casting='safe') assert_equal(i.dtypes[0], np.dtype('i8')); assert_equal(i.dtypes[1], np.dtype('i8')); - i = nditer([array([3],dtype='u4'),array(-12,dtype='i4'), - array([2j],dtype='c8'),array([9],dtype='f8')], + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], ['common_dtype'], - [['readonly','copy']]*4, + [['readonly', 'copy']]*4, casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')); assert_equal(i.dtypes[1], np.dtype('c16')); assert_equal(i.dtypes[2], np.dtype('c16')); assert_equal(i.dtypes[3], np.dtype('c16')); - assert_equal(i.value, (3,-12,2j,9)) + assert_equal(i.value, (3, -12, 2j, 9)) # When allocating outputs, other outputs aren't factored in - i = nditer([array([3],dtype='i4'),None,array([2j],dtype='c16')], [], - [['readonly','copy'], - ['writeonly','allocate'], + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], ['writeonly']], casting='safe') assert_equal(i.dtypes[0], np.dtype('i4')); assert_equal(i.dtypes[1], np.dtype('i4')); assert_equal(i.dtypes[2], np.dtype('c16')); # But, if common data types are requested, they are - i = nditer([array([3],dtype='i4'),None,array([2j],dtype='c16')], + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], ['common_dtype'], - [['readonly','copy'], - ['writeonly','allocate'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], ['writeonly']], casting='safe') assert_equal(i.dtypes[0], np.dtype('c16')); @@ -1124,69 +1124,69 @@ def test_iter_op_axes(): # Check that custom axes work # 
Reverse the axes - a = arange(6).reshape(2,3) - i = nditer([a,a.T], [], [['readonly']]*2, op_axes=[[0,1],[1,0]]) - assert_(all([x==y for (x,y) in i])) - a = arange(24).reshape(2,3,4) - i = nditer([a.T,a], [], [['readonly']]*2, op_axes=[[2,1,0],None]) - assert_(all([x==y for (x,y) in i])) + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x==y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None]) + assert_(all([x==y for (x, y) in i])) # Broadcast 1D to any dimension - a = arange(1,31).reshape(2,3,5) - b = arange(1,3) - i = nditer([a,b], [], [['readonly']]*2, op_axes=[None,[0,-1,-1]]) - assert_equal([x*y for (x,y) in i], (a*b.reshape(2,1,1)).ravel()) - b = arange(1,4) - i = nditer([a,b], [], [['readonly']]*2, op_axes=[None,[-1,0,-1]]) - assert_equal([x*y for (x,y) in i], (a*b.reshape(1,3,1)).ravel()) - b = arange(1,6) - i = nditer([a,b], [], [['readonly']]*2, - op_axes=[None,[np.newaxis,np.newaxis,0]]) - assert_equal([x*y for (x,y) in i], (a*b.reshape(1,1,5)).ravel()) + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']]*2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel()) # Inner product-style broadcasting - a = arange(24).reshape(2,3,4) - b = arange(40).reshape(5,2,4) - i = nditer([a,b], ['multi_index'], [['readonly']]*2, - op_axes=[[0,1,-1,-1],[-1,-1,0,1]]) - assert_equal(i.shape, (2,3,5,2)) + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) # Matrix product-style broadcasting - a = arange(12).reshape(3,4) - b = arange(20).reshape(4,5) - i = nditer([a,b], ['multi_index'], [['readonly']]*2, - op_axes=[[0,-1],[-1,1]]) - assert_equal(i.shape, (3,5)) + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']]*2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) def test_iter_op_axes_errors(): # Check that custom axes throws errors for bad inputs # Wrong number of items in op_axes - a = arange(6).reshape(2,3) - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0],[1],[0]]) + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0], [1], [0]]) # Out of bounds items in op_axes - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[2,1],[0,1]]) - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[2,-1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [2, -1]]) # Duplicate items in op_axes - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,0],[0,1]]) - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[1,1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 0], [0, 1]]) + 
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 1]]) # Different sized arrays in op_axes - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[0,1,0]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [0, 1, 0]]) # Non-broadcastable dimensions in the result - assert_raises(ValueError, nditer, [a,a], [], [['readonly']]*2, - op_axes=[[0,1],[1,0]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2, + op_axes=[[0, 1], [1, 0]]) def test_iter_copy(): # Check that copying the iterator works correctly - a = arange(24).reshape(2,3,4) + a = arange(24).reshape(2, 3, 4) # Simple iterator i = nditer(a) @@ -1198,7 +1198,7 @@ def test_iter_copy(): assert_equal([x[()] for x in i], [x[()] for x in j]) # Buffered iterator - i = nditer(a, ['buffered','ranged'], order='F', buffersize=3) + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) @@ -1206,11 +1206,11 @@ def test_iter_copy(): j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) - i.iterrange = (3,9) + i.iterrange = (3, 9) j = i.copy() assert_equal([x[()] for x in i], [x[()] for x in j]) - i.iterrange = (2,18) + i.iterrange = (2, 18) next(i) next(i) j = i.copy() @@ -1223,7 +1223,7 @@ def test_iter_copy(): i = None assert_equal([x[()] for x in j], a.ravel(order='F')) - a = arange(24, dtype='<i4').reshape(2,3,4) + a = arange(24, dtype='<i4').reshape(2, 3, 4) i = nditer(a, ['buffered'], order='F', casting='unsafe', op_dtypes='>f8', buffersize=5) j = i.copy() @@ -1235,8 +1235,8 @@ def test_iter_allocate_output_simple(): # Simple case a = arange(6) - i = nditer([a,None], [], [['readonly'],['writeonly','allocate']], - op_dtypes=[None,np.dtype('f4')]) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].dtype, np.dtype('f4')) @@ -1244,8 +1244,8 @@ def test_iter_allocate_output_buffered_readwrite(): # Allocated output with buffering + delay_bufalloc a = arange(6) - i = nditer([a,None], ['buffered','delay_bufalloc'], - [['readonly'],['allocate','readwrite']]) + i = nditer([a, None], ['buffered', 'delay_bufalloc'], + [['readonly'], ['allocate', 'readwrite']]) i.operands[1][:] = 1 i.reset() for x in i: @@ -1256,78 +1256,78 @@ def test_iter_allocate_output_itorder(): # The allocated output should match the iteration order # C-order input, best iteration order - a = arange(6, dtype='i4').reshape(2,3) - i = nditer([a,None], [], [['readonly'],['writeonly','allocate']], - op_dtypes=[None,np.dtype('f4')]) + a = arange(6, dtype='i4').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, a.strides) assert_equal(i.operands[1].dtype, np.dtype('f4')) # F-order input, best iteration order - a = arange(24, dtype='i4').reshape(2,3,4).T - i = nditer([a,None], [], [['readonly'],['writeonly','allocate']], - op_dtypes=[None,np.dtype('f4')]) + a = arange(24, dtype='i4').reshape(2, 3, 4).T + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) assert_equal(i.operands[1].strides, a.strides) assert_equal(i.operands[1].dtype, np.dtype('f4')) # Non-contiguous input, C iteration order - a = arange(24, 
dtype='i4').reshape(2,3,4).swapaxes(0,1) - i = nditer([a,None], [], - [['readonly'],['writeonly','allocate']], + a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']], order='C', - op_dtypes=[None,np.dtype('f4')]) + op_dtypes=[None, np.dtype('f4')]) assert_equal(i.operands[1].shape, a.shape) - assert_equal(i.operands[1].strides, (32,16,4)) + assert_equal(i.operands[1].strides, (32, 16, 4)) assert_equal(i.operands[1].dtype, np.dtype('f4')) def test_iter_allocate_output_opaxes(): # Specifing op_axes should work - a = arange(24, dtype='i4').reshape(2,3,4) - i = nditer([None,a], [], [['writeonly','allocate'],['readonly']], - op_dtypes=[np.dtype('u4'),None], - op_axes=[[1,2,0],None]); - assert_equal(i.operands[0].shape, (4,2,3)) - assert_equal(i.operands[0].strides, (4,48,16)) + a = arange(24, dtype='i4').reshape(2, 3, 4) + i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], + op_dtypes=[np.dtype('u4'), None], + op_axes=[[1, 2, 0], None]); + assert_equal(i.operands[0].shape, (4, 2, 3)) + assert_equal(i.operands[0].strides, (4, 48, 16)) assert_equal(i.operands[0].dtype, np.dtype('u4')) def test_iter_allocate_output_types_promotion(): # Check type promotion of automatic outputs - i = nditer([array([3],dtype='f4'),array([0],dtype='f8'),None], [], - [['readonly']]*2+[['writeonly','allocate']]) + i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')); - i = nditer([array([3],dtype='i4'),array([0],dtype='f4'),None], [], - [['readonly']]*2+[['writeonly','allocate']]) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f8')); - i = nditer([array([3],dtype='f4'),array(0,dtype='f8'),None], [], - [['readonly']]*2+[['writeonly','allocate']]) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('f4')); - i = nditer([array([3],dtype='u4'),array(0,dtype='i4'),None], [], - [['readonly']]*2+[['writeonly','allocate']]) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('u4')); - i = nditer([array([3],dtype='u4'),array(-12,dtype='i4'),None], [], - [['readonly']]*2+[['writeonly','allocate']]) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], + [['readonly']]*2+[['writeonly', 'allocate']]) assert_equal(i.dtypes[2], np.dtype('i8')); def test_iter_allocate_output_types_byte_order(): # Verify the rules for byte order changes # When there's just one input, the output type exactly matches - a = array([3],dtype='u4').newbyteorder() - i = nditer([a,None], [], - [['readonly'],['writeonly','allocate']]) + a = array([3], dtype='u4').newbyteorder() + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']]) assert_equal(i.dtypes[0], i.dtypes[1]); # With two or more inputs, the output type is in native byte order - i = nditer([a,a,None], [], - [['readonly'],['readonly'],['writeonly','allocate']]) + i = nditer([a, a, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_(i.dtypes[0] != i.dtypes[2]); assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) def test_iter_allocate_output_types_scalar(): # If the inputs are all scalars, the output should be a scalar - i = 
nditer([None,1,2.3,np.float32(12),np.complex128(3)],[], - [['writeonly','allocate']] + [['readonly']]*4) + i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], + [['writeonly', 'allocate']] + [['readonly']]*4) assert_equal(i.operands[0].dtype, np.dtype('complex128')) assert_equal(i.operands[0].ndim, 0) @@ -1335,81 +1335,81 @@ def test_iter_allocate_output_subtype(): # Make sure that the subtype with priority wins # matrix vs ndarray - a = np.matrix([[1,2], [3,4]]) - b = np.arange(4).reshape(2,2).T - i = nditer([a,b,None], [], - [['readonly'],['readonly'],['writeonly','allocate']]) + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) assert_equal(type(a), type(i.operands[2])) assert_(type(b) != type(i.operands[2])) - assert_equal(i.operands[2].shape, (2,2)) + assert_equal(i.operands[2].shape, (2, 2)) # matrix always wants things to be 2D - b = np.arange(4).reshape(1,2,2) - assert_raises(RuntimeError, nditer, [a,b,None], [], - [['readonly'],['readonly'],['writeonly','allocate']]) + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) # but if subtypes are disabled, the result can still work - i = nditer([a,b,None], [], - [['readonly'],['readonly'],['writeonly','allocate','no_subtype']]) + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']]) assert_equal(type(b), type(i.operands[2])) assert_(type(a) != type(i.operands[2])) - assert_equal(i.operands[2].shape, (1,2,2)) + assert_equal(i.operands[2].shape, (1, 2, 2)) def test_iter_allocate_output_errors(): # Check that the iterator will throw errors for bad output allocations # Need an input if no output data type is specified a = arange(6) - assert_raises(TypeError, nditer, [a,None], [], - [['writeonly'],['writeonly','allocate']]) + assert_raises(TypeError, nditer, [a, None], [], + [['writeonly'], ['writeonly', 'allocate']]) # Allocated output should be flagged for writing - assert_raises(ValueError, nditer, [a,None], [], - [['readonly'],['allocate','readonly']]) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['allocate', 'readonly']]) # Allocated output can't have buffering without delayed bufalloc - assert_raises(ValueError, nditer, [a,None], ['buffered'], - ['allocate','readwrite']) + assert_raises(ValueError, nditer, [a, None], ['buffered'], + ['allocate', 'readwrite']) # Must specify at least one input - assert_raises(ValueError, nditer, [None,None], [], - [['writeonly','allocate'], - ['writeonly','allocate']], - op_dtypes=[np.dtype('f4'),np.dtype('f4')]) + assert_raises(ValueError, nditer, [None, None], [], + [['writeonly', 'allocate'], + ['writeonly', 'allocate']], + op_dtypes=[np.dtype('f4'), np.dtype('f4')]) # If using op_axes, must specify all the axes - a = arange(24, dtype='i4').reshape(2,3,4) - assert_raises(ValueError, nditer, [a,None], [], - [['readonly'],['writeonly','allocate']], - op_dtypes=[None,np.dtype('f4')], - op_axes=[None,[0,np.newaxis,1]]) + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 1]]) # If using op_axes, the axes must be within bounds - assert_raises(ValueError, nditer, [a,None], [], - [['readonly'],['writeonly','allocate']], - op_dtypes=[None,np.dtype('f4')], - 
op_axes=[None,[0,3,1]]) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 3, 1]]) # If using op_axes, there can't be duplicates - assert_raises(ValueError, nditer, [a,None], [], - [['readonly'],['writeonly','allocate']], - op_dtypes=[None,np.dtype('f4')], - op_axes=[None,[0,2,1,0]]) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 2, 1, 0]]) def test_iter_remove_axis(): - a = arange(24).reshape(2,3,4) + a = arange(24).reshape(2, 3, 4) - i = nditer(a,['multi_index']) + i = nditer(a, ['multi_index']) i.remove_axis(1) - assert_equal([x for x in i], a[:,0,:].ravel()) + assert_equal([x for x in i], a[:, 0,:].ravel()) a = a[::-1,:,:] - i = nditer(a,['multi_index']) + i = nditer(a, ['multi_index']) i.remove_axis(0) assert_equal([x for x in i], a[0,:,:].ravel()) def test_iter_remove_multi_index_inner_loop(): # Check that removing multi-index support works - a = arange(24).reshape(2,3,4) + a = arange(24).reshape(2, 3, 4) - i = nditer(a,['multi_index']) + i = nditer(a, ['multi_index']) assert_equal(i.ndim, 3) - assert_equal(i.shape, (2,3,4)) - assert_equal(i.itviews[0].shape, (2,3,4)) + assert_equal(i.shape, (2, 3, 4)) + assert_equal(i.itviews[0].shape, (2, 3, 4)) # Removing the multi-index tracking causes all dimensions to coalesce before = [x for x in i] @@ -1434,57 +1434,57 @@ def test_iter_iterindex(): # Make sure iterindex works buffersize = 5 - a = arange(24).reshape(4,3,2) + a = arange(24).reshape(4, 3, 2) for flags in ([], ['buffered']): i = nditer(a, flags, buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 2 - assert_equal(iter_iterindices(i), list(range(2,24))) + assert_equal(iter_iterindices(i), list(range(2, 24))) i = nditer(a, flags, order='F', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 5 - assert_equal(iter_iterindices(i), list(range(5,24))) + assert_equal(iter_iterindices(i), list(range(5, 24))) i = nditer(a[::-1], flags, order='F', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 9 - assert_equal(iter_iterindices(i), list(range(9,24))) + assert_equal(iter_iterindices(i), list(range(9, 24))) - i = nditer(a[::-1,::-1], flags, order='C', buffersize=buffersize) + i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 13 - assert_equal(iter_iterindices(i), list(range(13,24))) + assert_equal(iter_iterindices(i), list(range(13, 24))) - i = nditer(a[::1,::-1], flags, buffersize=buffersize) + i = nditer(a[::1, ::-1], flags, buffersize=buffersize) assert_equal(iter_iterindices(i), list(range(24))) i.iterindex = 23 - assert_equal(iter_iterindices(i), list(range(23,24))) + assert_equal(iter_iterindices(i), list(range(23, 24))) i.reset() i.iterindex = 2 - assert_equal(iter_iterindices(i), list(range(2,24))) + assert_equal(iter_iterindices(i), list(range(2, 24))) def test_iter_iterrange(): # Make sure getting and resetting the iterrange works buffersize = 5 - a = arange(24, dtype='i4').reshape(4,3,2) + a = arange(24, dtype='i4').reshape(4, 3, 2) a_fort = a.ravel(order='F') i = nditer(a, ['ranged'], ['readonly'], order='F', buffersize=buffersize) - assert_equal(i.iterrange, (0,24)) + assert_equal(i.iterrange, (0, 24)) assert_equal([x[()] for x in i], a_fort) - for r in [(0,24), (1,2), (3,24), (5,5), 
(0,20), (23,24)]: + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) - i = nditer(a, ['ranged','buffered'], ['readonly'], order='F', + i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', op_dtypes='f8', buffersize=buffersize) - assert_equal(i.iterrange, (0,24)) + assert_equal(i.iterrange, (0, 24)) assert_equal([x[()] for x in i], a_fort) - for r in [(0,24), (1,2), (3,24), (5,5), (0,20), (23,24)]: + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) @@ -1495,12 +1495,12 @@ def test_iter_iterrange(): val = np.concatenate((val, x)) return val - i = nditer(a, ['ranged','buffered','external_loop'], + i = nditer(a, ['ranged', 'buffered', 'external_loop'], ['readonly'], order='F', op_dtypes='f8', buffersize=buffersize) - assert_equal(i.iterrange, (0,24)) + assert_equal(i.iterrange, (0, 24)) assert_equal(get_array(i), a_fort) - for r in [(0,24), (1,2), (3,24), (5,5), (0,20), (23,24)]: + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: i.iterrange = r assert_equal(i.iterrange, r) assert_equal(get_array(i), a_fort[r[0]:r[1]]) @@ -1510,21 +1510,21 @@ def test_iter_buffering(): arrays = [] # F-order swapped array arrays.append(np.arange(24, - dtype='c16').reshape(2,3,4).T.newbyteorder().byteswap()) + dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap()) # Contiguous 1-dimensional array arrays.append(np.arange(10, dtype='f4')) # Unaligned array a = np.zeros((4*16+1,), dtype='i1')[1:] a.dtype = 'i4' - a[:] = np.arange(16,dtype='i4') + a[:] = np.arange(16, dtype='i4') arrays.append(a) # 4-D F-order array - arrays.append(np.arange(120,dtype='i4').reshape(5,3,2,4).T) + arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) for a in arrays: - for buffersize in (1,2,3,5,8,11,16,1024): + for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): vals = [] - i = nditer(a, ['buffered','external_loop'], - [['readonly','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readonly', 'nbo', 'aligned']], order='C', casting='equiv', buffersize=buffersize) @@ -1538,9 +1538,9 @@ def test_iter_write_buffering(): # Test that buffering of writes is working # F-order swapped array - a = np.arange(24).reshape(2,3,4).T.newbyteorder().byteswap() + a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap() i = nditer(a, ['buffered'], - [['readwrite','nbo','aligned']], + [['readwrite', 'nbo', 'aligned']], casting='equiv', order='C', buffersize=16) @@ -1556,7 +1556,7 @@ def test_iter_buffering_delayed_alloc(): a = np.arange(6) b = np.arange(1, dtype='f4') - i = nditer([a,b], ['buffered','delay_bufalloc','multi_index','reduce_ok'], + i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], ['readwrite'], casting='unsafe', op_dtypes='f4') @@ -1573,15 +1573,15 @@ def test_iter_buffering_delayed_alloc(): assert_equal(i.multi_index, (0,)) assert_equal(i[0], 0) i[1] = 1 - assert_equal(i[0:2], [0,1]) - assert_equal([[x[0][()],x[1][()]] for x in i], list(zip(range(6), [1]*6))) + assert_equal(i[0:2], [0, 1]) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6))) def test_iter_buffered_cast_simple(): # Test that buffering can handle a simple cast a = np.arange(10, dtype='f4') - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 
'aligned']], casting='same_kind', op_dtypes=[np.dtype('f8')], buffersize=3) @@ -1594,8 +1594,8 @@ def test_iter_buffered_cast_byteswapped(): # Test that buffering can handle a cast which requires swap->cast->swap a = np.arange(10, dtype='f4').newbyteorder().byteswap() - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('f8').newbyteorder()], buffersize=3) @@ -1608,8 +1608,8 @@ def test_iter_buffered_cast_byteswapped(): warnings.simplefilter("ignore", np.ComplexWarning) a = np.arange(10, dtype='f8').newbyteorder().byteswap() - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], casting='unsafe', op_dtypes=[np.dtype('c8').newbyteorder()], buffersize=3) @@ -1625,8 +1625,8 @@ def test_iter_buffered_cast_byteswapped_complex(): a = np.arange(10, dtype='c8').newbyteorder().byteswap() a += 2j - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('c16')], buffersize=3) @@ -1636,8 +1636,8 @@ def test_iter_buffered_cast_byteswapped_complex(): a = np.arange(10, dtype='c8') a += 2j - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('c16').newbyteorder()], buffersize=3) @@ -1647,8 +1647,8 @@ def test_iter_buffered_cast_byteswapped_complex(): a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap() a += 2j - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('c16')], buffersize=3) @@ -1657,8 +1657,8 @@ def test_iter_buffered_cast_byteswapped_complex(): assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j) a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap() - i = nditer(a, ['buffered','external_loop'], - [['readwrite','nbo','aligned']], + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], casting='same_kind', op_dtypes=[np.dtype('f4')], buffersize=7) @@ -1670,9 +1670,9 @@ def test_iter_buffered_cast_structured_type(): # Tests buffering of structured types # simple -> struct type (duplicates the value) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')] + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.arange(3, dtype='f4') + 0.5 - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = [np.array(x) for x in i] @@ -1687,13 +1687,13 @@ def test_iter_buffered_cast_structured_type(): assert_equal(vals[0].dtype, np.dtype(sdt)) # object -> struct type - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')] + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.zeros((3,), dtype='O') - a[0] = (0.5,0.5,[[0.5,0.5,0.5],[0.5,0.5,0.5]],0.5) - a[1] = (1.5,1.5,[[1.5,1.5,1.5],[1.5,1.5,1.5]],1.5) - a[2] = (2.5,2.5,[[2.5,2.5,2.5],[2.5,2.5,2.5]],2.5) + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, 
[[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) rc = sys.getrefcount(a[0]) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = [x.copy() for x in i] @@ -1711,8 +1711,8 @@ def test_iter_buffered_cast_structured_type(): # struct type -> simple (takes the first value) sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - a = np.array([(5.5,7,'test'),(8,10,11)], dtype=sdt) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes='i4') assert_equal([x[()] for x in i], [5, 8]) @@ -1720,20 +1720,20 @@ def test_iter_buffered_cast_structured_type(): # struct type -> struct type (field-wise copy) sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] - a = np.array([(1,2,3),(4,5,6)], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) assert_equal([np.array(x) for x in i], - [np.array((3,1,2), dtype=sdt2), - np.array((6,4,5), dtype=sdt2)]) + [np.array((3, 1, 2), dtype=sdt2), + np.array((6, 4, 5), dtype=sdt2)]) # struct type -> struct type (field gets discarded) sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1,2,3),(4,5,6)], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1741,15 +1741,15 @@ def test_iter_buffered_cast_structured_type(): for x in i: vals.append(np.array(x)) x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1), dtype=sdt2), - np.array((5,4), dtype=sdt2)]) - assert_equal(a, np.array([(5,2,None),(8,5,None)], dtype=sdt1)) + assert_equal(vals, [np.array((2, 1), dtype=sdt2), + np.array((5, 4), dtype=sdt2)]) + assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1)) # struct type -> struct type (structured field gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'),('b','i4')])] + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])] sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1,2,(0,9)),(4,5,(20,21))], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], + a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1757,15 +1757,15 @@ def test_iter_buffered_cast_structured_type(): for x in i: vals.append(np.array(x)) x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1), dtype=sdt2), - np.array((5,4), dtype=sdt2)]) - assert_equal(a, np.array([(5,2,(0,0)),(8,5,(0,0))], dtype=sdt1)) + assert_equal(vals, [np.array((2, 1), dtype=sdt2), + np.array((5, 4), dtype=sdt2)]) + assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1)) # struct type -> struct type (structured field w/ ref gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'),('b','O')])] + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1,2,(0,9)),(4,5,(20,21))], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], + a = 
np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1773,36 +1773,36 @@ def test_iter_buffered_cast_structured_type(): for x in i: vals.append(np.array(x)) x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1), dtype=sdt2), - np.array((5,4), dtype=sdt2)]) - assert_equal(a, np.array([(5,2,(0,None)),(8,5,(0,None))], dtype=sdt1)) + assert_equal(vals, [np.array((2, 1), dtype=sdt2), + np.array((5, 4), dtype=sdt2)]) + assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1)) # struct type -> struct type back (structured field w/ ref gets discarded) sdt1 = [('b', 'O'), ('a', 'f8')] - sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'),('b','O')])] - a = np.array([(1,2),(4,5)], dtype=sdt1) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], + sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] + a = np.array([(1, 2), (4, 5)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) vals = [] for x in i: vals.append(np.array(x)) - assert_equal(x['d'], np.array((0, None), dtype=[('a','i2'),('b','O')])) + assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')])) x['a'] = x['b']+3 - assert_equal(vals, [np.array((2,1,(0,None)), dtype=sdt2), - np.array((5,4,(0,None)), dtype=sdt2)]) - assert_equal(a, np.array([(1,4),(4,7)], dtype=sdt1)) + assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2), + np.array((5, 4, (0, None)), dtype=sdt2)]) + assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1)) def test_iter_buffered_cast_subarray(): # Tests buffering of subarrays # one element -> many (copies it to all) sdt1 = [('a', 'f4')] - sdt2 = [('a', 'f8', (3,2,2))] + sdt2 = [('a', 'f8', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) a['a'] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1810,11 +1810,11 @@ def test_iter_buffered_cast_subarray(): assert_(np.all(x['a'] == count)) # one element -> many -> back (copies it to all) - sdt1 = [('a', 'O', (1,1))] - sdt2 = [('a', 'O', (3,2,2))] + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1823,14 +1823,14 @@ def test_iter_buffered_cast_subarray(): assert_(np.all(x['a'] == count)) x['a'][0] += 2 count += 1 - assert_equal(a['a'], np.arange(6).reshape(6,1,1)+2) + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2) # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'O', (3,2,2))] + sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'O', (1,))] a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readwrite'], + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1839,14 +1839,14 @@ def test_iter_buffered_cast_subarray(): assert_equal(x['a'], count) x['a'] += 2 count += 1 - assert_equal(a['a'], np.arange(6).reshape(6,1,1,1)*np.ones((1,3,2,2))+2) + assert_equal(a['a'], 
np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2) # many -> one element -> back (copies just element 0) - sdt1 = [('a', 'f8', (3,2,2))] + sdt1 = [('a', 'f8', (3, 2, 2))] sdt2 = [('a', 'O', (1,))] a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1856,11 +1856,11 @@ def test_iter_buffered_cast_subarray(): count += 1 # many -> one element (copies just element 0) - sdt1 = [('a', 'O', (3,2,2))] + sdt1 = [('a', 'O', (3, 2, 2))] sdt2 = [('a', 'f4', (1,))] a = np.zeros((6,), dtype=sdt1) - a['a'][:,0,0,0] = np.arange(6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1870,11 +1870,11 @@ def test_iter_buffered_cast_subarray(): count += 1 # many -> matching shape (straightforward copy) - sdt1 = [('a', 'O', (3,2,2))] - sdt2 = [('a', 'f4', (3,2,2))] + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*3*2*2).reshape(6,3,2,2) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1887,8 +1887,8 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (6,))] sdt2 = [('a', 'f4', (2,))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*6).reshape(6,6) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'] = np.arange(6*6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1901,23 +1901,23 @@ def test_iter_buffered_cast_subarray(): sdt1 = [('a', 'f8', (2,))] sdt2 = [('a', 'f4', (6,))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6,2) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: assert_equal(x['a'][:2], a[count]['a']) - assert_equal(x['a'][2:], [0,0,0,0]) + assert_equal(x['a'][2:], [0, 0, 0, 0]) count += 1 # vector -> matrix (broadcasts) sdt1 = [('a', 'f8', (2,))] - sdt2 = [('a', 'f4', (2,2))] + sdt2 = [('a', 'f4', (2, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6,2) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'] = np.arange(6*2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) @@ -1928,76 +1928,76 @@ def test_iter_buffered_cast_subarray(): count += 1 # vector -> matrix (broadcasts and zero-pads) - sdt1 = [('a', 'f8', (2,1))] - sdt2 = [('a', 'f4', (3,2))] + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2).reshape(6,2,1) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'] = np.arange(6*2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: - assert_equal(x['a'][:2,0], a[count]['a'][:,0]) - 
assert_equal(x['a'][:2,1], a[count]['a'][:,0]) - assert_equal(x['a'][2,:], [0,0]) + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2,:], [0, 0]) count += 1 # matrix -> matrix (truncates and zero-pads) - sdt1 = [('a', 'f8', (2,3))] - sdt2 = [('a', 'f4', (3,2))] + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] a = np.zeros((6,), dtype=sdt1) - a['a'] = np.arange(6*2*3).reshape(6,2,3) - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + a['a'] = np.arange(6*2*3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) count = 0 for x in i: - assert_equal(x['a'][:2,0], a[count]['a'][:,0]) - assert_equal(x['a'][:2,1], a[count]['a'][:,1]) - assert_equal(x['a'][2,:], [0,0]) + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2,:], [0, 0]) count += 1 def test_iter_buffering_badwriteback(): # Writing back from a buffer cannot combine elements # a needs write buffering, but had a broadcast dimension - a = np.arange(6).reshape(2,3,1) - b = np.arange(12).reshape(2,3,2) - assert_raises(ValueError,nditer,[a,b], - ['buffered','external_loop'], - [['readwrite'],['writeonly']], + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], order='C') # But if a is readonly, it's fine - i = nditer([a,b],['buffered','external_loop'], - [['readonly'],['writeonly']], + i = nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], order='C') # If a has just one element, it's fine too (constant 0 stride, a reduction) - a = np.arange(1).reshape(1,1,1) - i = nditer([a,b],['buffered','external_loop','reduce_ok'], - [['readwrite'],['writeonly']], + a = np.arange(1).reshape(1, 1, 1) + i = nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], order='C') # check that it fails on other dimensions too - a = np.arange(6).reshape(1,3,2) - assert_raises(ValueError,nditer,[a,b], - ['buffered','external_loop'], - [['readwrite'],['writeonly']], + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], order='C') - a = np.arange(4).reshape(2,1,2) - assert_raises(ValueError,nditer,[a,b], - ['buffered','external_loop'], - [['readwrite'],['writeonly']], + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], order='C') def test_iter_buffering_string(): # Safe casting disallows shrinking strings a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) assert_equal(a.dtype, np.dtype('S4')); - assert_raises(TypeError,nditer,a,['buffered'],['readonly'], + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='S2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') assert_equal(i[0], asbytes('abc')) @@ -2005,7 +2005,7 @@ def test_iter_buffering_string(): a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode) assert_equal(a.dtype, np.dtype('U4')); - assert_raises(TypeError,nditer,a,['buffered'],['readonly'], + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], op_dtypes='U2') i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') assert_equal(i[0], sixu('abc')) @@ -2014,7 +2014,7 @@ def 
test_iter_buffering_string(): def test_iter_buffering_growinner(): # Test that the inner loop grows when no buffering is needed a = np.arange(30) - i = nditer(a, ['buffered','growinner','external_loop'], + i = nditer(a, ['buffered', 'growinner', 'external_loop'], buffersize=5) # Should end up with just one inner loop here assert_equal(i[0].size, a.size) @@ -2025,8 +2025,8 @@ def test_iter_buffered_reduce_reuse(): # large enough array for all views, including negative strides. a = np.arange(2*3**5)[3**5:3**5+1] flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] - op_flags = [('readonly',), ('readwrite','allocate')] - op_axes_list = [[(0,1,2), (0,1,-1)], [(0,1,2), (0,-1,-1)]] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] # wrong dtype to force buffering op_dtypes = [np.float, a.dtype] @@ -2037,7 +2037,7 @@ def test_iter_buffered_reduce_reuse(): # last stride is reduced and because of that not # important for this test, as it is the inner stride. strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) - arr = np.lib.stride_tricks.as_strided(a, (3,3,3), strides) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) for skip in [0, 1]: yield arr, op_axes, skip @@ -2072,174 +2072,174 @@ def test_iter_buffered_reduce_reuse(): def test_iter_no_broadcast(): # Test that the no_broadcast flag works - a = np.arange(24).reshape(2,3,4) - b = np.arange(6).reshape(2,3,1) - c = np.arange(12).reshape(3,4) + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) - i = nditer([a,b,c], [], - [['readonly','no_broadcast'],['readonly'],['readonly']]) - assert_raises(ValueError, nditer, [a,b,c], [], - [['readonly'],['readonly','no_broadcast'],['readonly']]) - assert_raises(ValueError, nditer, [a,b,c], [], - [['readonly'],['readonly'],['readonly','no_broadcast']]) + i = nditer([a, b, c], [], + [['readonly', 'no_broadcast'], ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) def test_iter_nested_iters_basic(): # Test nested iteration basic usage - a = arange(12).reshape(2,3,2) + a = arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[0],[1,2]]) + i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5],[6,7,8,9,10,11]]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - i, j = np.nested_iters(a, [[0,1],[2]]) + i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[0,2],[1]]) + i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_nested_iters_reorder(): # Test nested iteration basic usage - a = arange(12).reshape(2,3,2) + a = arange(12).reshape(2, 3, 2) # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0],[2,1]]) + i, j = np.nested_iters(a, [[0], [2, 1]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5],[6,7,8,9,10,11]]) + 
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - i, j = np.nested_iters(a, [[1,0],[2]]) + i, j = np.nested_iters(a, [[1, 0], [2]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[2,0],[1]]) + i, j = np.nested_iters(a, [[2, 0], [1]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0],[2,1]], order='C') + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,2,4,1,3,5],[6,8,10,7,9,11]]) + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) - i, j = np.nested_iters(a, [[1,0],[2]], order='C') + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1],[6,7],[2,3],[8,9],[4,5],[10,11]]) + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - i, j = np.nested_iters(a, [[2,0],[1]], order='C') + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[6,8,10],[1,3,5],[7,9,11]]) + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) def test_iter_nested_iters_flip_axes(): # Test nested iteration with negative axes - a = arange(12).reshape(2,3,2)[::-1,::-1,::-1] + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0],[1,2]]) + i, j = np.nested_iters(a, [[0], [1, 2]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5],[6,7,8,9,10,11]]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - i, j = np.nested_iters(a, [[0,1],[2]]) + i, j = np.nested_iters(a, [[0, 1], [2]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - i, j = np.nested_iters(a, [[0,2],[1]]) + i, j = np.nested_iters(a, [[0, 2], [1]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0],[1,2]], order='C') + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[11,10,9,8,7,6],[5,4,3,2,1,0]]) + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) - i, j = np.nested_iters(a, [[0,1],[2]], order='C') + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[11,10],[9,8],[7,6],[5,4],[3,2],[1,0]]) + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) - i, j = np.nested_iters(a, [[0,2],[1]], order='C') + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[11,9,7],[10,8,6],[5,3,1],[4,2,0]]) + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) def test_iter_nested_iters_broadcast(): # Test nested iteration with broadcasting - a = arange(2).reshape(2,1) - b = arange(3).reshape(1,3) + a = arange(2).reshape(2, 1) + b = 
arange(3).reshape(1, 3) - i, j = np.nested_iters([a,b], [[0],[1]]) + i, j = np.nested_iters([a, b], [[0], [1]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[[0,0],[0,1],[0,2]],[[1,0],[1,1],[1,2]]]) + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) - i, j = np.nested_iters([a,b], [[1],[0]]) + i, j = np.nested_iters([a, b], [[1], [0]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[[0,0],[1,0]],[[0,1],[1,1]],[[0,2],[1,2]]]) + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) def test_iter_nested_iters_dtype_copy(): # Test nested iteration with a copy to change dtype # copy - a = arange(6, dtype='i4').reshape(2,3) - i, j = np.nested_iters(a, [[0],[1]], - op_flags=['readonly','copy'], + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1,2],[3,4,5]]) + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) vals = None # updateifcopy - a = arange(6, dtype='f4').reshape(2,3) - i, j = np.nested_iters(a, [[0],[1]], - op_flags=['readwrite','updateifcopy'], + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], casting='same_kind', op_dtypes='f8') assert_equal(j[0].dtype, np.dtype('f8')) for x in i: for y in j: y[...] += 1 - assert_equal(a, [[0,1,2],[3,4,5]]) + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) i, j, x, y = (None,)*4 # force the updateifcopy - assert_equal(a, [[1,2,3],[4,5,6]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_nested_iters_dtype_buffered(): # Test nested iteration with buffering to change dtype - a = arange(6, dtype='f4').reshape(2,3) - i, j = np.nested_iters(a, [[0],[1]], + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], flags=['buffered'], op_flags=['readwrite'], casting='same_kind', @@ -2248,27 +2248,27 @@ def test_iter_nested_iters_dtype_buffered(): for x in i: for y in j: y[...] += 1 - assert_equal(a, [[1,2,3],[4,5,6]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): a = np.arange(6) - assert_raises(ValueError, nditer, [a,None], [], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0],[-1]]) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) - a = np.arange(6).reshape(2,3) - assert_raises(ValueError, nditer, [a,None], ['external_loop'], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0,1],[-1,-1]]) + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) def test_iter_reduction(): # Test doing reductions with the iterator a = np.arange(6) - i = nditer([a,None], ['reduce_ok'], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0],[-1]]) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) # Need to initialize the output operand to the addition unit i.operands[1][...] 
= 0 # Do the reduction @@ -2278,10 +2278,10 @@ def test_iter_reduction(): assert_equal(i.operands[1].ndim, 0) assert_equal(i.operands[1], np.sum(a)) - a = np.arange(6).reshape(2,3) - i = nditer([a,None], ['reduce_ok','external_loop'], - [['readonly'], ['readwrite','allocate']], - op_axes=[[0,1],[-1,-1]]) + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) # Need to initialize the output operand to the addition unit i.operands[1][...] = 0 # Reduction shape/strides for the output @@ -2296,14 +2296,14 @@ def test_iter_reduction(): # This is a tricky reduction case for the buffering double loop # to handle - a = np.ones((2,3,5)) - it1 = nditer([a,None], ['reduce_ok','external_loop'], - [['readonly'], ['readwrite','allocate']], - op_axes=[None,[0,-1,1]]) - it2 = nditer([a,None], ['reduce_ok','external_loop', - 'buffered','delay_bufalloc'], - [['readonly'], ['readwrite','allocate']], - op_axes=[None,[0,-1,1]], buffersize=10) + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) it1.operands[1].fill(0) it2.operands[1].fill(0) it2.reset() @@ -2319,9 +2319,9 @@ def test_iter_buffering_reduction(): a = np.arange(6) b = np.array(0., dtype='f8').byteswap().newbyteorder() - i = nditer([a,b], ['reduce_ok', 'buffered'], - [['readonly'], ['readwrite','nbo']], - op_axes=[[0],[-1]]) + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) assert_equal(i[1].dtype, np.dtype('f8')) assert_(i[1].dtype != b.dtype) # Do the reduction @@ -2330,11 +2330,11 @@ def test_iter_buffering_reduction(): # Since no axes were specified, should have allocated a scalar assert_equal(b, np.sum(a)) - a = np.arange(6).reshape(2,3) - b = np.array([0,0], dtype='f8').byteswap().newbyteorder() - i = nditer([a,b], ['reduce_ok','external_loop', 'buffered'], - [['readonly'], ['readwrite','nbo']], - op_axes=[[0,1],[0,-1]]) + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap().newbyteorder() + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) # Reduction shape/strides for the output assert_equal(i[1].shape, (3,)) assert_equal(i[1].strides, (0,)) @@ -2345,135 +2345,135 @@ def test_iter_buffering_reduction(): # Iterator inner double loop was wrong on this one p = np.arange(2) + 1 - it = np.nditer([p,None], - ['delay_bufalloc','reduce_ok','buffered','external_loop'], - [['readonly'],['readwrite','allocate']], - op_axes=[[-1,0],[-1,-1]], - itershape=(2,2)) + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) it.operands[1].fill(0) it.reset() - assert_equal(it[0], [1,2,1,2]) + assert_equal(it[0], [1, 2, 1, 2]) def test_iter_buffering_reduction_reuse_reduce_loops(): # There was a bug triggering reuse of the reduce loop inappropriately, # which caused processing to happen in unnecessarily small chunks # and overran the buffer. 
- a = np.zeros((2,7)) - b = np.zeros((1,7)) - it = np.nditer([a,b], flags=['reduce_ok', 'external_loop', 'buffered'], + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], op_flags=[['readonly'], ['readwrite']], buffersize = 5) bufsizes = [] for x, y in it: bufsizes.append(x.shape[0]) - assert_equal(bufsizes, [5,2,5,2]) + assert_equal(bufsizes, [5, 2, 5, 2]) assert_equal(sum(bufsizes), a.size) def test_iter_writemasked_badinput(): - a = np.zeros((2,3)) + a = np.zeros((2, 3)) b = np.zeros((3,)) - m = np.array([[True,True,False],[False,True,False]]) - m2 = np.array([True,True,False]) - m3 = np.array([0,1,1], dtype='u1') - mbad1 = np.array([0,1,1], dtype='i1') - mbad2 = np.array([0,1,1], dtype='f4') + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') # Need an 'arraymask' if any operand is 'writemasked' - assert_raises(ValueError, nditer, [a,m], [], - [['readwrite','writemasked'],['readonly']]) + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) # A 'writemasked' operand must not be readonly - assert_raises(ValueError, nditer, [a,m], [], - [['readonly','writemasked'],['readonly','arraymask']]) + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) # 'writemasked' and 'arraymask' may not be used together - assert_raises(ValueError, nditer, [a,m], [], - [['readonly'],['readwrite','arraymask','writemasked']]) + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) # 'arraymask' may only be specified once - assert_raises(ValueError, nditer, [a,m, m2], [], - [['readwrite','writemasked'], - ['readonly','arraymask'], - ['readonly','arraymask']]) + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) # An 'arraymask' with nothing 'writemasked' also doesn't make sense - assert_raises(ValueError, nditer, [a,m], [], - [['readwrite'],['readonly','arraymask']]) + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) # A writemasked reduction requires a similarly smaller mask - assert_raises(ValueError, nditer, [a,b,m], ['reduce_ok'], + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], [['readonly'], - ['readwrite','writemasked'], - ['readonly','arraymask']]) + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) # But this should work with a smaller/equal mask to the reduction operand - np.nditer([a,b,m2], ['reduce_ok'], + np.nditer([a, b, m2], ['reduce_ok'], [['readonly'], - ['readwrite','writemasked'], - ['readonly','arraymask']]) + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) # The arraymask itself cannot be a reduction - assert_raises(ValueError, nditer, [a,b,m2], ['reduce_ok'], + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], [['readonly'], - ['readwrite','writemasked'], - ['readwrite','arraymask']]) + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) # A uint8 mask is ok too - np.nditer([a,m3], ['buffered'], - [['readwrite','writemasked'], - ['readonly','arraymask']], - op_dtypes=['f4',None], + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], casting='same_kind') # An 
int8 mask isn't ok - assert_raises(TypeError, np.nditer, [a,mbad1], ['buffered'], - [['readwrite','writemasked'], - ['readonly','arraymask']], - op_dtypes=['f4',None], + assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], casting='same_kind') # A float32 mask isn't ok - assert_raises(TypeError, np.nditer, [a,mbad2], ['buffered'], - [['readwrite','writemasked'], - ['readonly','arraymask']], - op_dtypes=['f4',None], + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], casting='same_kind') def test_iter_writemasked(): a = np.zeros((3,), dtype='f8') - msk = np.array([True,True,False]) + msk = np.array([True, True, False]) # When buffering is unused, 'writemasked' effectively does nothing. # It's up to the user of the iterator to obey the requested semantics. - it = np.nditer([a,msk], [], - [['readwrite','writemasked'], - ['readonly','arraymask']]) + it = np.nditer([a, msk], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) for x, m in it: x[...] = 1 # Because we violated the semantics, all the values became 1 - assert_equal(a, [1,1,1]) + assert_equal(a, [1, 1, 1]) # Even if buffering is enabled, we still may be accessing the array # directly. - it = np.nditer([a,msk], ['buffered'], - [['readwrite','writemasked'], - ['readonly','arraymask']]) + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) for x, m in it: x[...] = 2.5 # Because we violated the semantics, all the values became 2.5 - assert_equal(a, [2.5,2.5,2.5]) + assert_equal(a, [2.5, 2.5, 2.5]) # If buffering will definitely happening, for instance because of # a cast, only the items selected by the mask will be copied back from # the buffer. - it = np.nditer([a,msk], ['buffered'], - [['readwrite','writemasked'], - ['readonly','arraymask']], - op_dtypes=['i8',None], + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['i8', None], casting='unsafe') for x, m in it: x[...] 
= 3 # Even though we violated the semantics, only the selected values # were copied back - assert_equal(a, [3,3,2.5]) + assert_equal(a, [3, 3, 2.5]) def test_iter_non_writable_attribute_deletion(): it = np.nditer(np.ones(2)) @@ -2514,15 +2514,15 @@ def test_iter_allocated_array_dtypes(): for a, b in it: b[0] = a - 1 b[1] = a + 1 - assert_equal(it.operands[1], [[0,2], [2,4], [19,21]]) + assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]]) # Make sure this works for scalars too - it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2,2))]) + it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))]) for a, b, c in it: - c[0,0] = a - b - c[0,1] = a + b - c[1,0] = a * b - c[1,1] = a / b + c[0, 0] = a - b + c[0, 1] = a + b + c[1, 0] = a * b + c[1, 1] = a / b assert_equal(it.operands[2], [[8, 12], [20, 5]]) @@ -2546,9 +2546,9 @@ def test_0d_iter(): # note that itershape=(), still behaves like None due to the conversions # Test a more complex buffered casting case (same as another test above) - sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')] + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] a = np.array(0.5, dtype='f4') - i = nditer(a, ['buffered','refs_ok'], ['readonly'], + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes=sdt) vals = next(i) assert_equal(vals['a'], 0.5) @@ -2558,25 +2558,25 @@ def test_0d_iter(): def test_0d_nested_iter(): - a = np.arange(12).reshape(2,3,2) - i, j = np.nested_iters(a, [[],[1,0,2]]) + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0,1,2,3,4,5,6,7,8,9,10,11]]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - i, j = np.nested_iters(a, [[1,0,2],[]]) + i, j = np.nested_iters(a, [[1, 0, 2], []]) vals = [] for x in i: vals.append([y for y in j]) - assert_equal(vals, [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9],[10],[11]]) + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - i, j, k = np.nested_iters(a, [[2,0], [] ,[1]]) + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) vals = [] for x in i: for y in j: vals.append([z for z in k]) - assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) if __name__ == "__main__": diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index fdf1c1ac1..6202bd125 100644 --- a/numpy/core/tests/test_numeric.py +++ b/numpy/core/tests/test_numeric.py @@ -17,28 +17,28 @@ class Vec(object): if sequence is None: sequence=[] self.array=array(sequence) - def __add__(self,other): + def __add__(self, other): out=Vec() out.array=self.array+other.array return out - def __sub__(self,other): + def __sub__(self, other): out=Vec() out.array=self.array-other.array return out - def __mul__(self,other): # with scalar + def __mul__(self, other): # with scalar out=Vec(self.array.copy()) out.array*=other return out - def __rmul__(self,other): + def __rmul__(self, other): return self*other class TestDot(TestCase): def setUp(self): - self.A = rand(10,8) - self.b1 = rand(8,1) + self.A = rand(10, 8) + self.b1 = rand(8, 1) self.b2 = rand(8) - self.b3 = rand(1,8) + self.b3 = rand(1, 8) self.b4 = rand(10) self.N = 14 @@ -74,8 +74,8 @@ class TestDot(TestCase): def test_vecmat3(self): A, b4 = self.A, self.b4 - c1 = dot(A.transpose(),b4) - c2 = dot_(A.transpose(),b4) + c1 = dot(A.transpose(), b4) + c2 = dot_(A.transpose(), b4) 
assert_almost_equal(c1, c2, decimal=self.N) def test_vecvecouter(self): @@ -91,35 +91,35 @@ class TestDot(TestCase): assert_almost_equal(c1, c2, decimal=self.N) def test_columnvect1(self): - b1 = ones((3,1)) + b1 = ones((3, 1)) b2 = [5.3] - c1 = dot(b1,b2) - c2 = dot_(b1,b2) + c1 = dot(b1, b2) + c2 = dot_(b1, b2) assert_almost_equal(c1, c2, decimal=self.N) def test_columnvect2(self): - b1 = ones((3,1)).transpose() + b1 = ones((3, 1)).transpose() b2 = [6.2] - c1 = dot(b2,b1) - c2 = dot_(b2,b1) + c1 = dot(b2, b1) + c2 = dot_(b2, b1) assert_almost_equal(c1, c2, decimal=self.N) def test_vecscalar(self): - b1 = rand(1,1) - b2 = rand(1,8) - c1 = dot(b1,b2) - c2 = dot_(b1,b2) + b1 = rand(1, 1) + b2 = rand(1, 8) + c1 = dot(b1, b2) + c2 = dot_(b1, b2) assert_almost_equal(c1, c2, decimal=self.N) def test_vecscalar2(self): - b1 = rand(8,1) - b2 = rand(1,1) - c1 = dot(b1,b2) - c2 = dot_(b1,b2) + b1 = rand(8, 1) + b2 = rand(1, 1) + c1 = dot(b1, b2) + c2 = dot_(b1, b2) assert_almost_equal(c1, c2, decimal=self.N) def test_all(self): - dims = [(),(1,),(1,1)] + dims = [(), (1,), (1, 1)] for dim1 in dims: for dim2 in dims: arg1 = rand(*dim1) @@ -130,53 +130,53 @@ class TestDot(TestCase): assert_almost_equal(c1, c2, decimal=self.N) def test_vecobject(self): - U_non_cont = transpose([[1.,1.],[1.,2.]]) + U_non_cont = transpose([[1., 1.], [1., 2.]]) U_cont = ascontiguousarray(U_non_cont) - x = array([Vec([1.,0.]),Vec([0.,1.])]) - zeros = array([Vec([0.,0.]),Vec([0.,0.])]) - zeros_test = dot(U_cont,x) - dot(U_non_cont,x) + x = array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = dot(U_cont, x) - dot(U_non_cont, x) assert_equal(zeros[0].array, zeros_test[0].array) assert_equal(zeros[1].array, zeros_test[1].array) class TestResize(TestCase): def test_copies(self): - A = array([[1,2],[3,4]]) - Ar1 = array([[1,2,3,4],[1,2,3,4]]) - assert_equal(resize(A, (2,4)), Ar1) + A = array([[1, 2], [3, 4]]) + Ar1 = array([[1, 2, 3, 4], [1, 2, 3, 4]]) + assert_equal(resize(A, (2, 4)), Ar1) - Ar2 = array([[1,2],[3,4],[1,2],[3,4]]) - assert_equal(resize(A, (4,2)), Ar2) + Ar2 = array([[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(resize(A, (4, 2)), Ar2) - Ar3 = array([[1,2,3],[4,1,2],[3,4,1],[2,3,4]]) - assert_equal(resize(A, (4,3)), Ar3) + Ar3 = array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) + assert_equal(resize(A, (4, 3)), Ar3) def test_zeroresize(self): - A = array([[1,2],[3,4]]) + A = array([[1, 2], [3, 4]]) Ar = resize(A, (0,)) assert_equal(Ar, array([])) class TestNonarrayArgs(TestCase): # check that non-array arguments to functions wrap them in arrays def test_squeeze(self): - A = [[[1,1,1],[2,2,2],[3,3,3]]] - assert_(squeeze(A).shape == (3,3)) + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_(squeeze(A).shape == (3, 3)) def test_cumproduct(self): - A = [[1,2,3],[4,5,6]] - assert_(all(cumproduct(A) == array([1,2,6,24,120,720]))) + A = [[1, 2, 3], [4, 5, 6]] + assert_(all(cumproduct(A) == array([1, 2, 6, 24, 120, 720]))) def test_size(self): - A = [[1,2,3],[4,5,6]] + A = [[1, 2, 3], [4, 5, 6]] assert_(size(A) == 6) - assert_(size(A,0) == 2) - assert_(size(A,1) == 3) + assert_(size(A, 0) == 2) + assert_(size(A, 1) == 3) def test_mean(self): - A = [[1,2,3],[4,5,6]] + A = [[1, 2, 3], [4, 5, 6]] assert_(mean(A) == 3.5) - assert_(all(mean(A,0) == array([2.5,3.5,4.5]))) - assert_(all(mean(A,1) == array([2.,5.]))) + assert_(all(mean(A, 0) == array([2.5, 3.5, 4.5]))) + assert_(all(mean(A, 1) == array([2., 5.]))) with warnings.catch_warnings(record=True) as w: 
warnings.filterwarnings('always', '', RuntimeWarning) @@ -184,10 +184,10 @@ class TestNonarrayArgs(TestCase): assert_(w[0].category is RuntimeWarning) def test_std(self): - A = [[1,2,3],[4,5,6]] + A = [[1, 2, 3], [4, 5, 6]] assert_almost_equal(std(A), 1.707825127659933) - assert_almost_equal(std(A,0), array([1.5, 1.5, 1.5])) - assert_almost_equal(std(A,1), array([0.81649658, 0.81649658])) + assert_almost_equal(std(A, 0), array([1.5, 1.5, 1.5])) + assert_almost_equal(std(A, 1), array([0.81649658, 0.81649658])) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) @@ -195,10 +195,10 @@ class TestNonarrayArgs(TestCase): assert_(w[0].category is RuntimeWarning) def test_var(self): - A = [[1,2,3],[4,5,6]] + A = [[1, 2, 3], [4, 5, 6]] assert_almost_equal(var(A), 2.9166666666666665) - assert_almost_equal(var(A,0), array([2.25, 2.25, 2.25])) - assert_almost_equal(var(A,1), array([0.66666667, 0.66666667])) + assert_almost_equal(var(A, 0), array([2.25, 2.25, 2.25])) + assert_almost_equal(var(A, 1), array([0.66666667, 0.66666667])) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) @@ -461,31 +461,31 @@ class TestFloatExceptions(TestCase): invalid = 'invalid' self.assert_raises_fpe(underflow, - lambda a,b:a/b, ft_tiny, ft_max) + lambda a, b:a/b, ft_tiny, ft_max) self.assert_raises_fpe(underflow, - lambda a,b:a*b, ft_tiny, ft_tiny) + lambda a, b:a*b, ft_tiny, ft_tiny) self.assert_raises_fpe(overflow, - lambda a,b:a*b, ft_max, ftype(2)) + lambda a, b:a*b, ft_max, ftype(2)) self.assert_raises_fpe(overflow, - lambda a,b:a/b, ft_max, ftype(0.5)) + lambda a, b:a/b, ft_max, ftype(0.5)) self.assert_raises_fpe(overflow, - lambda a,b:a+b, ft_max, ft_max*ft_eps) + lambda a, b:a+b, ft_max, ft_max*ft_eps) self.assert_raises_fpe(overflow, - lambda a,b:a-b, -ft_max, ft_max*ft_eps) + lambda a, b:a-b, -ft_max, ft_max*ft_eps) self.assert_raises_fpe(overflow, np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, - lambda a,b:a/b, ftype(1), ftype(0)) + lambda a, b:a/b, ftype(1), ftype(0)) self.assert_raises_fpe(invalid, - lambda a,b:a/b, ftype(np.inf), ftype(np.inf)) + lambda a, b:a/b, ftype(np.inf), ftype(np.inf)) self.assert_raises_fpe(invalid, - lambda a,b:a/b, ftype(0), ftype(0)) + lambda a, b:a/b, ftype(0), ftype(0)) self.assert_raises_fpe(invalid, - lambda a,b:a-b, ftype(np.inf), ftype(np.inf)) + lambda a, b:a-b, ftype(np.inf), ftype(np.inf)) self.assert_raises_fpe(invalid, - lambda a,b:a+b, ftype(np.inf), ftype(-np.inf)) + lambda a, b:a+b, ftype(np.inf), ftype(-np.inf)) self.assert_raises_fpe(invalid, - lambda a,b:a*b, ftype(0), ftype(np.inf)) + lambda a, b:a*b, ftype(0), ftype(np.inf)) class TestTypes(TestCase): def check_promotion_cases(self, promote_func): @@ -497,56 +497,56 @@ class TestTypes(TestCase): c64, c128, cld = complex64(0), complex128(0), clongdouble(0) # coercion within the same kind - assert_equal(promote_func(i8,i16), np.dtype(int16)) - assert_equal(promote_func(i32,i8), np.dtype(int32)) - assert_equal(promote_func(i16,i64), np.dtype(int64)) - assert_equal(promote_func(u8,u32), np.dtype(uint32)) - assert_equal(promote_func(f32,f64), np.dtype(float64)) - assert_equal(promote_func(fld,f32), np.dtype(longdouble)) - assert_equal(promote_func(f64,fld), np.dtype(longdouble)) - assert_equal(promote_func(c128,c64), np.dtype(complex128)) - assert_equal(promote_func(cld,c128), np.dtype(clongdouble)) - assert_equal(promote_func(c64,fld), np.dtype(clongdouble)) + assert_equal(promote_func(i8, 
i16), np.dtype(int16)) + assert_equal(promote_func(i32, i8), np.dtype(int32)) + assert_equal(promote_func(i16, i64), np.dtype(int64)) + assert_equal(promote_func(u8, u32), np.dtype(uint32)) + assert_equal(promote_func(f32, f64), np.dtype(float64)) + assert_equal(promote_func(fld, f32), np.dtype(longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(complex128)) + assert_equal(promote_func(cld, c128), np.dtype(clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(clongdouble)) # coercion between kinds - assert_equal(promote_func(b,i32), np.dtype(int32)) - assert_equal(promote_func(b,u8), np.dtype(uint8)) - assert_equal(promote_func(i8,u8), np.dtype(int16)) - assert_equal(promote_func(u8,i32), np.dtype(int32)) - assert_equal(promote_func(i64,u32), np.dtype(int64)) - assert_equal(promote_func(u64,i32), np.dtype(float64)) - assert_equal(promote_func(i32,f32), np.dtype(float64)) - assert_equal(promote_func(i64,f32), np.dtype(float64)) - assert_equal(promote_func(f32,i16), np.dtype(float32)) - assert_equal(promote_func(f32,u32), np.dtype(float64)) - assert_equal(promote_func(f32,c64), np.dtype(complex64)) - assert_equal(promote_func(c128,f32), np.dtype(complex128)) - assert_equal(promote_func(cld,f64), np.dtype(clongdouble)) + assert_equal(promote_func(b, i32), np.dtype(int32)) + assert_equal(promote_func(b, u8), np.dtype(uint8)) + assert_equal(promote_func(i8, u8), np.dtype(int16)) + assert_equal(promote_func(u8, i32), np.dtype(int32)) + assert_equal(promote_func(i64, u32), np.dtype(int64)) + assert_equal(promote_func(u64, i32), np.dtype(float64)) + assert_equal(promote_func(i32, f32), np.dtype(float64)) + assert_equal(promote_func(i64, f32), np.dtype(float64)) + assert_equal(promote_func(f32, i16), np.dtype(float32)) + assert_equal(promote_func(f32, u32), np.dtype(float64)) + assert_equal(promote_func(f32, c64), np.dtype(complex64)) + assert_equal(promote_func(c128, f32), np.dtype(complex128)) + assert_equal(promote_func(cld, f64), np.dtype(clongdouble)) # coercion between scalars and 1-D arrays - assert_equal(promote_func(array([b]),i8), np.dtype(int8)) - assert_equal(promote_func(array([b]),u8), np.dtype(uint8)) - assert_equal(promote_func(array([b]),i32), np.dtype(int32)) - assert_equal(promote_func(array([b]),u32), np.dtype(uint32)) - assert_equal(promote_func(array([i8]),i64), np.dtype(int8)) - assert_equal(promote_func(u64,array([i32])), np.dtype(int32)) - assert_equal(promote_func(i64,array([u32])), np.dtype(uint32)) - assert_equal(promote_func(int32(-1),array([u64])), np.dtype(float64)) - assert_equal(promote_func(f64,array([f32])), np.dtype(float32)) - assert_equal(promote_func(fld,array([f32])), np.dtype(float32)) - assert_equal(promote_func(array([f64]),fld), np.dtype(float64)) - assert_equal(promote_func(fld,array([c64])), np.dtype(complex64)) - assert_equal(promote_func(c64,array([f64])), np.dtype(complex128)) - assert_equal(promote_func(complex64(3j),array([f64])), + assert_equal(promote_func(array([b]), i8), np.dtype(int8)) + assert_equal(promote_func(array([b]), u8), np.dtype(uint8)) + assert_equal(promote_func(array([b]), i32), np.dtype(int32)) + assert_equal(promote_func(array([b]), u32), np.dtype(uint32)) + assert_equal(promote_func(array([i8]), i64), np.dtype(int8)) + assert_equal(promote_func(u64, array([i32])), np.dtype(int32)) + assert_equal(promote_func(i64, array([u32])), np.dtype(uint32)) + assert_equal(promote_func(int32(-1), array([u64])), np.dtype(float64)) + assert_equal(promote_func(f64, 
array([f32])), np.dtype(float32)) + assert_equal(promote_func(fld, array([f32])), np.dtype(float32)) + assert_equal(promote_func(array([f64]), fld), np.dtype(float64)) + assert_equal(promote_func(fld, array([c64])), np.dtype(complex64)) + assert_equal(promote_func(c64, array([f64])), np.dtype(complex128)) + assert_equal(promote_func(complex64(3j), array([f64])), np.dtype(complex128)) # coercion between scalars and 1-D arrays, where # the scalar has greater kind than the array - assert_equal(promote_func(array([b]),f64), np.dtype(float64)) - assert_equal(promote_func(array([b]),i64), np.dtype(int64)) - assert_equal(promote_func(array([b]),u64), np.dtype(uint64)) - assert_equal(promote_func(array([i8]),f64), np.dtype(float64)) - assert_equal(promote_func(array([u16]),f64), np.dtype(float64)) + assert_equal(promote_func(array([b]), f64), np.dtype(float64)) + assert_equal(promote_func(array([b]), i64), np.dtype(int64)) + assert_equal(promote_func(array([b]), u64), np.dtype(uint64)) + assert_equal(promote_func(array([i8]), f64), np.dtype(float64)) + assert_equal(promote_func(array([u16]), f64), np.dtype(float64)) # uint and int are treated as the same "kind" for # the purposes of array-scalar promotion. @@ -556,7 +556,7 @@ class TestTypes(TestCase): # the purposes of array-scalar promotion, so that you can do # (0j + float32array) to get a complex64 array instead of # a complex128 array. - assert_equal(promote_func(array([f32]),c128), np.dtype(complex64)) + assert_equal(promote_func(array([f32]), c128), np.dtype(complex64)) def test_coercion(self): def res_type(a, b): @@ -565,7 +565,7 @@ class TestTypes(TestCase): # Use-case: float/complex scalar * bool/int8 array # shouldn't narrow the float/complex type - for a in [np.array([True,False]), np.array([-3,12], dtype=np.int8)]: + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: b = 1.234 * a assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype) b = np.longdouble(1.234) * a @@ -692,8 +692,8 @@ class TestFromiter(TestCase): expected = array(list(self.makegen())) a = fromiter(self.makegen(), int) a20 = fromiter(self.makegen(), int, 20) - self.assertTrue(alltrue(a == expected,axis=0)) - self.assertTrue(alltrue(a20 == expected[:20],axis=0)) + self.assertTrue(alltrue(a == expected, axis=0)) + self.assertTrue(alltrue(a20 == expected[:20], axis=0)) def load_data(self, n, eindex): # Utility method for the issue 2592 tests. 
@@ -731,48 +731,48 @@ class TestNonzero(TestCase): assert_equal(np.nonzero(array(1)), ([0],)) def test_nonzero_onedim(self): - x = array([1,0,2,-1,0,0,8]) + x = array([1, 0, 2, -1, 0, 0, 8]) assert_equal(np.count_nonzero(x), 4) assert_equal(np.count_nonzero(x), 4) assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) - x = array([(1,2),(0,0),(1,1),(-1,3),(0,7)], - dtype=[('a','i4'),('b','i2')]) + x = array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + dtype=[('a', 'i4'), ('b', 'i2')]) assert_equal(np.count_nonzero(x['a']), 3) assert_equal(np.count_nonzero(x['b']), 4) - assert_equal(np.nonzero(x['a']), ([0,2,3],)) - assert_equal(np.nonzero(x['b']), ([0,2,3,4],)) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) def test_nonzero_twodim(self): - x = array([[0,1,0],[2,0,3]]) + x = array([[0, 1, 0], [2, 0, 3]]) assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0,1,1],[1,0,2])) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) x = np.eye(3) assert_equal(np.count_nonzero(x), 3) - assert_equal(np.nonzero(x), ([0,1,2],[0,1,2])) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) - x = array([[(0,1),(0,0),(1,11)], - [(1,1),(1,0),(0,0)], - [(0,0),(1,5),(0,1)]], dtype=[('a','f4'),('b','u1')]) + x = array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) assert_equal(np.count_nonzero(x['a']), 4) assert_equal(np.count_nonzero(x['b']), 5) - assert_equal(np.nonzero(x['a']), ([0,1,1,2],[2,0,1,1])) - assert_equal(np.nonzero(x['b']), ([0,0,1,2,2],[0,2,0,1,2])) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) assert_equal(np.count_nonzero(x['a'].T), 4) assert_equal(np.count_nonzero(x['b'].T), 5) - assert_equal(np.nonzero(x['a'].T), ([0,1,1,2],[1,1,2,0])) - assert_equal(np.nonzero(x['b'].T), ([0,0,1,2,2],[0,1,2,0,2])) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) class TestIndex(TestCase): def test_boolean(self): - a = rand(3,5,8) - V = rand(5,8) - g1 = randint(0,5,size=15) - g2 = randint(0,8,size=15) - V[g1,g2] = -V[g1,g2] - assert_((array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all()) + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((array([a[0][V>0], a[1][V>0], a[2][V>0]]) == a[:, V>0]).all()) def test_boolean_edgecase(self): a = np.array([], dtype='int32') @@ -784,10 +784,10 @@ class TestIndex(TestCase): class TestBinaryRepr(TestCase): def test_zero(self): - assert_equal(binary_repr(0),'0') + assert_equal(binary_repr(0), '0') def test_large(self): - assert_equal(binary_repr(10736848),'101000111101010011010000') + assert_equal(binary_repr(10736848), '101000111101010011010000') def test_negative(self): assert_equal(binary_repr(-1), '-1') @@ -810,52 +810,52 @@ class TestBaseRepr(TestCase): class TestArrayComparisons(TestCase): def test_array_equal(self): - res = array_equal(array([1,2]), array([1,2])) + res = array_equal(array([1, 2]), array([1, 2])) assert_(res) assert_(type(res) is bool) - res = array_equal(array([1,2]), array([1,2,3])) + res = array_equal(array([1, 2]), array([1, 2, 3])) assert_(not res) assert_(type(res) is bool) - res = array_equal(array([1,2]), array([3,4])) + res = array_equal(array([1, 2]), array([3, 4])) assert_(not res) assert_(type(res) is bool) - res = array_equal(array([1,2]), 
array([1,3])) + res = array_equal(array([1, 2]), array([1, 3])) assert_(not res) assert_(type(res) is bool) res = array_equal(array(['a'], dtype='S1'), array(['a'], dtype='S1')) assert_(res) assert_(type(res) is bool) - res = array_equal(array([('a',1)], dtype='S1,u4'), array([('a',1)], dtype='S1,u4')) + res = array_equal(array([('a', 1)], dtype='S1,u4'), array([('a', 1)], dtype='S1,u4')) assert_(res) assert_(type(res) is bool) def test_array_equiv(self): - res = array_equiv(array([1,2]), array([1,2])) + res = array_equiv(array([1, 2]), array([1, 2])) assert_(res) assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([1,2,3])) + res = array_equiv(array([1, 2]), array([1, 2, 3])) assert_(not res) assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([3,4])) + res = array_equiv(array([1, 2]), array([3, 4])) assert_(not res) assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([1,3])) + res = array_equiv(array([1, 2]), array([1, 3])) assert_(not res) assert_(type(res) is bool) - res = array_equiv(array([1,1]), array([1])) + res = array_equiv(array([1, 1]), array([1])) assert_(res) assert_(type(res) is bool) - res = array_equiv(array([1,1]), array([[1],[1]])) + res = array_equiv(array([1, 1]), array([[1], [1]])) assert_(res) assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([2])) + res = array_equiv(array([1, 2]), array([2])) assert_(not res) assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([[1],[2]])) + res = array_equiv(array([1, 2]), array([[1], [2]])) assert_(not res) assert_(type(res) is bool) - res = array_equiv(array([1,2]), array([[1,2,3],[4,5,6],[7,8,9]])) + res = array_equiv(array([1, 2]), array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) assert_(not res) assert_(type(res) is bool) @@ -875,9 +875,9 @@ class TestClip(TestCase): def fastclip(self, a, m, M, out=None): if out is None: - return a.clip(m,M) + return a.clip(m, M) else: - return a.clip(m,M,out) + return a.clip(m, M, out) def clip(self, a, m, M, out=None): # use slow-clip @@ -1111,8 +1111,8 @@ class TestClip(TestCase): a = self._generate_int32_data(self.nr, self.nc) m = float32(-2) M = float32(4) - act = self.fastclip(a,m,M) - ac = self.clip(a,m,M) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) assert_array_strict_equal(ac, act) def test_type_cast_05(self): @@ -1152,7 +1152,7 @@ class TestClip(TestCase): M = 1. a_s = self._neg_byteorder(a) assert_(not a_s.dtype.isnative) - ac = self.fastclip(a_s, m , M) + ac = self.fastclip(a_s, m, M) act = a_s.clip(m, M) assert_array_strict_equal(ac, act) @@ -1163,7 +1163,7 @@ class TestClip(TestCase): M = 1. m_s = self._neg_byteorder(m) assert_(not m_s.dtype.isnative) - ac = self.fastclip(a, m_s , M) + ac = self.fastclip(a, m_s, M) act = self.clip(a, m_s, M) assert_array_strict_equal(ac, act) @@ -1174,7 +1174,7 @@ class TestClip(TestCase): m = float32(-0.5) M = float32(1) act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m , M, out = b) + ac = self.fastclip(a, m, M, out = b) assert_array_strict_equal(ac, act) def test_type_cast_11(self): @@ -1185,7 +1185,7 @@ class TestClip(TestCase): bt = b.copy() m = -0.5 M = 1. 
- self.fastclip(a, m , M, out = b) + self.fastclip(a, m, M, out = b) self.clip(a, m, M, out = bt) assert_array_strict_equal(b, bt) @@ -1196,7 +1196,7 @@ class TestClip(TestCase): m = int32(0) M = int32(1) act = self.clip(a, m, M, out = b) - ac = self.fastclip(a, m , M, out = b) + ac = self.fastclip(a, m, M, out = b) assert_array_strict_equal(ac, act) def test_clip_with_out_simple(self): @@ -1296,21 +1296,21 @@ class TestAllclose(object): def tearDown(self): np.seterr(**self.olderr) - def tst_allclose(self,x,y): - assert_(allclose(x,y), "%s and %s not close" % (x,y)) + def tst_allclose(self, x, y): + assert_(allclose(x, y), "%s and %s not close" % (x, y)) - def tst_not_allclose(self,x,y): - assert_(not allclose(x,y), "%s and %s shouldn't be close" % (x,y)) + def tst_not_allclose(self, x, y): + assert_(not allclose(x, y), "%s and %s shouldn't be close" % (x, y)) def test_ip_allclose(self): #Parametric test factory. - arr = array([100,1000]) - aran = arange(125).reshape((5,5,5)) + arr = array([100, 1000]) + aran = arange(125).reshape((5, 5, 5)) atol = self.atol rtol = self.rtol - data = [([1,0], [1,0]), + data = [([1, 0], [1, 0]), ([atol], [0]), ([1], [1+rtol+atol]), (arr, arr + arr*rtol), @@ -1319,36 +1319,36 @@ class TestAllclose(object): (inf, inf), (inf, [inf])] - for (x,y) in data: - yield (self.tst_allclose,x,y) + for (x, y) in data: + yield (self.tst_allclose, x, y) def test_ip_not_allclose(self): #Parametric test factory. - aran = arange(125).reshape((5,5,5)) + aran = arange(125).reshape((5, 5, 5)) atol = self.atol rtol = self.rtol - data = [([inf,0], [1,inf]), - ([inf,0], [1,0]), - ([inf,inf], [1,inf]), - ([inf,inf], [1,0]), + data = [([inf, 0], [1, inf]), + ([inf, 0], [1, 0]), + ([inf, inf], [1, inf]), + ([inf, inf], [1, 0]), ([-inf, 0], [inf, 0]), - ([nan,0], [nan,0]), + ([nan, 0], [nan, 0]), ([atol*2], [0]), ([1], [1+rtol+atol*2]), (aran, aran + aran*atol + atol*2), - (array([inf,1]), array([0,inf]))] + (array([inf, 1]), array([0, inf]))] - for (x,y) in data: - yield (self.tst_not_allclose,x,y) + for (x, y) in data: + yield (self.tst_not_allclose, x, y) def test_no_parameter_modification(self): - x = array([inf,1]) - y = array([0,inf]) - allclose(x,y) - assert_array_equal(x,array([inf,1])) - assert_array_equal(y,array([0,inf])) + x = array([inf, 1]) + y = array([0, inf]) + allclose(x, y) + assert_array_equal(x, array([inf, 1])) + assert_array_equal(y, array([0, inf])) class TestIsclose(object): @@ -1358,8 +1358,8 @@ class TestIsclose(object): def setup(self): atol = self.atol rtol = self.rtol - arr = array([100,1000]) - aran = arange(125).reshape((5,5,5)) + arr = array([100, 1000]) + aran = arange(125).reshape((5, 5, 5)) self.all_close_tests = [ ([1, 0], [1, 0]), @@ -1420,12 +1420,12 @@ class TestIsclose(object): def test_ip_all_isclose(self): self.setup() - for (x,y) in self.all_close_tests: + for (x, y) in self.all_close_tests: yield (self.tst_all_isclose, x, y) def test_ip_none_isclose(self): self.setup() - for (x,y) in self.none_close_tests: + for (x, y) in self.none_close_tests: yield (self.tst_none_isclose, x, y) def test_ip_isclose_allclose(self): @@ -1465,31 +1465,31 @@ class TestIsclose(object): class TestStdVar(TestCase): def setUp(self): - self.A = array([1,-1,1,-1]) + self.A = array([1, -1, 1, -1]) self.real_var = 1 def test_basic(self): - assert_almost_equal(var(self.A),self.real_var) - assert_almost_equal(std(self.A)**2,self.real_var) + assert_almost_equal(var(self.A), self.real_var) + assert_almost_equal(std(self.A)**2, self.real_var) def test_ddof1(self): - 
assert_almost_equal(var(self.A,ddof=1), + assert_almost_equal(var(self.A, ddof=1), self.real_var*len(self.A)/float(len(self.A)-1)) - assert_almost_equal(std(self.A,ddof=1)**2, + assert_almost_equal(std(self.A, ddof=1)**2, self.real_var*len(self.A)/float(len(self.A)-1)) def test_ddof2(self): - assert_almost_equal(var(self.A,ddof=2), + assert_almost_equal(var(self.A, ddof=2), self.real_var*len(self.A)/float(len(self.A)-2)) - assert_almost_equal(std(self.A,ddof=2)**2, + assert_almost_equal(std(self.A, ddof=2)**2, self.real_var*len(self.A)/float(len(self.A)-2)) class TestStdVarComplex(TestCase): def test_basic(self): - A = array([1,1.j,-1,-1.j]) + A = array([1, 1.j, -1, -1.j]) real_var = 1 - assert_almost_equal(var(A),real_var) - assert_almost_equal(std(A)**2,real_var) + assert_almost_equal(var(A), real_var) + assert_almost_equal(std(A)**2, real_var) class TestCreationFuncs(TestCase): @@ -1573,20 +1573,20 @@ class TestLikeFuncs(TestCase): (arange(6, dtype='f4'), None), (arange(6), 'c16'), # 2D C-layout arrays - (arange(6).reshape(2,3), None), - (arange(6).reshape(3,2), 'i1'), + (arange(6).reshape(2, 3), None), + (arange(6).reshape(3, 2), 'i1'), # 2D F-layout arrays - (arange(6).reshape((2,3), order='F'), None), - (arange(6).reshape((3,2), order='F'), 'i1'), + (arange(6).reshape((2, 3), order='F'), None), + (arange(6).reshape((3, 2), order='F'), 'i1'), # 3D C-layout arrays - (arange(24).reshape(2,3,4), None), - (arange(24).reshape(4,3,2), 'f4'), + (arange(24).reshape(2, 3, 4), None), + (arange(24).reshape(4, 3, 2), 'f4'), # 3D F-layout arrays - (arange(24).reshape((2,3,4), order='F'), None), - (arange(24).reshape((4,3,2), order='F'), 'f4'), + (arange(24).reshape((2, 3, 4), order='F'), None), + (arange(24).reshape((4, 3, 2), order='F'), 'f4'), # 3D non-C/F-layout arrays - (arange(24).reshape(2,3,4).swapaxes(0,1), None), - (arange(24).reshape(4,3,2).swapaxes(0,1), '?'), + (arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), ] def compare_array_value(self, dz, value, fill_value): @@ -1654,7 +1654,7 @@ class TestLikeFuncs(TestCase): self.compare_array_value(dz, value, fill_value) # Test the 'subok' parameter - a = np.matrix([[1,2],[3,4]]) + a = np.matrix([[1, 2], [3, 4]]) b = like_function(a, **fill_kwarg) assert_(type(b) is np.matrix) @@ -1769,7 +1769,7 @@ class TestRoll(TestCase): assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])) def test_roll2d(self): - x2 = np.reshape(np.arange(10), (2,5)) + x2 = np.reshape(np.arange(10), (2, 5)) x2r = np.roll(x2, 1) assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]])) diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py index 173ebbf9c..3cafcf65e 100644 --- a/numpy/core/tests/test_numerictypes.py +++ b/numpy/core/tests/test_numerictypes.py @@ -20,8 +20,8 @@ Pdescr = [ # A plain list of tuples with values for testing: PbufferT = [ # x y z - ([3,2], [[6.,4.],[6.,4.]], 8), - ([4,3], [[7.,5.],[7.,5.]], 9), + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), ] @@ -60,8 +60,8 @@ NbufferT = [ # x Info color info y z # value y2 Info2 name z2 Name Value # name value y3 z3 - ([3,2], (6j, 6., (asbytes('nn'), [6j,4j], [6.,4.], [1,2]), asbytes('NN'), True), asbytes('cc'), (asunicode('NN'), 6j), [[6.,4.],[6.,4.]], 8), - ([4,3], (7j, 7., (asbytes('oo'), [7j,5j], [7.,5.], [2,1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7.,5.],[7.,5.]], 9), + ([3, 2], (6j, 6., (asbytes('nn'), [6j, 4j], [6., 4.], [1, 2]), asbytes('NN'), 
True), asbytes('cc'), (asunicode('NN'), 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (asbytes('oo'), [7j, 5j], [7., 5.], [2, 1]), asbytes('OO'), False), asbytes('dd'), (asunicode('OO'), 7j), [[7., 5.], [7., 5.]], 9), ] @@ -74,7 +74,7 @@ def normalize_descr(descr): for item in descr: dtype = item[1] if isinstance(dtype, str): - if dtype[0] not in ['|','<','>']: + if dtype[0] not in ['|', '<', '>']: onebyte = dtype[1:] == "1" if onebyte or dtype[0] in ['S', 'V', 'b']: dtype = "|" + dtype @@ -125,13 +125,13 @@ class create_zeros(object): def test_zerosMD(self): """Check creation of multi-dimensional objects""" - h = np.zeros((2,3), dtype=self._descr) + h = np.zeros((2, 3), dtype=self._descr) self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) self.assertTrue(h.dtype['z'].name == 'uint8') self.assertTrue(h.dtype['z'].char == 'B') self.assertTrue(h.dtype['z'].type == np.uint8) # A small check that data is ok - assert_equal(h['z'], np.zeros((2,3), dtype='u1')) + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) class test_create_zeros_plain(create_zeros, TestCase): @@ -160,7 +160,7 @@ class create_values(object): h = np.array([self._buffer], dtype=self._descr) self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) if self.multiple_rows: - self.assertTrue(h.shape == (1,2)) + self.assertTrue(h.shape == (1, 2)) else: self.assertTrue(h.shape == (1,)) @@ -169,9 +169,9 @@ class create_values(object): h = np.array([[self._buffer]], dtype=self._descr) self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) if self.multiple_rows: - self.assertTrue(h.shape == (1,1,2)) + self.assertTrue(h.shape == (1, 1, 2)) else: - self.assertTrue(h.shape == (1,1)) + self.assertTrue(h.shape == (1, 1)) class test_create_values_plain_single(create_values, TestCase): @@ -338,37 +338,37 @@ class test_read_values_nested_multiple(read_values_nested, TestCase): class TestEmptyField(TestCase): def test_assign(self): a = np.arange(10, dtype=np.float32) - a.dtype = [("int", "<0i4"),("float", "<2f4")] - assert_(a['int'].shape == (5,0)) - assert_(a['float'].shape == (5,2)) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) class TestCommonType(TestCase): def test_scalar_loses1(self): - res = np.find_common_type(['f4','f4','i2'],['f8']) + res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) assert_(res == 'f4') def test_scalar_loses2(self): - res = np.find_common_type(['f4','f4'],['i8']) + res = np.find_common_type(['f4', 'f4'], ['i8']) assert_(res == 'f4') def test_scalar_wins(self): - res = np.find_common_type(['f4','f4','i2'],['c8']) + res = np.find_common_type(['f4', 'f4', 'i2'], ['c8']) assert_(res == 'c8') def test_scalar_wins2(self): - res = np.find_common_type(['u4','i4','i4'],['f4']) + res = np.find_common_type(['u4', 'i4', 'i4'], ['f4']) assert_(res == 'f8') def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose - res = np.find_common_type(['u8','i8','i8'],['f8']) + res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) assert_(res == 'f8') class TestMultipleFields(TestCase): def setUp(self): - self.ary = np.array([(1,2,3,4),(5,6,7,8)], dtype='i4,f4,i2,c8') + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') def _bad_call(self): - return self.ary['f0','f1'] + return self.ary['f0', 'f1'] def test_no_tuple(self): self.assertRaises(ValueError, self._bad_call) def test_return(self): - res = self.ary[['f0','f2']].tolist() - assert_(res == [(1,3), (5,7)]) + res = self.ary[['f0', 'f2']].tolist() + 
assert_(res == [(1, 3), (5, 7)]) if __name__ == "__main__": run_module_suite() diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py index 342ca27c6..487b5de7d 100644 --- a/numpy/core/tests/test_print.py +++ b/numpy/core/tests/test_print.py @@ -16,7 +16,7 @@ _REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} def check_float_type(tp): - for x in [0, 1,-1, 1e20] : + for x in [0, 1, -1, 1e20] : assert_equal(str(tp(x)), str(float(x)), err_msg='Failed str formatting for type %s' % tp) @@ -56,7 +56,7 @@ def test_nan_inf_float(): yield check_nan_inf_float, t def check_complex_type(tp): - for x in [0, 1,-1, 1e20] : + for x in [0, 1, -1, 1e20] : assert_equal(str(tp(x)), str(complex(x)), err_msg='Failed str formatting for type %s' % tp) assert_equal(str(tp(x*1j)), str(complex(x*1j)), @@ -130,7 +130,7 @@ def _test_redirected_print(x, tp, ref=None): err_msg='print failed for type%s' % tp) def check_float_type_print(tp): - for x in [0, 1,-1, 1e20]: + for x in [0, 1, -1, 1e20]: _test_redirected_print(float(x), tp) for x in [np.inf, -np.inf, np.nan]: diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 86b0e48a2..7118af8c3 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -25,34 +25,34 @@ class TestRegression(TestCase): def test_invalid_round(self,level=rlevel): """Ticket #3""" v = 4.7599999999999998 - assert_array_equal(np.array([v]),np.array(v)) + assert_array_equal(np.array([v]), np.array(v)) def test_mem_empty(self,level=rlevel): """Ticket #7""" - np.empty((1,),dtype=[('x',np.int64)]) + np.empty((1,), dtype=[('x', np.int64)]) def test_pickle_transposed(self,level=rlevel): """Ticket #16""" - a = np.transpose(np.array([[2,9],[7,0],[3,8]])) + a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]])) f = BytesIO() - pickle.dump(a,f) + pickle.dump(a, f) f.seek(0) b = pickle.load(f) f.close() - assert_array_equal(a,b) + assert_array_equal(a, b) def test_typeNA(self,level=rlevel): """Ticket #31""" - assert_equal(np.typeNA[np.int64],'Int64') - assert_equal(np.typeNA[np.uint64],'UInt64') + assert_equal(np.typeNA[np.int64], 'Int64') + assert_equal(np.typeNA[np.uint64], 'UInt64') def test_dtype_names(self,level=rlevel): """Ticket #35""" - dt = np.dtype([(('name','label'),np.int32,3)]) + dt = np.dtype([(('name', 'label'), np.int32, 3)]) def test_reduce(self,level=rlevel): """Ticket #40""" - assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5) + assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5) def test_zeros_order(self,level=rlevel): """Ticket #43""" @@ -79,7 +79,7 @@ class TestRegression(TestCase): def test_negative_nd_indexing(self,level=rlevel): """Ticket #49""" - c = np.arange(125).reshape((5,5,5)) + c = np.arange(125).reshape((5, 5, 5)) origidx = np.array([-1, 0, 1]) idx = np.array(origidx) c[idx] @@ -88,7 +88,7 @@ class TestRegression(TestCase): def test_char_dump(self,level=rlevel): """Ticket #50""" f = BytesIO() - ca = np.char.array(np.arange(1000,1010),itemsize=4) + ca = np.char.array(np.arange(1000, 1010), itemsize=4) ca.dump(f) f.seek(0) ca = np.load(f) @@ -96,11 +96,11 @@ class TestRegression(TestCase): def test_noncontiguous_fill(self,level=rlevel): """Ticket #58.""" - a = np.zeros((5,3)) - b = a[:,:2,] + a = np.zeros((5, 3)) + b = a[:, :2,] def rs(): b.shape = (10,) - self.assertRaises(AttributeError,rs) + self.assertRaises(AttributeError, rs) def test_bool(self,level=rlevel): """Ticket #60""" @@ -109,14 +109,14 @@ class TestRegression(TestCase): def 
test_indexing1(self,level=rlevel): """Ticket #64""" descr = [('x', [('y', [('z', 'c16', (2,)),]),]),] - buffer = ((([6j,4j],),),) + buffer = ((([6j, 4j],),),) h = np.array(buffer, dtype=descr) h['x']['y']['z'] def test_indexing2(self,level=rlevel): """Ticket #65""" descr = [('x', 'i4', (2,))] - buffer = ([3,2],) + buffer = ([3, 2],) h = np.array(buffer, dtype=descr) h['x'] @@ -128,7 +128,7 @@ class TestRegression(TestCase): def test_scalar_compare(self,level=rlevel): """Ticket #72""" a = np.array(['test', 'auto']) - assert_array_equal(a == 'auto', np.array([False,True])) + assert_array_equal(a == 'auto', np.array([False, True])) self.assertTrue(a[1] == 'auto') self.assertTrue(a[0] != 'auto') b = np.linspace(0, 10, 11) @@ -149,8 +149,8 @@ class TestRegression(TestCase): def test_mem_dtype_align(self,level=rlevel): """Ticket #93""" - self.assertRaises(TypeError,np.dtype, - {'names':['a'],'formats':['foo']},align=1) + self.assertRaises(TypeError, np.dtype, + {'names':['a'],'formats':['foo']}, align=1) @dec.knownfailureif((sys.version_info[0] >= 3) or (sys.platform == "win32" and platform.architecture()[0] == "64bit"), @@ -159,22 +159,22 @@ class TestRegression(TestCase): def test_intp(self,level=rlevel): """Ticket #99""" i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width,16) - self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16) - self.assertRaises(ValueError,np.intp,'0x1',32) - assert_equal(255,np.intp('0xFF',16)) - assert_equal(1024,np.intp(1024)) + np.intp('0x' + 'f'*i_width, 16) + self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) + self.assertRaises(ValueError, np.intp, '0x1', 32) + assert_equal(255, np.intp('0xFF', 16)) + assert_equal(1024, np.intp(1024)) def test_endian_bool_indexing(self,level=rlevel): """Ticket #105""" - a = np.arange(10.,dtype='>f8') - b = np.arange(10.,dtype='<f8') + a = np.arange(10., dtype='>f8') + b = np.arange(10., dtype='<f8') xa = np.where((a>2) & (a<6)) xb = np.where((b>2) & (b<6)) ya = ((a>2) & (a<6)) yb = ((b>2) & (b<6)) - assert_array_almost_equal(xa,ya.nonzero()) - assert_array_almost_equal(xb,yb.nonzero()) + assert_array_almost_equal(xa, ya.nonzero()) + assert_array_almost_equal(xb, yb.nonzero()) assert_(np.all(a[ya] > 0.5)) assert_(np.all(b[yb] > 0.5)) @@ -196,7 +196,7 @@ class TestRegression(TestCase): ]) buf = np.recarray(1, dtype=dt) buf[0]['head'] = 1 - buf[0]['data'][:] = [1,1] + buf[0]['data'][:] = [1, 1] h = buf[0]['head'] d = buf[0]['data'][0] @@ -206,8 +206,8 @@ class TestRegression(TestCase): def test_mem_dot(self,level=rlevel): """Ticket #106""" - x = np.random.randn(0,1) - y = np.random.randn(10,1) + x = np.random.randn(0, 1) + y = np.random.randn(10, 1) # Dummy array to detect bad memory access: _z = np.ones(10) _dummy = np.empty((0, 10)) @@ -221,10 +221,10 @@ class TestRegression(TestCase): def test_arange_endian(self,level=rlevel): """Ticket #111""" ref = np.arange(10) - x = np.arange(10,dtype='<f8') - assert_array_equal(ref,x) - x = np.arange(10,dtype='>f8') - assert_array_equal(ref,x) + x = np.arange(10, dtype='<f8') + assert_array_equal(ref, x) + x = np.arange(10, dtype='>f8') + assert_array_equal(ref, x) # Longfloat support is not consistent enough across # platforms for this test to be meaningful. 
@@ -236,21 +236,21 @@ class TestRegression(TestCase): def test_argmax(self,level=rlevel): """Ticket #119""" - a = np.random.normal(0,1,(4,5,6,7,8)) + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): aargmax = a.argmax(i) def test_mem_divmod(self,level=rlevel): """Ticket #126""" for i in range(10): - divmod(np.array([i])[0],10) + divmod(np.array([i])[0], 10) def test_hstack_invalid_dims(self,level=rlevel): """Ticket #128""" - x = np.arange(9).reshape((3,3)) - y = np.array([0,0,0]) - self.assertRaises(ValueError,np.hstack,(x,y)) + x = np.arange(9).reshape((3, 3)) + y = np.array([0, 0, 0]) + self.assertRaises(ValueError, np.hstack, (x, y)) def test_squeeze_type(self,level=rlevel): """Ticket #133""" @@ -261,7 +261,7 @@ class TestRegression(TestCase): def test_add_identity(self,level=rlevel): """Ticket #143""" - assert_equal(0,np.add.identity) + assert_equal(0, np.add.identity) def test_numpy_float_python_long_addition(self): # Check that numpy float and python longs can be added correctly. @@ -270,41 +270,41 @@ class TestRegression(TestCase): def test_binary_repr_0(self,level=rlevel): """Ticket #151""" - assert_equal('0',np.binary_repr(0)) + assert_equal('0', np.binary_repr(0)) def test_rec_iterate(self,level=rlevel): """Ticket #160""" - descr = np.dtype([('i',int),('f',float),('s','|S3')]) - x = np.rec.array([(1,1.1,'1.0'), - (2,2.2,'2.0')],dtype=descr) + descr = np.dtype([('i', int), ('f', float), ('s', '|S3')]) + x = np.rec.array([(1, 1.1, '1.0'), + (2, 2.2, '2.0')], dtype=descr) x[0].tolist() [i for i in x[0]] def test_unicode_string_comparison(self,level=rlevel): """Ticket #190""" - a = np.array('hello',np.unicode_) + a = np.array('hello', np.unicode_) b = np.array('world') a == b def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel): """Fix in r2836""" # Create discontiguous Fortran-ordered array - x = np.array(np.random.rand(3,3),order='F')[:,:2] - assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring())) + x = np.array(np.random.rand(3, 3), order='F')[:, :2] + assert_array_almost_equal(x.ravel(), np.fromstring(x.tostring())) def test_flat_assignment(self,level=rlevel): """Correct behaviour of ticket #194""" - x = np.empty((3,1)) + x = np.empty((3, 1)) x.flat = np.arange(3) - assert_array_almost_equal(x,[[0],[1],[2]]) - x.flat = np.arange(3,dtype=float) - assert_array_almost_equal(x,[[0],[1],[2]]) + assert_array_almost_equal(x, [[0], [1], [2]]) + x.flat = np.arange(3, dtype=float) + assert_array_almost_equal(x, [[0], [1], [2]]) def test_broadcast_flat_assignment(self,level=rlevel): """Ticket #194""" - x = np.empty((3,1)) + x = np.empty((3, 1)) def bfa(): x[:] = np.arange(3) - def bfb(): x[:] = np.arange(3,dtype=float) + def bfb(): x[:] = np.arange(3, dtype=float) self.assertRaises(ValueError, bfa) self.assertRaises(ValueError, bfb) @@ -323,29 +323,29 @@ class TestRegression(TestCase): def test_unpickle_dtype_with_object(self,level=rlevel): """Implemented in r2840""" - dt = np.dtype([('x',int),('y',np.object_),('z','O')]) + dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')]) f = BytesIO() - pickle.dump(dt,f) + pickle.dump(dt, f) f.seek(0) dt_ = pickle.load(f) f.close() - assert_equal(dt,dt_) + assert_equal(dt, dt_) def test_mem_array_creation_invalid_specification(self,level=rlevel): """Ticket #196""" - dt = np.dtype([('x',int),('y',np.object_)]) + dt = np.dtype([('x', int), ('y', np.object_)]) # Wrong way - self.assertRaises(ValueError, np.array, [1,'object'], dt) + self.assertRaises(ValueError, np.array, [1, 'object'], dt) # Correct way - 
np.array([(1,'object')],dt) + np.array([(1, 'object')], dt) def test_recarray_single_element(self,level=rlevel): """Ticket #202""" - a = np.array([1,2,3],dtype=np.int32) + a = np.array([1, 2, 3], dtype=np.int32) b = a.copy() - r = np.rec.array(a,shape=1,formats=['3i4'],names=['d']) - assert_array_equal(a,b) - assert_equal(a,r[0][0]) + r = np.rec.array(a, shape=1, formats=['3i4'], names=['d']) + assert_array_equal(a, b) + assert_equal(a, r[0][0]) def test_zero_sized_array_indexing(self,level=rlevel): """Ticket #205""" @@ -355,36 +355,36 @@ class TestRegression(TestCase): def test_chararray_rstrip(self,level=rlevel): """Ticket #222""" - x = np.chararray((1,),5) + x = np.chararray((1,), 5) x[0] = asbytes('a ') x = x.rstrip() assert_equal(x[0], asbytes('a')) def test_object_array_shape(self,level=rlevel): """Ticket #239""" - assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,)) - assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2)) - assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2)) - assert_equal(np.array([],dtype=object).shape, (0,)) - assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0)) - assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,)) + assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,)) + assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2)) + assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2)) + assert_equal(np.array([], dtype=object).shape, (0,)) + assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0)) + assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,)) def test_mem_around(self,level=rlevel): """Ticket #243""" x = np.zeros((1,)) y = [0] decimal = 6 - np.around(abs(x-y),decimal) <= 10.0**(-decimal) + np.around(abs(x-y), decimal) <= 10.0**(-decimal) def test_character_array_strip(self,level=rlevel): """Ticket #246""" - x = np.char.array(("x","x ","x ")) - for c in x: assert_equal(c,"x") + x = np.char.array(("x", "x ", "x ")) + for c in x: assert_equal(c, "x") def test_lexsort(self,level=rlevel): """Lexsort memory error""" - v = np.array([1,2,3,4,5,6,7,8,9,10]) - assert_equal(np.lexsort(v),0) + v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + assert_equal(np.lexsort(v), 0) def test_pickle_dtype(self,level=rlevel): """Ticket #251""" @@ -392,26 +392,26 @@ class TestRegression(TestCase): def test_swap_real(self, level=rlevel): """Ticket #265""" - assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0) - assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0) - assert_equal(np.arange(4,dtype='>c8').real.max(),3.0) - assert_equal(np.arange(4,dtype='<c8').real.max(),3.0) + assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) + assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0) + assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0) + assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0) def test_object_array_from_list(self, level=rlevel): """Ticket #270""" - a = np.array([1,'A',None]) + a = np.array([1, 'A', None]) def test_multiple_assign(self, level=rlevel): """Ticket #273""" - a = np.zeros((3,1),int) - a[[1,2]] = 1 + a = np.zeros((3, 1), int) + a[[1, 2]] = 1 def test_empty_array_type(self, level=rlevel): assert_equal(np.array([]).dtype, np.zeros(0).dtype) def test_void_copyswap(self, level=rlevel): - dt = np.dtype([('one', '<i4'),('two', '<i4')]) - x = np.array((1,2), dtype=dt) + dt = np.dtype([('one', '<i4'), ('two', '<i4')]) + x = np.array((1, 2), dtype=dt) x = x.byteswap() assert_(x['one'] > 1 and x['two'] > 2) @@ -426,7 +426,7 @@ class 
TestRegression(TestCase): funcs2 = ['compress', 'take', 'repeat'] for func in funcs1: - arr = np.random.rand(8,7) + arr = np.random.rand(8, 7) arr2 = arr.copy() if isinstance(func, tuple): func_meth = func[1] @@ -441,8 +441,8 @@ class TestRegression(TestCase): assert_(abs(res1-res2).max() < 1e-8, func) for func in funcs2: - arr1 = np.random.rand(8,7) - arr2 = np.random.rand(8,7) + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) res1 = None if func == 'compress': arr1 = arr1.ravel() @@ -456,51 +456,51 @@ class TestRegression(TestCase): def test_mem_lexsort_strings(self, level=rlevel): """Ticket #298""" - lst = ['abc','cde','fgh'] + lst = ['abc', 'cde', 'fgh'] np.lexsort((lst,)) def test_fancy_index(self, level=rlevel): """Ticket #302""" - x = np.array([1,2])[np.array([0])] - assert_equal(x.shape,(1,)) + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) def test_recarray_copy(self, level=rlevel): """Ticket #312""" - dt = [('x',np.int16),('y',np.float64)] - ra = np.array([(1,2.3)], dtype=dt) + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) rb = np.rec.array(ra, dtype=dt) rb['x'] = 2. assert_(ra['x'] != rb['x']) def test_rec_fromarray(self, level=rlevel): """Ticket #322""" - x1 = np.array([[1,2],[3,4],[5,6]]) - x2 = np.array(['a','dd','xyz']) - x3 = np.array([1.1,2,3]) - np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8") + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") def test_object_array_assign(self, level=rlevel): - x = np.empty((2,2),object) - x.flat[2] = (1,2,3) - assert_equal(x.flat[2],(1,2,3)) + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) def test_ndmin_float64(self, level=rlevel): """Ticket #324""" - x = np.array([1,2,3],dtype=np.float64) - assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2) - assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2) + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) def test_ndmin_order(self, level=rlevel): """Issue #465 and related checks""" - assert_(np.array([1,2], order='C', ndmin=3).flags.c_contiguous) - assert_(np.array([1,2], order='F', ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2,2), order='F'), ndmin=3).flags.f_contiguous) - assert_(np.array(np.ones((2,2), order='C'), ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) def test_mem_axis_minimization(self, level=rlevel): """Ticket #327""" data = np.arange(5) - data = np.add.outer(data,data) + data = np.add.outer(data, data) def test_mem_float_imag(self, level=rlevel): """Ticket #330""" @@ -508,7 +508,7 @@ class TestRegression(TestCase): def test_dtype_tuple(self, level=rlevel): """Ticket #334""" - assert_(np.dtype('i4') == np.dtype(('i4',()))) + assert_(np.dtype('i4') == np.dtype(('i4', ()))) def test_dtype_posttuple(self, level=rlevel): """Ticket #335""" @@ -521,7 +521,7 @@ class TestRegression(TestCase): def test_string_array_size(self, level=rlevel): """Ticket #342""" self.assertRaises(ValueError, - np.array,[['X'],['X','X','X']],'|S1') + np.array, [['X'], ['X', 'X', 'X']], 
'|S1') def test_dtype_repr(self, level=rlevel): """Ticket #344""" @@ -531,22 +531,22 @@ class TestRegression(TestCase): def test_reshape_order(self, level=rlevel): """Make sure reshape order works.""" - a = np.arange(6).reshape(2,3,order='F') - assert_equal(a,[[0,2,4],[1,3,5]]) - a = np.array([[1,2],[3,4],[5,6],[7,8]]) - b = a[:,1] - assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]]) + a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) def test_reshape_zero_strides(self, level=rlevel): """Issue #380, test reshaping of zero strided arrays""" a = np.ones(1) a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) - assert_(a.reshape(5,1).strides[0] == 0) + assert_(a.reshape(5, 1).strides[0] == 0) def test_reshape_zero_size(self, level=rlevel): """Github Issue #2700, setting shape failed for 0-sized arrays""" - a = np.ones((0,2)) - a.shape = (-1,2) + a = np.ones((0, 2)) + a.shape = (-1, 2) # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. @@ -558,48 +558,48 @@ class TestRegression(TestCase): strides_f = (8, 24, 48, 48) assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) - assert_equal(np.array(0, dtype=np.int32).reshape(1,1).strides, (4,4)) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) def test_repeat_discont(self, level=rlevel): """Ticket #352""" - a = np.arange(12).reshape(4,3)[:,2] - assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11]) + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) def test_array_index(self, level=rlevel): """Make sure optimization is not called in this case.""" - a = np.array([1,2,3]) - a2 = np.array([[1,2,3]]) + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) assert_equal(a[np.where(a==3)], a2[np.where(a2==3)]) def test_object_argmax(self, level=rlevel): - a = np.array([1,2,3],dtype=object) + a = np.array([1, 2, 3], dtype=object) assert_(a.argmax() == 2) def test_recarray_fields(self, level=rlevel): """Ticket #372""" - dt0 = np.dtype([('f0','i4'),('f1','i4')]) - dt1 = np.dtype([('f0','i8'),('f1','i8')]) - for a in [np.array([(1,2),(3,4)],"i4,i4"), - np.rec.array([(1,2),(3,4)],"i4,i4"), - np.rec.array([(1,2),(3,4)]), - np.rec.fromarrays([(1,2),(3,4)],"i4,i4"), - np.rec.fromarrays([(1,2),(3,4)])]: - assert_(a.dtype in [dt0,dt1]) + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) def test_random_shuffle(self, level=rlevel): """Ticket #374""" - a = np.arange(5).reshape((5,1)) + a = np.arange(5).reshape((5, 1)) b = a.copy() np.random.shuffle(b) - assert_equal(np.sort(b, axis=0),a) + assert_equal(np.sort(b, axis=0), a) def test_refcount_vdot(self, level=rlevel): """Changeset #3443""" _assert_valid_refcount(np.vdot) def test_startswith(self, level=rlevel): - ca = np.char.array(['Hi','There']) - assert_equal(ca.startswith('H'),[True,False]) + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) def test_noncommutative_reduce_accumulate(self, 
level=rlevel): """Ticket #413""" @@ -614,14 +614,14 @@ class TestRegression(TestCase): def test_convolve_empty(self, level=rlevel): """Convolve should raise an error for empty input array.""" - self.assertRaises(ValueError,np.convolve,[],[1]) - self.assertRaises(ValueError,np.convolve,[1],[]) + self.assertRaises(ValueError, np.convolve, [], [1]) + self.assertRaises(ValueError, np.convolve, [1], []) def test_multidim_byteswap(self, level=rlevel): """Ticket #449""" - r=np.array([(1,(0,1,2))], dtype="i2,3i2") + r=np.array([(1, (0, 1, 2))], dtype="i2,3i2") assert_array_equal(r.byteswap(), - np.array([(256,(0,256,512))],r.dtype)) + np.array([(256, (0, 256, 512))], r.dtype)) def test_string_NULL(self, level=rlevel): """Changeset 3557""" @@ -635,11 +635,11 @@ class TestRegression(TestCase): def test_take_output(self, level=rlevel): """Ensure that 'take' honours output parameter.""" - x = np.arange(12).reshape((3,4)) - a = np.take(x,[0,2],axis=1) + x = np.arange(12).reshape((3, 4)) + a = np.take(x, [0, 2], axis=1) b = np.zeros_like(a) - np.take(x,[0,2],axis=1,out=b) - assert_array_equal(a,b) + np.take(x, [0, 2], axis=1, out=b) + assert_array_equal(a, b) def test_take_object_fail(self): # Issue gh-3001 @@ -654,7 +654,7 @@ class TestRegression(TestCase): def test_array_str_64bit(self, level=rlevel): """Ticket #501""" - s = np.array([1, np.nan],dtype=np.float64) + s = np.array([1, np.nan], dtype=np.float64) with np.errstate(all='raise'): sstr = np.array_str(s) @@ -695,31 +695,31 @@ class TestRegression(TestCase): def test_mem_deallocation_leak(self, level=rlevel): """Ticket #562""" - a = np.zeros(5,dtype=float) - b = np.array(a,dtype=float) + a = np.zeros(5, dtype=float) + b = np.array(a, dtype=float) del a, b def test_mem_on_invalid_dtype(self): "Ticket #583" - self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str) + self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str) def test_dot_negative_stride(self, level=rlevel): """Ticket #588""" - x = np.array([[1,5,25,125.,625]]) - y = np.array([[20.],[160.],[640.],[1280.],[1024.]]) + x = np.array([[1, 5, 25, 125., 625]]) + y = np.array([[20.], [160.], [640.], [1280.], [1024.]]) z = y[::-1].copy() y2 = y[::-1] - assert_equal(np.dot(x,z),np.dot(x,y2)) + assert_equal(np.dot(x, z), np.dot(x, y2)) def test_object_casting(self, level=rlevel): # This used to trigger the object-type version of # the bitwise_or operation, because float64 -> object # casting succeeds def rs(): - x = np.ones([484,286]) - y = np.zeros([484,286]) + x = np.ones([484, 286]) + y = np.zeros([484, 286]) x |= y - self.assertRaises(TypeError,rs) + self.assertRaises(TypeError, rs) def test_unicode_scalar(self, level=rlevel): """Ticket #600""" @@ -730,42 +730,42 @@ class TestRegression(TestCase): def test_arange_non_native_dtype(self, level=rlevel): """Ticket #616""" - for T in ('>f4','<f4'): + for T in ('>f4', '<f4'): dt = np.dtype(T) - assert_equal(np.arange(0,dtype=dt).dtype,dt) - assert_equal(np.arange(0.5,dtype=dt).dtype,dt) - assert_equal(np.arange(5,dtype=dt).dtype,dt) + assert_equal(np.arange(0, dtype=dt).dtype, dt) + assert_equal(np.arange(0.5, dtype=dt).dtype, dt) + assert_equal(np.arange(5, dtype=dt).dtype, dt) def test_bool_indexing_invalid_nr_elements(self, level=rlevel): - s = np.ones(10,dtype=float) - x = np.array((15,),dtype=float) - def ia(x,s,v): x[(s>0)]=v - self.assertRaises(ValueError,ia,x,s,np.zeros(9,dtype=float)) - self.assertRaises(ValueError,ia,x,s,np.zeros(11,dtype=float)) + s = np.ones(10, dtype=float) + x = np.array((15,), 
dtype=float) + def ia(x, s, v): x[(s>0)]=v + self.assertRaises(ValueError, ia, x, s, np.zeros(9, dtype=float)) + self.assertRaises(ValueError, ia, x, s, np.zeros(11, dtype=float)) def test_mem_scalar_indexing(self, level=rlevel): """Ticket #603""" - x = np.array([0],dtype=float) - index = np.array(0,dtype=np.int32) + x = np.array([0], dtype=float) + index = np.array(0, dtype=np.int32) x[index] def test_binary_repr_0_width(self, level=rlevel): - assert_equal(np.binary_repr(0,width=3),'000') + assert_equal(np.binary_repr(0, width=3), '000') def test_fromstring(self, level=rlevel): assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), - [12,9,9]) + [12, 9, 9]) def test_searchsorted_variable_length(self, level=rlevel): - x = np.array(['a','aa','b']) - y = np.array(['d','e']) - assert_equal(x.searchsorted(y), [3,3]) + x = np.array(['a', 'aa', 'b']) + y = np.array(['d', 'e']) + assert_equal(x.searchsorted(y), [3, 3]) def test_string_argsort_with_zeros(self, level=rlevel): """Check argsort for strings containing zeros.""" x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") - assert_array_equal(x.argsort(kind='m'), np.array([1,0])) - assert_array_equal(x.argsort(kind='q'), np.array([1,0])) + assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) + assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) def test_string_sort_with_zeros(self, level=rlevel): """Check sort for strings containing zeros.""" @@ -775,29 +775,29 @@ class TestRegression(TestCase): def test_copy_detection_zero_dim(self, level=rlevel): """Ticket #658""" - np.indices((0,3,4)).T.reshape(-1,3) + np.indices((0, 3, 4)).T.reshape(-1, 3) def test_flat_byteorder(self, level=rlevel): """Ticket #657""" x = np.arange(10) - assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:]) - assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4')) + assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:]) + assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4')) def test_uint64_from_negative(self, level=rlevel) : assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) def test_sign_bit(self, level=rlevel): - x = np.array([0,-0.0,0]) - assert_equal(str(np.abs(x)),'[ 0. 0. 0.]') + x = np.array([0, -0.0, 0]) + assert_equal(str(np.abs(x)), '[ 0. 0. 0.]') def test_flat_index_byteswap(self, level=rlevel): - for dt in (np.dtype('<i4'),np.dtype('>i4')): - x = np.array([-1,0,1],dtype=dt) + for dt in (np.dtype('<i4'), np.dtype('>i4')): + x = np.array([-1, 0, 1], dtype=dt) assert_equal(x.flat[0].dtype, x[0].dtype) def test_copy_detection_corner_case(self, level=rlevel): """Ticket #658""" - np.indices((0,3,4)).T.reshape(-1,3) + np.indices((0, 3, 4)).T.reshape(-1, 3) # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. 
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, @@ -806,7 +806,7 @@ class TestRegression(TestCase): def test_copy_detection_corner_case2(self, level=rlevel): """Ticket #771: strides are not set correctly when reshaping 0-sized arrays""" - b = np.indices((0,3,4)).T.reshape(-1,3) + b = np.indices((0, 3, 4)).T.reshape(-1, 3) assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) def test_object_array_refcounting(self, level=rlevel): @@ -857,7 +857,7 @@ class TestRegression(TestCase): assert_(cnt(a) == cnt0_a + 6) assert_(cnt(b) == cnt0_b + 6) - arr[:,0] = None + arr[:, 0] = None assert_(cnt(a) == cnt0_a + 1) del arr, arr0 @@ -866,8 +866,8 @@ class TestRegression(TestCase): arr = np.zeros((5, 2), dtype=np.object_) - arr[:,0] = a - arr[:,1] = b + arr[:, 0] = a + arr[:, 1] = b assert_(cnt(a) == cnt0_a + 5) assert_(cnt(b) == cnt0_b + 5) @@ -875,7 +875,7 @@ class TestRegression(TestCase): assert_(cnt(a) == cnt0_a + 10) assert_(cnt(b) == cnt0_b + 10) - arr2 = arr[:,0].copy() + arr2 = arr[:, 0].copy() assert_(cnt(a) == cnt0_a + 10) assert_(cnt(b) == cnt0_b + 5) @@ -902,10 +902,10 @@ class TestRegression(TestCase): arr3 = arr1.repeat(3, axis=0) assert_(cnt(a) == cnt0_a + 5 + 3*5) - arr3 = arr1.take([1,2,3], axis=0) + arr3 = arr1.take([1, 2, 3], axis=0) assert_(cnt(a) == cnt0_a + 5 + 3) - x = np.array([[0],[1],[0],[1],[1]], int) + x = np.array([[0], [1], [0], [1], [1]], int) arr3 = x.choose(arr1, arr2) assert_(cnt(a) == cnt0_a + 5 + 2) assert_(cnt(b) == cnt0_b + 5 + 3) @@ -935,7 +935,7 @@ class TestRegression(TestCase): assert_(not arr[0].deleted) def test_mem_fromiter_invalid_dtype_string(self, level=rlevel): - x = [1,2,3] + x = [1, 2, 3] self.assertRaises(ValueError, np.fromiter, [xi for xi in x], dtype='S') @@ -973,7 +973,7 @@ class TestRegression(TestCase): def test_dot_alignment_sse2(self, level=rlevel): """Test for ticket #551, changeset r5140""" - x = np.zeros((30,40)) + x = np.zeros((30, 40)) y = pickle.loads(pickle.dumps(x)) # y is now typically not aligned on a 8-byte boundary z = np.ones((1, y.shape[0])) @@ -1024,17 +1024,17 @@ class TestRegression(TestCase): return result def __array_finalize__(self, obj): self.info = getattr(obj, 'info', '') - dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba') + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') assert_(dat.info == 'jubba') - dat.resize((4,2)) + dat.resize((4, 2)) assert_(dat.info == 'jubba') dat.sort() assert_(dat.info == 'jubba') dat.fill(2) assert_(dat.info == 'jubba') - dat.put([2,3,4],[6,3,4]) + dat.put([2, 3, 4], [6, 3, 4]) assert_(dat.info == 'jubba') - dat.setfield(4, np.int32,0) + dat.setfield(4, np.int32, 0) assert_(dat.info == 'jubba') dat.setflags() assert_(dat.info == 'jubba') @@ -1045,12 +1045,12 @@ class TestRegression(TestCase): assert_(dat.argsort(1).info == 'jubba') assert_(dat.astype(TestArray).info == 'jubba') assert_(dat.byteswap().info == 'jubba') - assert_(dat.clip(2,7).info == 'jubba') - assert_(dat.compress([0,1,1]).info == 'jubba') + assert_(dat.clip(2, 7).info == 'jubba') + assert_(dat.compress([0, 1, 1]).info == 'jubba') assert_(dat.conj().info == 'jubba') assert_(dat.conjugate().info == 'jubba') assert_(dat.copy().info == 'jubba') - dat2 = TestArray([2, 3, 1, 0],'jubba') + dat2 = TestArray([2, 3, 1, 0], 'jubba') choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] assert_(dat2.choose(choices).info == 'jubba') @@ -1058,7 +1058,7 @@ class TestRegression(TestCase): assert_(dat.cumsum(1).info == 'jubba') assert_(dat.diagonal().info == 'jubba') assert_(dat.flatten().info 
== 'jubba') - assert_(dat.getfield(np.int32,0).info == 'jubba') + assert_(dat.getfield(np.int32, 0).info == 'jubba') assert_(dat.imag.info == 'jubba') assert_(dat.max(1).info == 'jubba') assert_(dat.mean(1).info == 'jubba') @@ -1071,13 +1071,13 @@ class TestRegression(TestCase): assert_(dat.ravel().info == 'jubba') assert_(dat.real.info == 'jubba') assert_(dat.repeat(2).info == 'jubba') - assert_(dat.reshape((2,4)).info == 'jubba') + assert_(dat.reshape((2, 4)).info == 'jubba') assert_(dat.round().info == 'jubba') assert_(dat.squeeze().info == 'jubba') assert_(dat.std(1).info == 'jubba') assert_(dat.sum(1).info == 'jubba') - assert_(dat.swapaxes(0,1).info == 'jubba') - assert_(dat.take([2,3,5]).info == 'jubba') + assert_(dat.swapaxes(0, 1).info == 'jubba') + assert_(dat.take([2, 3, 5]).info == 'jubba') assert_(dat.transpose().info == 'jubba') assert_(dat.T.info == 'jubba') assert_(dat.var(1).info == 'jubba') @@ -1101,15 +1101,15 @@ class TestRegression(TestCase): def test_char_array_creation(self, level=rlevel): a = np.array('123', dtype='c') - b = np.array(asbytes_nested(['1','2','3'])) - assert_equal(a,b) + b = np.array(asbytes_nested(['1', '2', '3'])) + assert_equal(a, b) def test_unaligned_unicode_access(self, level=rlevel) : """Ticket #825""" - for i in range(1,9) : + for i in range(1, 9) : msg = 'unicode offset: %d chars'%i - t = np.dtype([('a','S%d'%i),('b','U2')]) - x = np.array([(asbytes('a'),sixu('b'))], dtype=t) + t = np.dtype([('a', 'S%d'%i), ('b', 'U2')]) + x = np.array([(asbytes('a'), sixu('b'))], dtype=t) if sys.version_info[0] >= 3: assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) else: @@ -1138,7 +1138,7 @@ class TestRegression(TestCase): a = np.object_() b = np.object_(3) b2 = np.object_(3.0) - c = np.object_([4,5]) + c = np.object_([4, 5]) d = np.object_([None, {}, []]) assert_(a is None) assert_(type(b) is int) @@ -1149,8 +1149,8 @@ class TestRegression(TestCase): def test_array_resize_method_system_error(self): """Ticket #840 - order should be an invalid keyword.""" - x = np.array([[0,1],[2,3]]) - self.assertRaises(TypeError, x.resize, (2,2), order='C') + x = np.array([[0, 1], [2, 3]]) + self.assertRaises(TypeError, x.resize, (2, 2), order='C') def test_for_zero_length_in_choose(self, level=rlevel): "Ticket #882" @@ -1189,7 +1189,7 @@ class TestRegression(TestCase): #that void scalar contains original data. 
test_string = np.array("test") test_string_void_scalar = np.core.multiarray.scalar( - np.dtype(("V",test_string.dtype.itemsize)), test_string.tostring()) + np.dtype(("V", test_string.dtype.itemsize)), test_string.tostring()) assert_(test_string_void_scalar.view(test_string.dtype) == test_string) @@ -1252,8 +1252,8 @@ class TestRegression(TestCase): """Ticket #1058""" a = np.fromiter(list(range(10)), dtype='b') b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))) - assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))) + assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) def test_array_from_sequence_scalar_array(self): """Ticket #1078: segfaults when creating an array with a sequence of 0d @@ -1379,9 +1379,9 @@ class TestRegression(TestCase): """Ticket #1299""" stra = 'aaaa' strb = 'bbbb' - x = np.array([[(0,stra),(1,strb)]], 'i8,O') + x = np.array([[(0, stra), (1, strb)]], 'i8,O') x[x.nonzero()] = x.ravel()[:1] - assert_(x[0,1] == x[0,0]) + assert_(x[0, 1] == x[0, 0]) def test_structured_arrays_with_objects2(self): """Ticket #1299 second test""" @@ -1389,7 +1389,7 @@ class TestRegression(TestCase): strb = 'bbbb' numb = sys.getrefcount(strb) numa = sys.getrefcount(stra) - x = np.array([[(0,stra),(1,strb)]], 'i8,O') + x = np.array([[(0, stra), (1, strb)]], 'i8,O') x[x.nonzero()] = x.ravel()[:1] assert_(sys.getrefcount(strb) == numb) assert_(sys.getrefcount(stra) == numa + 2) @@ -1416,7 +1416,7 @@ class TestRegression(TestCase): except ImportError: from md5 import new as md5 - x = np.array([1,2,3], dtype=np.dtype('<i4')) + x = np.array([1, 2, 3], dtype=np.dtype('<i4')) assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6') def test_numeric_handleError(self): @@ -1436,8 +1436,8 @@ class TestRegression(TestCase): def test_fromiter_comparison(self, level=rlevel): a = np.fromiter(list(range(10)), dtype='b') b = np.fromiter(list(range(10)), dtype='B') - assert_(np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))) - assert_(np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))) + assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) + assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) def test_fromstring_crash(self): # Ticket #1345: the following should not cause a crash @@ -1471,7 +1471,7 @@ class TestRegression(TestCase): def test_ticket_1434(self): # Check that the out= argument in var and std has an effect - data = np.array(((1,2,3),(4,5,6),(7,8,9))) + data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) out = np.zeros((3,)) ret = data.var(axis=1, out=out) @@ -1489,7 +1489,7 @@ class TestRegression(TestCase): def test_subclass_int_tuple_assignment(self): # ticket #1563 class Subclass(np.ndarray): - def __new__(cls,i): + def __new__(cls, i): return np.ones((i,)).view(cls) x = Subclass(5) x[(0,)] = 2 # shouldn't raise an exception @@ -1499,14 +1499,14 @@ class TestRegression(TestCase): # ticket #1548 class Subclass(np.ndarray): pass - x = np.array([1,2,3]).view(Subclass) + x = np.array([1, 2, 3]).view(Subclass) y = np.add(x, x, x) assert_equal(id(x), id(y)) def test_take_refcount(self): # ticket #939 a = np.arange(16, dtype=np.float) - a.shape = (4,4) + a.shape = (4, 4) lut = np.ones((5 + 3, 4), np.float) rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) c1 = sys.getrefcount(rgba) @@ -1591,7 +1591,7 @@ class TestRegression(TestCase): def test_find_common_type_boolean(self): # Ticket #1695 - 
assert_(np.find_common_type([],['?','?']) == '?') + assert_(np.find_common_type([], ['?', '?']) == '?') def test_empty_mul(self): a = np.array([1.]) @@ -1636,16 +1636,16 @@ class TestRegression(TestCase): def test_squeeze_contiguous(self): """Similar to GitHub issue #387""" - a = np.zeros((1,2)).squeeze() - b = np.zeros((2,2,2), order='F')[:,:,::2].squeeze() + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze() assert_(a.flags.c_contiguous) assert_(a.flags.f_contiguous) assert_(b.flags.f_contiguous) def test_reduce_contiguous(self): """GitHub issue #387""" - a = np.add.reduce(np.zeros((2,1,2)), (0,1)) - b = np.add.reduce(np.zeros((2,1,2)), 1) + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) assert_(a.flags.c_contiguous) assert_(a.flags.f_contiguous) assert_(b.flags.c_contiguous) @@ -1668,12 +1668,12 @@ class TestRegression(TestCase): def test_zerosize_accumulate(self): "Ticket #1733" x = np.array([[42, 0]], dtype=np.uint32) - assert_equal(np.add.accumulate(x[:-1,0]), []) + assert_equal(np.add.accumulate(x[:-1, 0]), []) def test_objectarray_setfield(self): # Setfield directly manipulates the raw array data, # so is invalid for object arrays. - x = np.array([1,2,3], dtype=object) + x = np.array([1, 2, 3], dtype=object) assert_raises(RuntimeError, x.setfield, 4, np.int32, 0) def test_setting_rank0_string(self): @@ -1697,7 +1697,7 @@ class TestRegression(TestCase): s1 = asbytes('black') s2 = asbytes('white') s3 = asbytes('other') - a = np.array([[s1],[s2],[s3]]) + a = np.array([[s1], [s2], [s3]]) assert_equal(a.dtype, np.dtype('S5')) b = a.astype(np.dtype('S0')) assert_equal(b.dtype, np.dtype('S5')) @@ -1706,7 +1706,7 @@ class TestRegression(TestCase): """Ticket #1756 """ s = asbytes('0123456789abcdef') a = np.array([s]*5) - for i in range(1,17): + for i in range(1, 17): a1 = np.array(a, "|S%d"%i) a2 = np.array([s[:i]]*5) assert_equal(a1, a2) @@ -1740,9 +1740,9 @@ class TestRegression(TestCase): def test_ticket_1608(self): "x.flat shouldn't modify data" - x = np.array([[1,2],[3,4]]).T + x = np.array([[1, 2], [3, 4]]).T y = np.array(x.flat) - assert_equal(x, [[1,3],[2,4]]) + assert_equal(x, [[1, 3], [2, 4]]) def test_pickle_string_overwrite(self): import re @@ -1767,10 +1767,10 @@ class TestRegression(TestCase): assert_equal(bytestring[0:1], '\x01'.encode('ascii')) def test_structured_type_to_object(self): - a_rec = np.array([(0,1), (3,2)], dtype='i4,i8') + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') a_obj = np.empty((2,), dtype=object) - a_obj[0] = (0,1) - a_obj[1] = (3,2) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) # astype records -> object assert_equal(a_rec.astype(object), a_obj) # '=' records -> object @@ -1794,9 +1794,9 @@ class TestRegression(TestCase): b[...] = [[1], [2], [3], [4]] assert_equal(a, b) # The first dimension should get broadcast - a = np.zeros((2,2), dtype=object) - a[...] = [[1,2]] - assert_equal(a, [[1,2], [1,2]]) + a = np.zeros((2, 2), dtype=object) + a[...] 
= [[1, 2]]
+        assert_equal(a, [[1, 2], [1, 2]])
     def test_memoryleak(self):
         # Ticket #1917 - ensure that array data doesn't leak
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 3e1aaef3b..ed038e82d 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -16,16 +16,16 @@ class TestTypes(TestCase):
     def test_types(self, level=1):
         for atype in types:
             a = atype(1)
-            assert_(a == 1, "error with %r: got %r" % (atype,a))
+            assert_(a == 1, "error with %r: got %r" % (atype, a))
     def test_type_add(self, level=1):
         # list of types
         for k, atype in enumerate(types):
             a_scalar = atype(3)
-            a_array = np.array([3],dtype=atype)
+            a_array = np.array([3], dtype=atype)
             for l, btype in enumerate(types):
                 b_scalar = btype(1)
-                b_array = np.array([1],dtype=btype)
+                b_array = np.array([1], dtype=btype)
                 c_scalar = a_scalar + b_scalar
                 c_array = a_array + b_array
                 # It was comparing the type numbers, but the new ufunc
@@ -36,13 +36,13 @@ class TestTypes(TestCase):
                 # does not produce properly symmetric results...
                 assert_equal(c_scalar.dtype, c_array.dtype,
                              "error with types (%d/'%c' + %d/'%c')" %
-                             (k,np.dtype(atype).char,l,np.dtype(btype).char))
+                             (k, np.dtype(atype).char, l, np.dtype(btype).char))
     def test_type_create(self, level=1):
         for k, atype in enumerate(types):
-            a = np.array([1,2,3],atype)
-            b = atype([1,2,3])
-            assert_equal(a,b)
+            a = np.array([1, 2, 3], atype)
+            b = atype([1, 2, 3])
+            assert_equal(a, b)
 class TestBaseMath(TestCase):
@@ -81,21 +81,21 @@ class TestPower(TestCase):
         for t in [np.int8, np.int16, np.float16]:
             a = t(3)
             b = a ** 4
-            assert_(b == 81, "error with %r: got %r" % (t,b))
+            assert_(b == 81, "error with %r: got %r" % (t, b))
     def test_large_types(self):
         for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
             a = t(51)
             b = a ** 4
-            msg = "error with %r: got %r" % (t,b)
+            msg = "error with %r: got %r" % (t, b)
             if np.issubdtype(t, np.integer):
                 assert_(b == 6765201, msg)
             else:
                 assert_almost_equal(b, 6765201, err_msg=msg)
     def test_mixed_types(self):
-        typelist = [np.int8,np.int16,np.float16,
-                    np.float32,np.float64,np.int8,
-                    np.int16,np.int32,np.int64]
+        typelist = [np.int8, np.int16, np.float16,
+                    np.float32, np.float64, np.int8,
+                    np.int16, np.int32, np.int64]
         for t1 in typelist:
             for t2 in typelist:
                 a = t1(3)
@@ -132,7 +132,7 @@ class TestConversion(TestCase):
         l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
         li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
         for T in [None, np.float64, np.int64]:
-            a = np.array(l,dtype=T)
+            a = np.array(l, dtype=T)
             assert_equal([int(_m) for _m in a], li)
         a = np.array(l[:3], dtype=np.uint64)
@@ -155,9 +155,9 @@ class TestRepr(object):
         last_exponent_bit_idx = finfo.nexp
         storage_bytes = np.dtype(t).itemsize*8
         # could add some more types to the list below
-        for which in ['small denorm','small norm']:
+        for which in ['small denorm', 'small norm']:
             # Values from http://en.wikipedia.org/wiki/IEEE_754
-            constr = np.array([0x00]*storage_bytes,dtype=np.uint8)
+            constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
             if which == 'small denorm':
                 byte = last_fraction_bit_idx // 8
                 bytebit = 7-(last_fraction_bit_idx % 8)
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 8cbcfede3..09bb2f308 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -13,14 +13,14 @@ class TestAtleast1d(TestCase):
         a = array(1)
         b = array(2)
         res = [atleast_1d(a), atleast_1d(b)]
-        desired =
[array([1]), array([2])] assert_array_equal(res, desired) def test_1D_array(self): a = array([1, 2]) b = array([2, 3]) res = [atleast_1d(a), atleast_1d(b)] - desired = [array([1, 2]),array([2, 3])] + desired = [array([1, 2]), array([2, 3])] assert_array_equal(res, desired) def test_2D_array(self): @@ -46,7 +46,7 @@ class TestAtleast1d(TestCase): assert_(atleast_1d(3j).shape == (1,)) assert_(atleast_1d(long(3)).shape == (1,)) assert_(atleast_1d(3.0).shape == (1,)) - assert_(atleast_1d([[2,3],[4,5]]).shape == (2,2)) + assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) class TestAtleast2d(TestCase): @@ -83,9 +83,9 @@ class TestAtleast2d(TestCase): def test_r2array(self): """ Test to make sure equivalent Travis O's r2array function """ - assert_(atleast_2d(3).shape == (1,1)) - assert_(atleast_2d([3j,1]).shape == (1,2)) - assert_(atleast_2d([[[3,1],[4,5]],[[3,5],[1,2]]]).shape == (2,2,2)) + assert_(atleast_2d(3).shape == (1, 1)) + assert_(atleast_2d([3j, 1]).shape == (1, 2)) + assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) class TestAtleast3d(TestCase): @@ -107,7 +107,7 @@ class TestAtleast3d(TestCase): a = array([[1, 2], [1, 2]]) b = array([[2, 3], [2, 3]]) res = [atleast_3d(a), atleast_3d(b)] - desired = [a[:,:,newaxis], b[:,:,newaxis]] + desired = [a[:,:, newaxis], b[:,:, newaxis]] assert_array_equal(res, desired) def test_3D_array(self): @@ -124,56 +124,56 @@ class TestHstack(TestCase): def test_0D_array(self): a = array(1) b = array(2) - res=hstack([a,b]) - desired = array([1,2]) - assert_array_equal(res,desired) + res=hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) def test_1D_array(self): a = array([1]) b = array([2]) - res=hstack([a,b]) - desired = array([1,2]) - assert_array_equal(res,desired) + res=hstack([a, b]) + desired = array([1, 2]) + assert_array_equal(res, desired) def test_2D_array(self): - a = array([[1],[2]]) - b = array([[1],[2]]) - res=hstack([a,b]) - desired = array([[1,1],[2,2]]) - assert_array_equal(res,desired) + a = array([[1], [2]]) + b = array([[1], [2]]) + res=hstack([a, b]) + desired = array([[1, 1], [2, 2]]) + assert_array_equal(res, desired) class TestVstack(TestCase): def test_0D_array(self): a = array(1) b = array(2) - res=vstack([a,b]) - desired = array([[1],[2]]) - assert_array_equal(res,desired) + res=vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) def test_1D_array(self): a = array([1]) b = array([2]) - res=vstack([a,b]) - desired = array([[1],[2]]) - assert_array_equal(res,desired) + res=vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) def test_2D_array(self): - a = array([[1],[2]]) - b = array([[1],[2]]) - res=vstack([a,b]) - desired = array([[1],[2],[1],[2]]) - assert_array_equal(res,desired) + a = array([[1], [2]]) + b = array([[1], [2]]) + res=vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) def test_2D_array2(self): - a = array([1,2]) - b = array([1,2]) - res=vstack([a,b]) - desired = array([[1,2],[1,2]]) - assert_array_equal(res,desired) + a = array([1, 2]) + b = array([1, 2]) + res=vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) def test_concatenate_axis_None(): - a = np.arange(4, dtype=np.float64).reshape((2,2)) + a = np.arange(4, dtype=np.float64).reshape((2, 2)) b = list(range(3)) c = ['x'] r = np.concatenate((a, a), axis=None) @@ -183,9 +183,9 @@ def test_concatenate_axis_None(): assert_equal(r.size, a.size + len(b)) assert_equal(r.dtype, 
a.dtype) r = np.concatenate((a, b, c), axis=None) - d = array(['0', '1', '2', '3', + d = array(['0', '1', '2', '3', '0', '1', '2', 'x']) - assert_array_equal(r,d) + assert_array_equal(r, d) def test_concatenate(): diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 2368e5809..7074a60a5 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -24,7 +24,7 @@ class TestUfunc(TestCase): L = 6 x = np.arange(L) idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() - assert_array_equal(np.add.reduceat(x,idx)[::2], [1,3,5,7]) + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) def test_generic_loops(self) : """Test generic loops. @@ -80,7 +80,7 @@ class TestUfunc(TestCase): """ fone = np.exp - ftwo = lambda x,y : x**y + ftwo = lambda x, y : x**y fone_val = 1 ftwo_val = 1 # check unary PyUFunc_f_f. @@ -111,27 +111,27 @@ class TestUfunc(TestCase): # check binary PyUFunc_ff_f. msg = "PyUFunc_ff_f" x = np.ones(10, dtype=np.single)[0::2] - assert_almost_equal(ftwo(x,x), ftwo_val, err_msg=msg) + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_dd_d. msg = "PyUFunc_dd_d" x = np.ones(10, dtype=np.double)[0::2] - assert_almost_equal(ftwo(x,x), ftwo_val, err_msg=msg) + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_gg_g. msg = "PyUFunc_gg_g" x = np.ones(10, dtype=np.longdouble)[0::2] - assert_almost_equal(ftwo(x,x), ftwo_val, err_msg=msg) + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_FF_F. msg = "PyUFunc_FF_F" x = np.ones(10, dtype=np.csingle)[0::2] - assert_almost_equal(ftwo(x,x), ftwo_val, err_msg=msg) + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_DD_D. msg = "PyUFunc_DD_D" x = np.ones(10, dtype=np.cdouble)[0::2] - assert_almost_equal(ftwo(x,x), ftwo_val, err_msg=msg) + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # check binary PyUFunc_GG_G. msg = "PyUFunc_GG_G" x = np.ones(10, dtype=np.clongdouble)[0::2] - assert_almost_equal(ftwo(x,x), ftwo_val, err_msg=msg) + assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg) # class to use in testing object method loops class foo(object): @@ -154,13 +154,13 @@ class TestUfunc(TestCase): # check binary PyUFunc_OO_O msg = "PyUFunc_OO_O" x = np.ones(10, dtype=np.object)[0::2] - assert_(np.all(np.add(x,x) == 2), msg) + assert_(np.all(np.add(x, x) == 2), msg) # check binary PyUFunc_OO_O_method msg = "PyUFunc_OO_O_method" x = np.zeros(10, dtype=np.object)[0::2] for i in range(len(x)) : x[i] = foo() - assert_(np.all(np.logical_xor(x,x)), msg) + assert_(np.all(np.logical_xor(x, x)), msg) # check PyUFunc_On_Om # fixme -- I don't know how to do this yet @@ -252,117 +252,117 @@ class TestUfunc(TestCase): def test_signature(self): # the arguments to test_signature are: nin, nout, core_signature # pass - assert_equal(umt.test_signature(2,1,"(i),(i)->()"), 1) + assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1) # pass. 
empty core signature; treat as plain ufunc (with trivial core) - assert_equal(umt.test_signature(2,1,"(),()->()"), 0) + assert_equal(umt.test_signature(2, 1, "(),()->()"), 0) # in the following calls, a ValueError should be raised because # of error in core signature # error: extra parenthesis msg = "core_sig: extra parenthesis" try: - ret = umt.test_signature(2,1,"((i)),(i)->()") + ret = umt.test_signature(2, 1, "((i)),(i)->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # error: parenthesis matching msg = "core_sig: parenthesis matching" try: - ret = umt.test_signature(2,1,"(i),)i(->()") + ret = umt.test_signature(2, 1, "(i),)i(->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # error: incomplete signature. letters outside of parenthesis are ignored msg = "core_sig: incomplete signature" try: - ret = umt.test_signature(2,1,"(i),->()") + ret = umt.test_signature(2, 1, "(i),->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # error: incomplete signature. 2 output arguments are specified msg = "core_sig: incomplete signature" try: - ret = umt.test_signature(2,2,"(i),(i)->()") + ret = umt.test_signature(2, 2, "(i),(i)->()") assert_equal(ret, None, err_msg=msg) except ValueError: None # more complicated names for variables - assert_equal(umt.test_signature(2,1,"(i1,i2),(J_1)->(_kAB)"),1) + assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1) def test_get_signature(self): assert_equal(umt.inner1d.signature, "(i),(i)->()") def test_forced_sig(self): - a = 0.5*np.arange(3,dtype='f8') - assert_equal(np.add(a,0.5), [0.5, 1, 1.5]) - assert_equal(np.add(a,0.5,sig='i',casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a,0.5,sig='ii->i',casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a,0.5,sig=('i4',),casting='unsafe'), [0, 0, 1]) - assert_equal(np.add(a,0.5,sig=('i4','i4','i4'), + a = 0.5*np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1]) + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), casting='unsafe'), [0, 0, 1]) - b = np.zeros((3,),dtype='f8') - np.add(a,0.5,out=b) + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) assert_equal(b, [0.5, 1, 1.5]) b[:] = 0 - np.add(a,0.5,sig='i',out=b, casting='unsafe') + np.add(a, 0.5, sig='i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 - np.add(a,0.5,sig='ii->i',out=b, casting='unsafe') + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 - np.add(a,0.5,sig=('i4',),out=b, casting='unsafe') + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) b[:] = 0 - np.add(a,0.5,sig=('i4','i4','i4'),out=b, casting='unsafe') + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') assert_equal(b, [0, 0, 1]) def test_inner1d(self): - a = np.arange(6).reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1)) + a = np.arange(6).reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1)) a = np.arange(6) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a)) def test_broadcast(self): msg = "broadcast" - a = np.arange(4).reshape((2,1,2)) - b = np.arange(4).reshape((1,2,2)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + a = np.arange(4).reshape((2, 1, 2)) + b = 
np.arange(4).reshape((1, 2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "extend & broadcast loop dimensions" - b = np.arange(4).reshape((2,2)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + b = np.arange(4).reshape((2, 2)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "broadcast in core dimensions" - a = np.arange(8).reshape((4,2)) - b = np.arange(4).reshape((4,1)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "extend & broadcast core and loop dimensions" - a = np.arange(8).reshape((4,2)) + a = np.arange(8).reshape((4, 2)) b = np.array(7) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) msg = "broadcast should fail" - a = np.arange(2).reshape((2,1,1)) - b = np.arange(3).reshape((3,1,1)) + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) try: - ret = umt.inner1d(a,b) + ret = umt.inner1d(a, b) assert_equal(ret, None, err_msg=msg) except ValueError: None def test_type_cast(self): msg = "type cast" - a = np.arange(6, dtype='short').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) + a = np.arange(6, dtype='short').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) msg = "type cast on one argument" - a = np.arange(6).reshape((2,3)) + a = np.arange(6).reshape((2, 3)) b = a+0.1 - assert_array_almost_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), + assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) def test_endian(self): msg = "big endian" - a = np.arange(6, dtype='>i4').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) + a = np.arange(6, dtype='>i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) msg = "little endian" - a = np.arange(6, dtype='<i4').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) + a = np.arange(6, dtype='<i4').reshape((2, 3)) + assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg) # Output should always be native-endian Ba = np.arange(1, dtype='>f8') @@ -379,71 +379,71 @@ class TestUfunc(TestCase): def test_incontiguous_array(self): msg = "incontiguous memory layout of array" - x = np.arange(64).reshape((2,2,2,2,2,2)) - a = x[:,0,:,0,:,0] - b = x[:,1,:,1,:,1] - a[0,0,0] = -1 + x = np.arange(64).reshape((2, 2, 2, 2, 2, 2)) + a = x[:, 0,:, 0,:, 0] + b = x[:, 1,:, 1,:, 1] + a[0, 0, 0] = -1 msg2 = "make sure it references to the original array" - assert_equal(x[0,0,0,0,0,0], -1, err_msg=msg2) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - x = np.arange(24).reshape(2,3,4) + assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) + x = np.arange(24).reshape(2, 3, 4) a = x.T b = x.T - a[0,0,0] = -1 - assert_equal(x[0,0,0], -1, err_msg=msg2) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + a[0, 0, 0] = -1 + assert_equal(x[0, 0, 0], -1, err_msg=msg2) + assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg) def test_output_argument(self): msg = "output argument" - a = np.arange(12).reshape((2,3,2)) - b = 
np.arange(4).reshape((2,1,2)) + 1 - c = np.zeros((2,3),dtype='int') - umt.inner1d(a,b,c) - assert_array_equal(c, np.sum(a*b,axis=-1), err_msg=msg) + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(4).reshape((2, 1, 2)) + 1 + c = np.zeros((2, 3), dtype='int') + umt.inner1d(a, b, c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) c[:] = -1 - umt.inner1d(a,b,out=c) - assert_array_equal(c, np.sum(a*b,axis=-1), err_msg=msg) + umt.inner1d(a, b, out=c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) msg = "output argument with type cast" - c = np.zeros((2,3),dtype='int16') - umt.inner1d(a,b,c) - assert_array_equal(c, np.sum(a*b,axis=-1), err_msg=msg) + c = np.zeros((2, 3), dtype='int16') + umt.inner1d(a, b, c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) c[:] = -1 - umt.inner1d(a,b,out=c) - assert_array_equal(c, np.sum(a*b,axis=-1), err_msg=msg) + umt.inner1d(a, b, out=c) + assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg) msg = "output argument with incontiguous layout" - c = np.zeros((2,3,4),dtype='int16') - umt.inner1d(a,b,c[...,0]) - assert_array_equal(c[...,0], np.sum(a*b,axis=-1), err_msg=msg) + c = np.zeros((2, 3, 4), dtype='int16') + umt.inner1d(a, b, c[..., 0]) + assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg) c[:] = -1 - umt.inner1d(a,b,out=c[...,0]) - assert_array_equal(c[...,0], np.sum(a*b,axis=-1), err_msg=msg) + umt.inner1d(a, b, out=c[..., 0]) + assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg) def test_innerwt(self): - a = np.arange(6).reshape((2,3)) - b = np.arange(10,16).reshape((2,3)) - w = np.arange(20,26).reshape((2,3)) - assert_array_equal(umt.innerwt(a,b,w), np.sum(a*b*w,axis=-1)) - a = np.arange(100,124).reshape((2,3,4)) - b = np.arange(200,224).reshape((2,3,4)) - w = np.arange(300,324).reshape((2,3,4)) - assert_array_equal(umt.innerwt(a,b,w), np.sum(a*b*w,axis=-1)) + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) def test_innerwt_empty(self): """Test generalized ufunc with zero-sized operands""" a = np.array([], dtype='f8') b = np.array([], dtype='f8') w = np.array([], dtype='f8') - assert_array_equal(umt.innerwt(a,b,w), np.sum(a*b*w,axis=-1)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1)) def test_matrix_multiply(self): self.compare_matrix_multiply_results(np.long) self.compare_matrix_multiply_results(np.double) def compare_matrix_multiply_results(self, tp): - d1 = np.array(rand(2,3,4), dtype=tp) - d2 = np.array(rand(2,3,4), dtype=tp) + d1 = np.array(rand(2, 3, 4), dtype=tp) + d2 = np.array(rand(2, 3, 4), dtype=tp) msg = "matrix multiply on type %s" % d1.dtype.name def permute_n(n): @@ -466,14 +466,14 @@ class TestUfunc(TestCase): base = slice_n(n-1) for sl in base: ret += (sl+(slice(None),),) - ret += (sl+(slice(0,1),),) + ret += (sl+(slice(0, 1),),) return ret - def broadcastable(s1,s2): + def broadcastable(s1, s2): return s1 == s2 or s1 == 1 or s2 == 1 permute_3 = permute_n(3) - slice_3 = slice_n(3) + ((slice(None,None,-1),)*3,) + slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,) ref = True for p1 in permute_3: @@ -487,9 +487,9 @@ class TestUfunc(TestCase): if broadcastable(a1.shape[-1], a2.shape[-2]) and \ broadcastable(a1.shape[0], 
a2.shape[0]): assert_array_almost_equal( - umt.matrix_multiply(a1,a2), - np.sum(a2[...,np.newaxis].swapaxes(-3,-1) * - a1[...,np.newaxis,:], axis=-1), + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis,:], axis=-1), err_msg = msg+' %s %s' % (str(a1.shape), str(a2.shape))) @@ -587,9 +587,9 @@ class TestUfunc(TestCase): def test_casting_out_param(self): # Test that it's possible to do casts on output - a = np.ones((200,100), np.int64) - b = np.ones((200,100), np.int64) - c = np.ones((200,100), np.float64) + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) np.add(a, b, out=c) assert_equal(c, 2) @@ -604,13 +604,13 @@ class TestUfunc(TestCase): b = np.ones(7) c = np.zeros(7) np.add(a, b, out=c, where=(a % 2 == 1)) - assert_equal(c, [0,2,0,4,0,6,0]) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) - a = np.arange(4).reshape(2,2) + 2 - np.power(a, [2,3], out=a, where=[[0,1],[1,0]]) + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) assert_equal(a, [[2, 27], [16, 5]]) # Broadcasting the where= parameter - np.subtract(a, 2, out=a, where=[True,False]) + np.subtract(a, 2, out=a, where=[True, False]) assert_equal(a, [[0, 27], [14, 5]]) def test_where_param_buffer_output(self): @@ -621,76 +621,76 @@ class TestUfunc(TestCase): a = np.ones(10, np.int64) b = np.ones(10, np.int64) c = 1.5 * np.ones(10, np.float64) - np.add(a, b, out=c, where=[1,0,0,1,0,0,1,1,1,0]) - assert_equal(c, [2,1.5,1.5,2,1.5,1.5,2,2,2,1.5]) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) def check_identityless_reduction(self, a): # np.minimum.reduce is a identityless reduction # Verify that it sees the zero at various positions a[...] = 1 - a[1,0,0] = 0 + a[1, 0, 0] = 0 assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0,1)), [0,1,1,1]) - assert_equal(np.minimum.reduce(a, axis=(0,2)), [0,1,1]) - assert_equal(np.minimum.reduce(a, axis=(1,2)), [1,0]) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) assert_equal(np.minimum.reduce(a, axis=0), - [[0,1,1,1], [1,1,1,1], [1,1,1,1]]) + [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=1), - [[1,1,1,1], [0,1,1,1]]) + [[1, 1, 1, 1], [0, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=2), - [[1,1,1], [0,1,1]]) + [[1, 1, 1], [0, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=()), a) a[...] = 1 - a[0,1,0] = 0 + a[0, 1, 0] = 0 assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0,1)), [0,1,1,1]) - assert_equal(np.minimum.reduce(a, axis=(0,2)), [1,0,1]) - assert_equal(np.minimum.reduce(a, axis=(1,2)), [0,1]) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) assert_equal(np.minimum.reduce(a, axis=0), - [[1,1,1,1], [0,1,1,1], [1,1,1,1]]) + [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=1), - [[0,1,1,1], [1,1,1,1]]) + [[0, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=2), - [[1,0,1], [1,1,1]]) + [[1, 0, 1], [1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=()), a) a[...] 
= 1 - a[0,0,1] = 0 + a[0, 0, 1] = 0 assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0,1)), [1,0,1,1]) - assert_equal(np.minimum.reduce(a, axis=(0,2)), [0,1,1]) - assert_equal(np.minimum.reduce(a, axis=(1,2)), [0,1]) + assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) + assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) assert_equal(np.minimum.reduce(a, axis=0), - [[1,0,1,1], [1,1,1,1], [1,1,1,1]]) + [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=1), - [[1,0,1,1], [1,1,1,1]]) + [[1, 0, 1, 1], [1, 1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=2), - [[0,1,1], [1,1,1]]) + [[0, 1, 1], [1, 1, 1]]) assert_equal(np.minimum.reduce(a, axis=()), a) def test_identityless_reduction_corder(self): - a = np.empty((2,3,4), order='C') + a = np.empty((2, 3, 4), order='C') self.check_identityless_reduction(a) def test_identityless_reduction_forder(self): - a = np.empty((2,3,4), order='F') + a = np.empty((2, 3, 4), order='F') self.check_identityless_reduction(a) def test_identityless_reduction_otherorder(self): - a = np.empty((2,4,3), order='C').swapaxes(1,2) + a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) self.check_identityless_reduction(a) def test_identityless_reduction_noncontig(self): - a = np.empty((3,5,4), order='C').swapaxes(1,2) + a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) a = a[1:, 1:, 1:] self.check_identityless_reduction(a) def test_identityless_reduction_noncontig_unaligned(self): a = np.empty((3*4*5*8 + 1,), dtype='i1') a = a[1:].view(dtype='f8') - a.shape = (3,4,5) + a.shape = (3, 4, 5) a = a[1:, 1:, 1:] self.check_identityless_reduction(a) @@ -706,7 +706,7 @@ class TestUfunc(TestCase): res = np.divide.reduce(a, axis=()) assert_equal(res, a) - assert_raises(ValueError, np.divide.reduce, a, axis=(0,1)) + assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) def test_reduce_zero_axis(self): # If we have a n x m array and do a reduction with axis=1, then we are @@ -801,37 +801,37 @@ class TestUfunc(TestCase): # no output type should raise TypeError assert_raises(TypeError, test_add, a, b) - + def test_operand_flags(self): - a = np.arange(16, dtype='l').reshape(4,4) - b = np.arange(9, dtype='l').reshape(3,3) - opflag_tests.inplace_add(a[:-1,:-1], b) - assert_equal(a, np.array([[0,2,4,3],[7,9,11,7], - [14,16,18,11],[12,13,14,15]], dtype='l')) + a = np.arange(16, dtype='l').reshape(4, 4) + b = np.arange(9, dtype='l').reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l')) a = np.array(0) opflag_tests.inplace_add(a, 3) assert_equal(a, 3) opflag_tests.inplace_add(a, [3, 4]) assert_equal(a, 10) - + def test_struct_ufunc(self): import numpy.core.struct_ufunc_test as struct_ufunc - a = np.array([(1,2,3)], dtype='u8,u8,u8') - b = np.array([(1,2,3)], dtype='u8,u8,u8') + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') result = struct_ufunc.add_triplet(a, b) assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) def test_custom_ufunc(self): - a = np.array([rational(1,2), rational(1,3), rational(1,4)], + a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], dtype=rational); - b = np.array([rational(1,2), rational(1,3), rational(1,4)], + b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)], dtype=rational); result = test_add_rationals(a, b) - expected = 
np.array([rational(1), rational(2,3), rational(1,2)], + expected = np.array([rational(1), rational(2, 3), rational(1, 2)], dtype=rational); assert_equal(result, expected); @@ -868,95 +868,95 @@ class TestUfunc(TestCase): def test_inplace_fancy_indexing(self): a = np.arange(10) - np.add.at(a, [2,5,2], 1) + np.add.at(a, [2, 5, 2], 1) assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) a = np.arange(10) - b = np.array([100,100,100]) - np.add.at(a, [2,5,2], b) + b = np.array([100, 100, 100]) + np.add.at(a, [2, 5, 2], b) assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) - a = np.arange(9).reshape(3,3) - b = np.array([[100,100,100],[200,200,200],[300,300,300]]) - np.add.at(a, (slice(None), [1,2,1]), b) - assert_equal(a, [[0,201,102], [3,404,205], [6,607,308]]) + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) - a = np.arange(27).reshape(3,3,3) - b = np.array([100,200,300]) - np.add.at(a, (slice(None), slice(None), [1,2,1]), b) + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) assert_equal(a, - [[[0,401,202], - [3,404,205], - [6,407,208]], - - [[9, 410,211], - [12,413,214], - [15,416,217]], - - [[18,419,220], - [21,422,223], - [24,425,226]]]) - - a = np.arange(9).reshape(3,3) - b = np.array([[100,100,100],[200,200,200],[300,300,300]]) - np.add.at(a, ([1,2,1], slice(None)), b) - assert_equal(a, [[0,1,2], [403,404,405], [206,207,208]]) - - a = np.arange(27).reshape(3,3,3) - b = np.array([100,200,300]) - np.add.at(a, (slice(None), [1,2,1], slice(None)), b) + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) assert_equal(a, - [[[0, 1, 2 ], - [203,404,605], - [106,207,308]], - - [[9, 10, 11 ], - [212,413,614], - [115,216,317]], + [[[0, 1, 2 ], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11 ], + [212, 413, 614], + [115, 216, 317]], [[18, 19, 20 ], - [221,422,623], - [124,225,326]]]) + [221, 422, 623], + [124, 225, 326]]]) - a = np.arange(9).reshape(3,3) - b = np.array([100,200,300]) - np.add.at(a, (0, [1,2,1]), b) - assert_equal(a, [[0,401,202], [3,4,5], [6,7,8]]) + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) - a = np.arange(27).reshape(3,3,3) - b = np.array([100,200,300]) - np.add.at(a, ([1,2,1], 0, slice(None)), b) + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) assert_equal(a, - [[[0, 1, 2], - [3, 4, 5], + [[[0, 1, 2], + [3, 4, 5], [6, 7, 8]], - - [[209,410,611], - [12, 13, 14], + + [[209, 410, 611], + [12, 13, 14], [15, 16, 17]], - [[118,219,320], + [[118, 219, 320], [21, 22, 23], [24, 25, 26]]]) - a = np.arange(27).reshape(3,3,3) - b = np.array([100,200,300]) + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) np.add.at(a, (slice(None), slice(None), slice(None)), b) assert_equal(a, - [[[100,201,302], - [103,204,305], - 
[106,207,308]], - - [[109,210,311], - [112,213,314], - [115,216,317]], - - [[118,219,320], - [121,222,323], - [124,225,326]]]) + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) a = np.arange(10) - np.negative.at(a, [2,5,2]) + np.negative.at(a, [2, 5, 2]) assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9]) # Test 0-dim array @@ -969,7 +969,7 @@ class TestUfunc(TestCase): # Test mixed dtypes a = np.arange(10) - np.power.at(a, [1,2,3,2], 3.5) + np.power.at(a, [1, 2, 3, 2], 3.5) assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) # Test boolean indexing and boolean ufuncs @@ -985,19 +985,19 @@ class TestUfunc(TestCase): # Test empty subspace orig = np.arange(4) - a = orig[:,None][:,0:0] - np.add.at(a, [0,1], 3) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) assert_array_equal(orig, np.arange(4)) # Test with swapped byte order - index = np.array([1,2,1], np.dtype('i').newbyteorder()) - values = np.array([1,2,3,4], np.dtype('f').newbyteorder()) + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) np.add.at(values, index, 3) - assert_array_equal(values, [1,8,6,4]) + assert_array_equal(values, [1, 8, 6, 4]) # Test exception thrown values = np.array(['a', 1], dtype=np.object) - self.assertRaises(TypeError, np.add.at, values, [0,1], 1) + self.assertRaises(TypeError, np.add.at, values, [0, 1], 1) assert_array_equal(values, np.array(['a', 1], dtype=np.object)) if __name__ == "__main__": diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py index b2d47d052..eb18304ea 100644 --- a/numpy/core/tests/test_umath.py +++ b/numpy/core/tests/test_umath.py @@ -78,7 +78,7 @@ class TestDivision(TestCase): msg = "Complex floor division implementation check" x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128) y = np.array([0., -1., 0., 0.], dtype=np.complex128) - assert_equal(np.floor_divide(x**2,x), y, err_msg=msg) + assert_equal(np.floor_divide(x**2, x), y, err_msg=msg) # check overflow, underflow msg = "Complex floor division overflow/underflow check" x = np.array([1.e+110, 1.e-110], dtype=np.complex128) @@ -169,14 +169,14 @@ class TestPower(TestCase): assert_complex_equal(np.power(zero, -1+0.2j), cnan) def test_fast_power(self): - x = np.array([1,2,3], np.int16) + x = np.array([1, 2, 3], np.int16) assert_((x**2.00001).dtype is (x**2.0).dtype) class TestLog2(TestCase): def test_log2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.log2(xf), yf) @@ -186,7 +186,7 @@ class TestExp2(TestCase): def test_exp2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.exp2(yf), xf) @@ -198,7 +198,7 @@ class TestLogAddExp2(_FilterInvalids): x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + for dt, dec in zip(['f', 'd', 'g'], [6, 15, 15]) : xf = np.log2(np.array(x, dtype=dt)) yf = np.log2(np.array(y, dtype=dt)) zf = np.log2(np.array(z, dtype=dt)) @@ -208,7 +208,7 @@ class TestLogAddExp2(_FilterInvalids): x = [1000000, -1000000, 
1000200, -1000200] y = [1000200, -1000200, 1000000, -1000000] z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) @@ -220,7 +220,7 @@ class TestLogAddExp2(_FilterInvalids): y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] z = [inf, inf, inf, -inf, inf, inf, 1, 1] with np.errstate(invalid='ignore'): - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) @@ -238,7 +238,7 @@ class TestLog(TestCase): def test_log_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ @@ -249,7 +249,7 @@ class TestExp(TestCase): def test_exp_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ @@ -261,7 +261,7 @@ class TestLogAddExp(_FilterInvalids): x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + for dt, dec in zip(['f', 'd', 'g'], [6, 15, 15]) : xf = np.log(np.array(x, dtype=dt)) yf = np.log(np.array(y, dtype=dt)) zf = np.log(np.array(z, dtype=dt)) @@ -271,7 +271,7 @@ class TestLogAddExp(_FilterInvalids): x = [1000000, -1000000, 1000200, -1000200] y = [1000200, -1000200, 1000000, -1000000] z = [1000200, -1000000, 1000200, -1000000] - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) @@ -283,7 +283,7 @@ class TestLogAddExp(_FilterInvalids): y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] z = [inf, inf, inf, -inf, inf, inf, 1, 1] with np.errstate(invalid='ignore'): - for dt in ['f','d','g'] : + for dt in ['f', 'd', 'g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) @@ -479,8 +479,8 @@ class TestMaximum(_FilterInvalids): assert_equal(func(tmp2), np.nan) def test_reduce_complex(self): - assert_equal(np.maximum.reduce([1,2j]),1) - assert_equal(np.maximum.reduce([1+3j,2j]),1+3j) + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j) def test_float_nans(self): nan = np.nan @@ -526,8 +526,8 @@ class TestMinimum(_FilterInvalids): assert_equal(func(tmp2), np.nan) def test_reduce_complex(self): - assert_equal(np.minimum.reduce([1,2j]),2j) - assert_equal(np.minimum.reduce([1+3j,2j]),2j) + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1+3j, 2j]), 2j) def test_float_nans(self): nan = np.nan @@ -573,8 +573,8 @@ class TestFmax(_FilterInvalids): assert_equal(func(tmp2), 9) def test_reduce_complex(self): - assert_equal(np.fmax.reduce([1,2j]),1) - assert_equal(np.fmax.reduce([1+3j,2j]),1+3j) + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j) def test_float_nans(self): nan = np.nan @@ -615,8 +615,8 @@ class TestFmin(_FilterInvalids): assert_equal(func(tmp2), 1) def test_reduce_complex(self): - assert_equal(np.fmin.reduce([1,2j]),2j) - assert_equal(np.fmin.reduce([1+3j,2j]),2j) + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1+3j, 2j]), 2j) def test_float_nans(self): nan = 
np.nan @@ -784,21 +784,21 @@ class TestSpecialMethods(TestCase): b = B() c = C() f = ncu.minimum - self.assertTrue(type(f(x,x)) is np.ndarray) - self.assertTrue(type(f(x,a)) is A) - self.assertTrue(type(f(x,b)) is B) - self.assertTrue(type(f(x,c)) is C) - self.assertTrue(type(f(a,x)) is A) - self.assertTrue(type(f(b,x)) is B) - self.assertTrue(type(f(c,x)) is C) - - self.assertTrue(type(f(a,a)) is A) - self.assertTrue(type(f(a,b)) is B) - self.assertTrue(type(f(b,a)) is B) - self.assertTrue(type(f(b,b)) is B) - self.assertTrue(type(f(b,c)) is C) - self.assertTrue(type(f(c,b)) is C) - self.assertTrue(type(f(c,c)) is C) + self.assertTrue(type(f(x, x)) is np.ndarray) + self.assertTrue(type(f(x, a)) is A) + self.assertTrue(type(f(x, b)) is B) + self.assertTrue(type(f(x, c)) is C) + self.assertTrue(type(f(a, x)) is A) + self.assertTrue(type(f(b, x)) is B) + self.assertTrue(type(f(c, x)) is C) + + self.assertTrue(type(f(a, a)) is A) + self.assertTrue(type(f(a, b)) is B) + self.assertTrue(type(f(b, a)) is B) + self.assertTrue(type(f(b, b)) is B) + self.assertTrue(type(f(b, c)) is C) + self.assertTrue(type(f(c, b)) is C) + self.assertTrue(type(f(c, c)) is C) self.assertTrue(type(ncu.exp(a) is A)) self.assertTrue(type(ncu.exp(b) is B)) @@ -871,9 +871,9 @@ class TestSpecialMethods(TestCase): class TestChoose(TestCase): def test_mixed(self): - c = np.array([True,True]) - a = np.array([True,True]) - assert_equal(np.choose(c, (a, 1)), np.array([1,1])) + c = np.array([True, True]) + a = np.array([True, True]) + assert_equal(np.choose(c, (a, 1)), np.array([1, 1])) def is_longdouble_finfo_bogus(): @@ -966,7 +966,7 @@ class TestComplexFunctions(object): for p in points: a = complex(func(np.complex_(p))) b = cfunc(p) - assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname,p,a,b)) + assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s"%(fname, p, a, b)) def check_loss_of_precision(self, dtype): """Check loss of precision in complex arc* functions""" @@ -1046,8 +1046,8 @@ class TestComplexFunctions(object): good = (abs(func(zp) - func(zm)) < 2*eps) assert_(np.all(good), (func, z0[~good])) - for func in (np.arcsinh,np.arcsinh,np.arcsin,np.arctanh,np.arctan): - pts = [rp+1j*ip for rp in (-1e-3,0,1e-3) for ip in(-1e-3,0,1e-3) + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3) if rp != 0 or ip != 0] check(func, pts, 1) check(func, pts, 1j) @@ -1081,7 +1081,7 @@ class TestSubclass(TestCase): self = np.ndarray.__new__(subtype, shape, dtype=object) self.fill(0) return self - a = simple((3,4)) + a = simple((3, 4)) assert_equal(a+a, a) def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, @@ -1237,12 +1237,12 @@ def test_pos_nan(): def test_reduceat(): """Test bug in reduceat when structured arrays are not copied.""" - db = np.dtype([('name', 'S11'),('time', np.int64), ('value', np.float32)]) + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) a = np.empty([100], dtype=db) a['name'] = 'Simple' a['time'] = 10 a['value'] = 100 - indx = [0,7,15,25] + indx = [0, 7, 15, 25] h2 = [] val1 = indx[0] @@ -1271,7 +1271,7 @@ def test_reduceat_empty(): assert_equal(result.dtype, x.dtype) assert_equal(result.shape, (0,)) # Another case with a slightly different zero-sized shape - x = np.ones((5,2)) + x = np.ones((5, 2)) result = np.add.reduceat(x, [], axis=0) assert_equal(result.dtype, x.dtype) assert_equal(result.shape, (0, 2)) diff --git a/numpy/core/tests/test_umath_complex.py 
b/numpy/core/tests/test_umath_complex.py index ebf805c6d..4f3da4397 100644 --- a/numpy/core/tests/test_umath_complex.py +++ b/numpy/core/tests/test_umath_complex.py @@ -484,7 +484,7 @@ class TestCarg(object): # carg(+- 0 + yi) returns -pi/2 for y < 0 yield check_real_value, ncu._arg, np.PZERO, -1, 0.5 * np.pi, False - yield check_real_value, ncu._arg, np.NZERO, -1,-0.5 * np.pi, False + yield check_real_value, ncu._arg, np.NZERO, -1, -0.5 * np.pi, False #def test_branch_cuts(self): # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py index b29fe2010..d184b3a9f 100644 --- a/numpy/core/tests/test_unicode.py +++ b/numpy/core/tests/test_unicode.py @@ -84,9 +84,9 @@ class create_zeros(object): def test_zerosMD(self): """Check creation of multi-dimensional objects""" - ua = zeros((2,3,4), dtype='U%s' % self.ulen) - self.content_check(ua, ua[0,0,0], 4*self.ulen*2*3*4) - self.content_check(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) + ua = zeros((2, 3, 4), dtype='U%s' % self.ulen) + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) class test_create_zeros_1(create_zeros, TestCase): @@ -145,8 +145,8 @@ class create_values(object): def test_valuesMD(self): """Check creation of multi-dimensional objects with values""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) - self.content_check(ua, ua[0,0,0], 4*self.ulen*2*3*4) - self.content_check(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) class test_create_values_1_ucs2(create_values, TestCase): @@ -232,11 +232,11 @@ class assign_values(object): def test_valuesMD(self): """Check assignment of multi-dimensional objects with values""" - ua = zeros((2,3,4), dtype='U%s' % self.ulen) - ua[0,0,0] = self.ucs_value*self.ulen - self.content_check(ua, ua[0,0,0], 4*self.ulen*2*3*4) - ua[-1,-1,-1] = self.ucs_value*self.ulen - self.content_check(ua, ua[-1,-1,-1], 4*self.ulen*2*3*4) + ua = zeros((2, 3, 4), dtype='U%s' % self.ulen) + ua[0, 0, 0] = self.ucs_value*self.ulen + self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4) + ua[-1, -1, -1] = self.ucs_value*self.ulen + self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) class test_assign_values_1_ucs2(assign_values, TestCase): @@ -310,8 +310,8 @@ class byteorder_values: ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) ua2 = ua.newbyteorder() - self.assertTrue(ua[0,0,0] != ua2[0,0,0]) - self.assertTrue(ua[-1,-1,-1] != ua2[-1,-1,-1]) + self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0]) + self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 48f6f09b6..8484685c0 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -56,7 +56,7 @@ def CCompiler_spawn(self, cmd, display=None): if is_sequence(display): display = ' '.join(list(display)) log.info(display) - s,o = exec_command(cmd) + s, o = exec_command(cmd) if s: if is_sequence(cmd): cmd = ' '.join(list(cmd)) @@ -114,7 +114,7 @@ def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=' raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) if strip_dir: base = os.path.basename(base) - obj_name = os.path.join(output_dir,base + self.obj_extension) + 
obj_name = os.path.join(output_dir, base + self.obj_extension) obj_names.append(obj_name) return obj_names @@ -170,8 +170,8 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None, from numpy.distutils.fcompiler import FCompiler if isinstance(self, FCompiler): display = [] - for fc in ['f77','f90','fix']: - fcomp = getattr(self,'compiler_'+fc) + for fc in ['f77', 'f90', 'fix']: + fcomp = getattr(self, 'compiler_'+fc) if fcomp is None: continue display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) @@ -236,7 +236,7 @@ def CCompiler_customize_cmd(self, cmd, ignore=()): if allow('include_dirs'): self.set_include_dirs(cmd.include_dirs) if allow('define'): - for (name,value) in cmd.define: + for (name, value) in cmd.define: self.define_macro(name, value) if allow('undef'): for macro in cmd.undef: @@ -256,16 +256,16 @@ def _compiler_to_string(compiler): props = [] mx = 0 keys = list(compiler.executables.keys()) - for key in ['version','libraries','library_dirs', - 'object_switch','compile_switch', - 'include_dirs','define','undef','rpath','link_objects']: + for key in ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch', + 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: if key not in keys: keys.append(key) for key in keys: - if hasattr(compiler,key): + if hasattr(compiler, key): v = getattr(compiler, key) - mx = max(mx,len(key)) - props.append((key,repr(v))) + mx = max(mx, len(key)) + props.append((key, repr(v))) lines = [] format = '%-' + repr(mx+1) + 's = %s' for prop in props: @@ -290,13 +290,13 @@ def CCompiler_show_customization(self): """ if 0: - for attrname in ['include_dirs','define','undef', - 'libraries','library_dirs', - 'rpath','link_objects']: - attr = getattr(self,attrname,None) + for attrname in ['include_dirs', 'define', 'undef', + 'libraries', 'library_dirs', + 'rpath', 'link_objects']: + attr = getattr(self, attrname, None) if not attr: continue - log.info("compiler '%s' is set to %s" % (attrname,attr)) + log.info("compiler '%s' is set to %s" % (attrname, attr)) try: self.get_version() except: @@ -351,16 +351,16 @@ def CCompiler_customize(self, dist, need_cxx=0): except (AttributeError, ValueError): pass - if hasattr(self,'compiler') and 'cc' in self.compiler[0]: + if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: if not self.compiler_cxx: if self.compiler[0].startswith('gcc'): a, b = 'gcc', 'g++' else: a, b = 'cc', 'c++' - self.compiler_cxx = [self.compiler[0].replace(a,b)]\ + self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + self.compiler[1:] else: - if hasattr(self,'compiler'): + if hasattr(self, 'compiler'): log.warn("#### %s #######" % (self.compiler,)) log.warn('Missing compiler_cxx fix for '+self.__class__.__name__) return @@ -396,7 +396,7 @@ def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): def matcher(self, version_string): # version string may appear in the second line, so getting rid # of new lines: - version_string = version_string.replace('\n',' ') + version_string = version_string.replace('\n', ' ') pos = 0 if start: m = re.match(start, version_string) @@ -434,7 +434,7 @@ def CCompiler_get_version(self, force=False, ok_status=[0]): Version string, in the format of `distutils.version.LooseVersion`. 
""" - if not force and hasattr(self,'version'): + if not force and hasattr(self, 'version'): return self.version self.find_executables() try: @@ -457,7 +457,7 @@ def CCompiler_get_version(self, force=False, ok_status=[0]): version = m.group('version') return version - status, output = exec_command(version_cmd,use_tee=0) + status, output = exec_command(version_cmd, use_tee=0) version = None if status in ok_status: @@ -496,18 +496,18 @@ def CCompiler_cxx_compiler(self): replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) -compiler_class['intel'] = ('intelccompiler','IntelCCompiler', +compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', "Intel C Compiler for 32-bit applications") -compiler_class['intele'] = ('intelccompiler','IntelItaniumCCompiler', +compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', "Intel C Itanium Compiler for Itanium-based applications") -compiler_class['intelem'] = ('intelccompiler','IntelEM64TCCompiler', +compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', "Intel C Compiler for 64-bit applications") -compiler_class['pathcc'] = ('pathccompiler','PathScaleCCompiler', +compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', "PathScale Compiler for SiCortex-based applications") -ccompiler._default_compilers += (('linux.*','intel'), - ('linux.*','intele'), - ('linux.*','intelem'), - ('linux.*','pathcc')) +ccompiler._default_compilers += (('linux.*', 'intel'), + ('linux.*', 'intele'), + ('linux.*', 'intelem'), + ('linux.*', 'pathcc')) if sys.platform == 'win32': compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py index 94a7185bb..76a260072 100644 --- a/numpy/distutils/command/__init__.py +++ b/numpy/distutils/command/__init__.py @@ -24,7 +24,7 @@ distutils_all = [ #'build_py', 'bdist_wininst', ] -__import__('distutils.command',globals(),locals(),distutils_all) +__import__('distutils.command', globals(), locals(), distutils_all) __all__ = ['build', 'config_compiler', diff --git a/numpy/distutils/command/bdist_rpm.py b/numpy/distutils/command/bdist_rpm.py index e24072ff4..3e52a503b 100644 --- a/numpy/distutils/command/bdist_rpm.py +++ b/numpy/distutils/command/bdist_rpm.py @@ -19,6 +19,6 @@ class bdist_rpm(old_bdist_rpm): return spec_file new_spec_file = [] for line in spec_file: - line = line.replace('setup.py',setup_py) + line = line.replace('setup.py', setup_py) new_spec_file.append(line) return new_spec_file diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py index 0577738bf..b6912be15 100644 --- a/numpy/distutils/command/build.py +++ b/numpy/distutils/command/build.py @@ -19,7 +19,7 @@ class build(old_build): ] help_options = old_build.help_options + [ - ('help-fcompiler',None, "list available Fortran compilers", + ('help-fcompiler', None, "list available Fortran compilers", show_fortran_compilers), ] diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py index 8b150d800..84ca87250 100644 --- a/numpy/distutils/command/build_clib.py +++ b/numpy/distutils/command/build_clib.py @@ -42,13 +42,13 @@ class build_clib(old_build_clib): def have_f_sources(self): for (lib_name, build_info) in self.libraries: - if has_f_sources(build_info.get('sources',[])): + if has_f_sources(build_info.get('sources', [])): return True return False def have_cxx_sources(self): for (lib_name, build_info) in self.libraries: - if 
has_cxx_sources(build_info.get('sources',[])): + if has_cxx_sources(build_info.get('sources', [])): return True return False @@ -63,7 +63,7 @@ class build_clib(old_build_clib): self.run_command('build_src') for (lib_name, build_info) in self.libraries: - l = build_info.get('language',None) + l = build_info.get('language', None) if l and l not in languages: languages.append(l) from distutils.ccompiler import new_compiler @@ -104,7 +104,7 @@ class build_clib(old_build_clib): if self.inplace: for l in self.distribution.installed_libraries: - libname = self.compiler.library_filename(l.name) + libname = self.compiler.library_filename(l.name) source = os.path.join(self.build_clib, libname) target = os.path.join(l.target_dir, libname) self.mkpath(l.target_dir) @@ -136,7 +136,7 @@ class build_clib(old_build_clib): c_sources, cxx_sources, f_sources, fmodule_sources \ = filter_sources(sources) requiref90 = not not fmodule_sources or \ - build_info.get('language','c')=='f90' + build_info.get('language', 'c')=='f90' # save source type information so that build_ext can use it. source_languages = [] @@ -148,14 +148,14 @@ class build_clib(old_build_clib): lib_file = compiler.library_filename(lib_name, output_dir=self.build_clib) - depends = sources + build_info.get('depends',[]) + depends = sources + build_info.get('depends', []) if not (self.force or newer_group(depends, lib_file, 'newer')): log.debug("skipping '%s' library (up-to-date)", lib_name) return else: log.info("building '%s' library", lib_name) - config_fc = build_info.get('config_fc',{}) + config_fc = build_info.get('config_fc', {}) if fcompiler is not None and config_fc: log.info('using additional config_fc from setup script '\ 'for fortran compiler: %s' \ @@ -229,7 +229,7 @@ class build_clib(old_build_clib): if fcompiler.module_dir_switch is None: existing_modules = glob('*.mod') extra_postargs += fcompiler.module_options(\ - module_dirs,module_build_dir) + module_dirs, module_build_dir) if fmodule_sources: log.info("compiling Fortran 90 module sources") @@ -276,9 +276,9 @@ class build_clib(old_build_clib): debug=self.debug) # fix library dependencies - clib_libraries = build_info.get('libraries',[]) + clib_libraries = build_info.get('libraries', []) for lname, binfo in libraries: if lname in clib_libraries: - clib_libraries.extend(binfo[1].get('libraries',[])) + clib_libraries.extend(binfo[1].get('libraries', [])) if clib_libraries: build_info['libraries'] = clib_libraries diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index 97bfa1613..b48e4227a 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -37,7 +37,7 @@ class build_ext (old_build_ext): ] help_options = old_build_ext.help_options + [ - ('help-fcompiler',None, "list available Fortran compilers", + ('help-fcompiler', None, "list available Fortran compilers", show_fortran_compilers), ] @@ -99,14 +99,14 @@ class build_ext (old_build_ext): # Create mapping of libraries built by build_clib: clibs = {} if build_clib is not None: - for libname,build_info in build_clib.libraries or []: + for libname, build_info in build_clib.libraries or []: if libname in clibs and clibs[libname] != build_info: log.warn('library %r defined more than once,'\ ' overwriting build_info\n%s... \nwith\n%s...' \ % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) clibs[libname] = build_info # .. 
and distribution libraries: - for libname,build_info in self.distribution.libraries or []: + for libname, build_info in self.distribution.libraries or []: if libname in clibs: # build_clib libraries have a precedence before distribution ones continue @@ -123,13 +123,13 @@ class build_ext (old_build_ext): for libname in ext.libraries: if libname in clibs: binfo = clibs[libname] - c_libs += binfo.get('libraries',[]) - c_lib_dirs += binfo.get('library_dirs',[]) - for m in binfo.get('macros',[]): + c_libs += binfo.get('libraries', []) + c_lib_dirs += binfo.get('library_dirs', []) + for m in binfo.get('macros', []): if m not in macros: macros.append(m) - for l in clibs.get(libname,{}).get('source_languages',[]): + for l in clibs.get(libname, {}).get('source_languages', []): ext_languages.add(l) if c_libs: new_c_libs = ext.libraries + c_libs @@ -161,7 +161,7 @@ class build_ext (old_build_ext): ext_language = 'c' # default if l and l != ext_language and ext.language: log.warn('resetting extension %r language from %r to %r.' % - (ext.name,l,ext_language)) + (ext.name, l, ext_language)) ext.language = ext_language # global language all_languages.update(ext_languages) @@ -177,7 +177,7 @@ class build_ext (old_build_ext): dry_run=self.dry_run, force=self.force) compiler = self._cxx_compiler - compiler.customize(self.distribution,need_cxx=need_cxx_compiler) + compiler.customize(self.distribution, need_cxx=need_cxx_compiler) compiler.customize_cmd(self) compiler.show_customization() self._cxx_compiler = compiler.cxx_compiler() @@ -297,8 +297,8 @@ class build_ext (old_build_ext): else: # in case ext.language is c++, for instance fcompiler = self._f90_compiler or self._f77_compiler if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext,'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext,'extra_f90_compile_args') else [] + fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else [] + fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else [] cxx_compiler = self._cxx_compiler # check for the availability of required compilers @@ -308,7 +308,7 @@ class build_ext (old_build_ext): if (f_sources or fmodule_sources) and fcompiler is None: raise DistutilsError("extension %r has Fortran sources " \ "but no Fortran compiler found" % (ext.name)) - if ext.language in ['f77','f90'] and fcompiler is None: + if ext.language in ['f77', 'f90'] and fcompiler is None: self.warn("extension %r has Fortran libraries " \ "but no Fortran linker found, using default linker" % (ext.name)) if ext.language=='c++' and cxx_compiler is None: @@ -347,14 +347,14 @@ class build_ext (old_build_ext): log.info("compiling Fortran 90 module sources") module_dirs = ext.module_dirs[:] module_build_dir = os.path.join( - self.build_temp,os.path.dirname( + self.build_temp, os.path.dirname( self.get_ext_filename(fullname))) self.mkpath(module_build_dir) if fcompiler.module_dir_switch is None: existing_modules = glob('*.mod') extra_postargs += fcompiler.module_options( - module_dirs,module_build_dir) + module_dirs, module_build_dir) f_objects += fcompiler.compile(fmodule_sources, output_dir=self.build_temp, macros=macros, @@ -402,7 +402,7 @@ class build_ext (old_build_ext): # not using fcompiler linker self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs) - elif ext.language in ['f77','f90'] 
and fcompiler is not None: + elif ext.language in ['f77', 'f90'] and fcompiler is not None: linker = fcompiler.link_shared_object if ext.language=='c++' and cxx_compiler is not None: linker = cxx_compiler.link_shared_object @@ -437,7 +437,7 @@ class build_ext (old_build_ext): if libname.startswith('msvc'): continue fileexists = False for libdir in c_library_dirs or []: - libfile = os.path.join(libdir,'%s.lib' % (libname)) + libfile = os.path.join(libdir, '%s.lib' % (libname)) if os.path.isfile(libfile): fileexists = True break @@ -445,7 +445,7 @@ class build_ext (old_build_ext): # make g77-compiled static libs available to MSVC fileexists = False for libdir in c_library_dirs: - libfile = os.path.join(libdir,'lib%s.a' % (libname)) + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) if os.path.isfile(libfile): # copy libname.a file to name.lib so that MSVC linker # can find it @@ -465,7 +465,7 @@ class build_ext (old_build_ext): # correct path when compiling in Cygwin but with normal Win # Python if dir.startswith('/usr/lib'): - s,o = exec_command(['cygpath', '-w', dir], use_tee=False) + s, o = exec_command(['cygpath', '-w', dir], use_tee=False) if not s: dir = o f_lib_dirs.append(dir) diff --git a/numpy/distutils/command/build_py.py b/numpy/distutils/command/build_py.py index c25fa227d..54dcde435 100644 --- a/numpy/distutils/command/build_py.py +++ b/numpy/distutils/command/build_py.py @@ -16,7 +16,7 @@ class build_py(old_build_py): # Find build_src generated *.py files. build_src = self.get_finalized_command('build_src') - modules += build_src.py_modules_dict.get(package,[]) + modules += build_src.py_modules_dict.get(package, []) return modules diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 2a33e5175..7463a0e17 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -66,7 +66,7 @@ class build_src(build_ext.build_ext): "directory alongside your pure Python modules"), ] - boolean_options = ['force','inplace'] + boolean_options = ['force', 'inplace'] help_options = [] @@ -136,14 +136,14 @@ class build_src(build_ext.build_ext): self.inplace = build_ext.inplace if self.swig_cpp is None: self.swig_cpp = build_ext.swig_cpp - for c in ['swig','swig_opt']: - o = '--'+c.replace('_','-') - v = getattr(build_ext,c,None) + for c in ['swig', 'swig_opt']: + o = '--'+c.replace('_', '-') + v = getattr(build_ext, c, None) if v: - if getattr(self,c): + if getattr(self, c): log.warn('both build_src and build_ext define %s option' % (o)) else: - log.info('using "%s=%s" option from build_ext command' % (o,v)) + log.info('using "%s=%s" option from build_ext command' % (o, v)) setattr(self, c, v) def run(self): @@ -179,14 +179,14 @@ class build_src(build_ext.build_ext): from numpy.distutils.misc_util import get_data_files new_data_files = [] for data in self.data_files: - if isinstance(data,str): + if isinstance(data, str): new_data_files.append(data) - elif isinstance(data,tuple): - d,files = data + elif isinstance(data, tuple): + d, files = data if self.inplace: build_dir = self.get_package_dir('.'.join(d.split(os.sep))) else: - build_dir = os.path.join(self.build_src,d) + build_dir = os.path.join(self.build_src, d) funcs = [f for f in files if hasattr(f, '__call__')] files = [f for f in files if not hasattr(f, '__call__')] for f in funcs: @@ -195,13 +195,13 @@ class build_src(build_ext.build_ext): else: s = f() if s is not None: - if isinstance(s,list): + if isinstance(s, list): files.extend(s) - elif isinstance(s,str): 
+ elif isinstance(s, str): files.append(s) else: raise TypeError(repr(s)) - filenames = get_data_files((d,files)) + filenames = get_data_files((d, files)) new_data_files.append((d, filenames)) else: raise TypeError(repr(data)) @@ -289,7 +289,7 @@ class build_src(build_ext.build_ext): self.py_modules[:] = new_py_modules def build_library_sources(self, lib_name, build_info): - sources = list(build_info.get('sources',[])) + sources = list(build_info.get('sources', [])) if not sources: return @@ -396,10 +396,10 @@ class build_src(build_ext.build_ext): return new_sources def filter_py_files(self, sources): - return self.filter_files(sources,['.py']) + return self.filter_files(sources, ['.py']) def filter_h_files(self, sources): - return self.filter_files(sources,['.h','.hpp','.inc']) + return self.filter_files(sources, ['.h', '.hpp', '.inc']) def filter_files(self, sources, exts = []): new_sources = [] @@ -428,7 +428,7 @@ class build_src(build_ext.build_ext): else: target_dir = appendpath(self.build_src, os.path.dirname(base)) self.mkpath(target_dir) - target_file = os.path.join(target_dir,os.path.basename(base)) + target_file = os.path.join(target_dir, os.path.basename(base)) if (self.force or newer_group([source] + depends, target_file)): if _f_pyf_ext_match(base): log.info("from_template:> %s" % (target_file)) @@ -436,7 +436,7 @@ class build_src(build_ext.build_ext): else: log.info("conv_template:> %s" % (target_file)) outstr = process_c_file(source) - fid = open(target_file,'w') + fid = open(target_file, 'w') fid.write(outstr) fid.close() if _header_ext_match(target_file): @@ -515,20 +515,20 @@ class build_src(build_ext.build_ext): raise DistutilsSetupError('mismatch of extension names: %s ' 'provides %r but expected %r' % ( source, name, ext_name)) - target_file = os.path.join(target_dir,name+'module.c') + target_file = os.path.join(target_dir, name+'module.c') else: log.debug(' source %s does not exist: skipping f2py\'ing.' \ % (source)) name = ext_name skip_f2py = 1 - target_file = os.path.join(target_dir,name+'module.c') + target_file = os.path.join(target_dir, name+'module.c') if not os.path.isfile(target_file): log.warn(' target %s does not exist:\n '\ 'Assuming %smodule.c was generated with '\ '"build_src --inplace" command.' \ % (target_file, name)) target_dir = os.path.dirname(base) - target_file = os.path.join(target_dir,name+'module.c') + target_file = os.path.join(target_dir, name+'module.c') if not os.path.isfile(target_file): raise DistutilsSetupError("%r missing" % (target_file,)) log.info(' Yes! Using %r as up-to-date target.' \ @@ -551,9 +551,9 @@ class build_src(build_ext.build_ext): f2py_options = extension.f2py_options + self.f2py_opts if self.distribution.libraries: - for name,build_info in self.distribution.libraries: + for name, build_info in self.distribution.libraries: if name in extension.libraries: - f2py_options.extend(build_info.get('f2py_options',[])) + f2py_options.extend(build_info.get('f2py_options', [])) log.info("f2py options: %s" % (f2py_options)) @@ -566,12 +566,12 @@ class build_src(build_ext.build_ext): target_file = f2py_targets[source] target_dir = os.path.dirname(target_file) or '.' 
depends = [source] + extension.depends - if (self.force or newer_group(depends, target_file,'newer')) \ + if (self.force or newer_group(depends, target_file, 'newer')) \ and not skip_f2py: log.info("f2py: %s" % (source)) import numpy.f2py numpy.f2py.run_main(f2py_options - + ['--build-dir',target_dir,source]) + + ['--build-dir', target_dir, source]) else: log.debug(" skipping '%s' f2py interface (up-to-date)" % (source)) else: @@ -581,7 +581,7 @@ class build_src(build_ext.build_ext): else: name = extension.name target_dir = os.path.join(*([self.build_src]\ +name.split('.')[:-1])) - target_file = os.path.join(target_dir,ext_name + 'module.c') + target_file = os.path.join(target_dir, ext_name + 'module.c') new_sources.append(target_file) depends = f_sources + extension.depends if (self.force or newer_group(depends, target_file, 'newer')) \ @@ -590,8 +590,8 @@ class build_src(build_ext.build_ext): self.mkpath(target_dir) import numpy.f2py numpy.f2py.run_main(f2py_options + ['--lower', - '--build-dir',target_dir]+\ - ['-m',ext_name]+f_sources) + '--build-dir', target_dir]+\ + ['-m', ext_name]+f_sources) else: log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\ % (target_file)) @@ -599,8 +599,8 @@ class build_src(build_ext.build_ext): if not os.path.isfile(target_file): raise DistutilsError("f2py target file %r not generated" % (target_file,)) - target_c = os.path.join(self.build_src,'fortranobject.c') - target_h = os.path.join(self.build_src,'fortranobject.h') + target_c = os.path.join(self.build_src, 'fortranobject.c') + target_h = os.path.join(self.build_src, 'fortranobject.h') log.info(" adding '%s' to sources." % (target_c)) new_sources.append(target_c) if self.build_src not in extension.include_dirs: @@ -611,20 +611,20 @@ class build_src(build_ext.build_ext): if not skip_f2py: import numpy.f2py d = os.path.dirname(numpy.f2py.__file__) - source_c = os.path.join(d,'src','fortranobject.c') - source_h = os.path.join(d,'src','fortranobject.h') - if newer(source_c,target_c) or newer(source_h,target_h): + source_c = os.path.join(d, 'src', 'fortranobject.c') + source_h = os.path.join(d, 'src', 'fortranobject.h') + if newer(source_c, target_c) or newer(source_h, target_h): self.mkpath(os.path.dirname(target_c)) - self.copy_file(source_c,target_c) - self.copy_file(source_h,target_h) + self.copy_file(source_c, target_c) + self.copy_file(source_h, target_h) else: if not os.path.isfile(target_c): raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,)) if not os.path.isfile(target_h): raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,)) - for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']: - filename = os.path.join(target_dir,ext_name + name_ext) + for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']: + filename = os.path.join(target_dir, ext_name + name_ext) if os.path.isfile(filename): log.info(" adding '%s' to sources." % (filename)) f_sources.append(filename) @@ -689,7 +689,7 @@ class build_src(build_ext.build_ext): log.warn('assuming that %r has c++ swig target' % (source)) if is_cpp: target_ext = '.cpp' - target_file = os.path.join(target_dir,'%s_wrap%s' \ + target_file = os.path.join(target_dir, '%s_wrap%s' \ % (name, target_ext)) else: log.warn(' source %s does not exist: skipping swig\'ing.' 
\ @@ -745,17 +745,17 @@ class build_src(build_ext.build_ext): return new_sources + py_files -_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match -_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z',re.I).match +_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match +_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match #### SWIG related auxiliary functions #### _swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)', re.I).match -_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search -_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search +_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search +_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search def get_swig_target(source): - f = open(source,'r') + f = open(source, 'r') result = None line = f.readline() if _has_cpp_header(line): @@ -766,7 +766,7 @@ def get_swig_target(source): return result def get_swig_modulename(source): - f = open(source,'r') + f = open(source, 'r') name = None for line in f: m = _swig_module_name_match(line) @@ -776,9 +776,9 @@ def get_swig_modulename(source): f.close() return name -def _find_swig_target(target_dir,name): - for ext in ['.cpp','.c']: - target = os.path.join(target_dir,'%s_wrap%s' % (name, ext)) +def _find_swig_target(target_dir, name): + for ext in ['.cpp', '.c']: + target = os.path.join(target_dir, '%s_wrap%s' % (name, ext)) if os.path.isfile(target): break return target @@ -788,7 +788,7 @@ def _find_swig_target(target_dir,name): _f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)', re.I).match _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'\ - '__user__[\w_]*)',re.I).match + '__user__[\w_]*)', re.I).match def get_f2py_modulename(source): name = None diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py index fd8a135fb..5d363eb4e 100644 --- a/numpy/distutils/command/config.py +++ b/numpy/distutils/command/config.py @@ -80,15 +80,15 @@ class was %s self.fcompiler.customize_cmd(self) self.fcompiler.show_customization() - def _wrap_method(self,mth,lang,args): + def _wrap_method(self, mth, lang, args): from distutils.ccompiler import CompileError from distutils.errors import DistutilsExecError save_compiler = self.compiler - if lang in ['f77','f90']: + if lang in ['f77', 'f90']: self.compiler = self.fcompiler try: ret = mth(*((self,)+args)) - except (DistutilsExecError,CompileError): + except (DistutilsExecError, CompileError): msg = str(get_exception()) self.compiler = save_compiler raise CompileError @@ -96,7 +96,7 @@ class was %s return ret def _compile (self, body, headers, include_dirs, lang): - return self._wrap_method(old_config._compile,lang, + return self._wrap_method(old_config._compile, lang, (body, headers, include_dirs, lang)) def _link (self, body, @@ -105,14 +105,14 @@ class was %s if self.compiler.compiler_type=='msvc': libraries = (libraries or [])[:] library_dirs = (library_dirs or [])[:] - if lang in ['f77','f90']: + if lang in ['f77', 'f90']: lang = 'c' # always use system linker when using MSVC compiler if self.fcompiler: for d in self.fcompiler.library_dirs or []: # correct path when compiling in Cygwin but with # normal Win Python if d.startswith('/usr/lib'): - s,o = exec_command(['cygpath', '-w', d], + s, o = exec_command(['cygpath', '-w', d], use_tee=False) if not s: d = o library_dirs.append(d) @@ -123,7 +123,7 @@ 
class was %s if libname.startswith('msvc'): continue fileexists = False for libdir in library_dirs or []: - libfile = os.path.join(libdir,'%s.lib' % (libname)) + libfile = os.path.join(libdir, '%s.lib' % (libname)) if os.path.isfile(libfile): fileexists = True break @@ -131,11 +131,11 @@ class was %s # make g77-compiled static libs available to MSVC fileexists = False for libdir in library_dirs: - libfile = os.path.join(libdir,'lib%s.a' % (libname)) + libfile = os.path.join(libdir, 'lib%s.a' % (libname)) if os.path.isfile(libfile): # copy libname.a file to name.lib so that MSVC linker # can find it - libfile2 = os.path.join(libdir,'%s.lib' % (libname)) + libfile2 = os.path.join(libdir, '%s.lib' % (libname)) copy_file(libfile, libfile2) self.temp_files.append(libfile2) fileexists = True @@ -145,14 +145,14 @@ class was %s % (libname, library_dirs)) elif self.compiler.compiler_type == 'mingw32': generate_manifest(self) - return self._wrap_method(old_config._link,lang, + return self._wrap_method(old_config._link, lang, (body, headers, include_dirs, libraries, library_dirs, lang)) def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): self._check_compiler() return self.try_compile( - "/* we need a dummy line to make distutils happy */", + "/* we need a dummy line to make distutils happy */", [header], include_dirs) def check_decl(self, symbol, diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py index bf776dd02..5e638fecc 100644 --- a/numpy/distutils/command/config_compiler.py +++ b/numpy/distutils/command/config_compiler.py @@ -24,24 +24,24 @@ class config_fc(Command): description = "specify Fortran 77/Fortran 90 compiler information" user_options = [ - ('fcompiler=',None,"specify Fortran compiler type"), + ('fcompiler=', None, "specify Fortran compiler type"), ('f77exec=', None, "specify F77 compiler command"), ('f90exec=', None, "specify F90 compiler command"), - ('f77flags=',None,"specify F77 compiler flags"), - ('f90flags=',None,"specify F90 compiler flags"), - ('opt=',None,"specify optimization flags"), - ('arch=',None,"specify architecture specific optimization flags"), - ('debug','g',"compile with debugging information"), - ('noopt',None,"compile without optimization"), - ('noarch',None,"compile without arch-dependent optimization"), + ('f77flags=', None, "specify F77 compiler flags"), + ('f90flags=', None, "specify F90 compiler flags"), + ('opt=', None, "specify optimization flags"), + ('arch=', None, "specify architecture specific optimization flags"), + ('debug', 'g', "compile with debugging information"), + ('noopt', None, "compile without optimization"), + ('noarch', None, "compile without arch-dependent optimization"), ] help_options = [ - ('help-fcompiler',None, "list available Fortran compilers", + ('help-fcompiler', None, "list available Fortran compilers", show_fortran_compilers), ] - boolean_options = ['debug','noopt','noarch'] + boolean_options = ['debug', 'noopt', 'noarch'] def initialize_options(self): self.fcompiler = None @@ -65,7 +65,7 @@ class config_fc(Command): for a in ['fcompiler']: l = [] for c in cmd_list: - v = getattr(c,a) + v = getattr(c, a) if v is not None: if not isinstance(v, str): v = v.compiler_type if v not in l: l.append(v) @@ -76,7 +76,7 @@ class config_fc(Command): ', using first in list as default' % (a, l)) if v1: for c in cmd_list: - if getattr(c,a) is None: setattr(c, a, v1) + if getattr(c, a) is None: setattr(c, a, v1) def run(self): # Do nothing. 
@@ -90,7 +90,7 @@ class config_cc(Command): description = "specify C/C++ compiler information" user_options = [ - ('compiler=',None,"specify C/C++ compiler type"), + ('compiler=', None, "specify C/C++ compiler type"), ] def initialize_options(self): @@ -106,7 +106,7 @@ class config_cc(Command): for a in ['compiler']: l = [] for c in cmd_list: - v = getattr(c,a) + v = getattr(c, a) if v is not None: if not isinstance(v, str): v = v.compiler_type if v not in l: l.append(v) @@ -117,7 +117,7 @@ class config_cc(Command): ', using first in list as default' % (a, l)) if v1: for c in cmd_list: - if getattr(c,a) is None: setattr(c, a, v1) + if getattr(c, a) is None: setattr(c, a, v1) return def run(self): diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py index 9dd8dede9..2da21542f 100644 --- a/numpy/distutils/command/install.py +++ b/numpy/distutils/command/install.py @@ -41,7 +41,7 @@ class install(old_install): # work. # caller = sys._getframe(3) - caller_module = caller.f_globals.get('__name__','') + caller_module = caller.f_globals.get('__name__', '') caller_name = caller.f_code.co_name if caller_module != 'distutils.dist' or caller_name!='run_commands': @@ -61,7 +61,7 @@ class install(old_install): # bdist_rpm fails when INSTALLED_FILES contains # paths with spaces. Such paths must be enclosed # with double-quotes. - f = open(self.record,'r') + f = open(self.record, 'r') lines = [] need_rewrite = False for l in f: diff --git a/numpy/distutils/command/install_headers.py b/numpy/distutils/command/install_headers.py index 84eb5f6da..f3f58aa28 100644 --- a/numpy/distutils/command/install_headers.py +++ b/numpy/distutils/command/install_headers.py @@ -12,7 +12,7 @@ class install_headers (old_install_headers): prefix = os.path.dirname(self.install_dir) for header in headers: - if isinstance(header,tuple): + if isinstance(header, tuple): # Kind of a hack, but I don't know where else to change this... if header[0] == 'numpy.core': header = ('numpy', header[1]) diff --git a/numpy/distutils/command/sdist.py b/numpy/distutils/command/sdist.py index 7d8cc7826..bfaab1c8f 100644 --- a/numpy/distutils/command/sdist.py +++ b/numpy/distutils/command/sdist.py @@ -22,7 +22,7 @@ class sdist(old_sdist): if dist.has_headers(): headers = [] for h in dist.headers: - if isinstance(h,str): headers.append(h) + if isinstance(h, str): headers.append(h) else: headers.append(h[1]) self.filelist.extend(headers) diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py index cb03fc7c3..a67fe4e51 100644 --- a/numpy/distutils/conv_template.py +++ b/numpy/distutils/conv_template.py @@ -124,10 +124,10 @@ def parse_structure(astr, level): start = astr.find(loopbeg, ind) if start == -1: break - start2 = astr.find("*/",start) - start2 = astr.find("\n",start2) - fini1 = astr.find(loopend,start2) - fini2 = astr.find("\n",fini1) + start2 = astr.find("*/", start) + start2 = astr.find("\n", start2) + fini1 = astr.find(loopend, start2) + fini2 = astr.find("\n", fini1) line += astr.count("\n", ind, start2+1) spanlist.append((start, start2+1, fini1, fini2+1, line)) line += astr.count("\n", start2+1, fini2) @@ -150,7 +150,7 @@ def parse_values(astr): # split at ',' and a list of values returned. 
astr = parenrep.sub(paren_repl, astr) # replaces occurences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl,x.strip()) + astr = ','.join([plainrep.sub(paren_repl, x.strip()) for x in astr.split(',')]) return astr.split(',') @@ -188,19 +188,19 @@ def parse_loop_header(loophead) : elif nsub != size : msg = "Mismatch in number of values:\n%s = %s" % (name, vals) raise ValueError(msg) - names.append((name,vals)) + names.append((name, vals)) # Find any exclude variables excludes = [] - + for obj in exclude_re.finditer(loophead): span = obj.span() # find next newline endline = loophead.find('\n', span[1]) substr = loophead[span[1]:endline] ex_names = exclude_vars_re.findall(substr) - excludes.append(dict(ex_names)) + excludes.append(dict(ex_names)) # generate list of dictionaries, one for each template iteration dlist = [] @@ -208,7 +208,7 @@ def parse_loop_header(loophead) : raise ValueError("No substitution variables found") for i in range(nsub) : tmp = {} - for name,vals in names : + for name, vals in names : tmp[name] = vals[i] dlist.append(tmp) return dlist @@ -276,9 +276,9 @@ def resolve_includes(source): if m: fn = m.group('name') if not os.path.isabs(fn): - fn = os.path.join(d,fn) + fn = os.path.join(d, fn) if os.path.isfile(fn): - print('Including file',fn) + print('Including file', fn) lines.extend(resolve_includes(fn)) else: lines.append(line) @@ -289,7 +289,7 @@ def resolve_includes(source): def process_file(source): lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\","\\\\") + sourcefile = os.path.normcase(source).replace("\\", "\\\\") try: code = process_str(''.join(lines)) except ValueError: @@ -323,10 +323,10 @@ if __name__ == "__main__": fid = sys.stdin outfile = sys.stdout else: - fid = open(file,'r') + fid = open(file, 'r') (base, ext) = os.path.splitext(file) newname = base - outfile = open(newname,'w') + outfile = open(newname, 'w') allstr = fid.read() try: diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py index 3b5ae5630..3f0fd464a 100644 --- a/numpy/distutils/core.py +++ b/numpy/distutils/core.py @@ -55,7 +55,7 @@ if have_setuptools: numpy_cmdclass['egg_info'] = egg_info.egg_info def _dict_append(d, **kws): - for k,v in kws.items(): + for k, v in kws.items(): if k not in d: d[k] = v continue @@ -133,13 +133,13 @@ def setup(**attr): # create setup dictionary and append to new_attr config = configuration() - if hasattr(config,'todict'): + if hasattr(config, 'todict'): config = config.todict() _dict_append(new_attr, **config) # Move extension source libraries to libraries libraries = [] - for ext in new_attr.get('ext_modules',[]): + for ext in new_attr.get('ext_modules', []): new_libraries = [] for item in ext.libraries: if is_sequence(item): @@ -207,4 +207,4 @@ def _check_append_ext_library(libraries, lib_name, build_info): warnings.warn("[4] libraries list contains %r with" " no build_info" % (lib_name,)) break - libraries.append((lib_name,build_info)) + libraries.append((lib_name, build_info)) diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py index 64ad055d3..020f2c02f 100644 --- a/numpy/distutils/cpuinfo.py +++ b/numpy/distutils/cpuinfo.py @@ -72,16 +72,16 @@ class CPUInfoBase(object): the availability of various CPU features. 
""" - def _try_call(self,func): + def _try_call(self, func): try: return func() except: pass - def __getattr__(self,name): + def __getattr__(self, name): if not name.startswith('_'): - if hasattr(self,'_'+name): - attr = getattr(self,'_'+name) + if hasattr(self, '_'+name): + attr = getattr(self, '_'+name) if isinstance(attr, types.MethodType): return lambda func=self._try_call,attr=attr : func(attr) else: @@ -144,10 +144,10 @@ class LinuxCPUInfo(CPUInfoBase): return self._is_AMD() and self.info[0]['model'] == '3' def _is_AthlonK6(self): - return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None def _is_AthlonK7(self): - return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None def _is_AthlonMP(self): return re.match(r'.*?Athlon\(tm\) MP\b', @@ -246,7 +246,7 @@ class LinuxCPUInfo(CPUInfoBase): and (self.info[0]['cpu family'] == '6' \ or self.info[0]['cpu family'] == '15' ) \ and (self.has_sse3() and not self.has_ssse3())\ - and re.match(r'.*?\blm\b',self.info[0]['flags']) is not None + and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None def _is_Core2(self): return self.is_64bit() and self.is_Intel() and \ @@ -259,7 +259,7 @@ class LinuxCPUInfo(CPUInfoBase): def _is_XEON(self): return re.match(r'.*?XEON\b', - self.info[0]['model name'],re.IGNORECASE) is not None + self.info[0]['model name'], re.IGNORECASE) is not None _is_Xeon = _is_XEON @@ -278,25 +278,25 @@ class LinuxCPUInfo(CPUInfoBase): return self.info[0]['f00f_bug']=='yes' def _has_mmx(self): - return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None def _has_sse(self): - return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None def _has_sse2(self): - return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None def _has_sse3(self): - return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None def _has_ssse3(self): - return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None def _has_3dnow(self): - return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None def _has_3dnowext(self): - return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None class IRIXCPUInfo(CPUInfoBase): info = None @@ -305,7 +305,7 @@ class IRIXCPUInfo(CPUInfoBase): if self.info is not None: return info = key_value_from_command('sysconf', sep=' ', - successful_status=(0,1)) + successful_status=(0, 1)) self.__class__.info = info def _not_impl(self): pass @@ -316,7 +316,7 @@ class IRIXCPUInfo(CPUInfoBase): def _getNCPUs(self): return int(self.info.get('NUM_PROCESSORS', 1)) - def __cputype(self,n): + def __cputype(self, n): return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) def _is_r2000(self): return self.__cputype(2000) def _is_r3000(self): return self.__cputype(3000) @@ -337,7 +337,7 @@ class IRIXCPUInfo(CPUInfoBase): def get_ip(self): try: return self.info.get('MACHINE') except: pass - def __machine(self,n): + def __machine(self, n): return 
self.info.get('MACHINE').lower() == 'ip%s' % (n) def _is_IP19(self): return self.__machine(19) def _is_IP20(self): return self.__machine(20) @@ -380,7 +380,7 @@ class DarwinCPUInfo(CPUInfoBase): def _is_ppc(self): return self.info['arch']=='ppc' - def __machine(self,n): + def __machine(self, n): return self.info['machine'] == 'ppc%s'%n def _is_ppc601(self): return self.__machine(601) def _is_ppc602(self): return self.__machine(602) @@ -439,35 +439,35 @@ class SunOSCPUInfo(CPUInfoBase): return self.info['arch']=='sun4' def _is_SUNW(self): - return re.match(r'SUNW',self.info['uname_i']) is not None + return re.match(r'SUNW', self.info['uname_i']) is not None def _is_sparcstation5(self): - return re.match(r'.*SPARCstation-5',self.info['uname_i']) is not None + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None def _is_ultra1(self): - return re.match(r'.*Ultra-1',self.info['uname_i']) is not None + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None def _is_ultra250(self): - return re.match(r'.*Ultra-250',self.info['uname_i']) is not None + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None def _is_ultra2(self): - return re.match(r'.*Ultra-2',self.info['uname_i']) is not None + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None def _is_ultra30(self): - return re.match(r'.*Ultra-30',self.info['uname_i']) is not None + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None def _is_ultra4(self): - return re.match(r'.*Ultra-4',self.info['uname_i']) is not None + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None def _is_ultra5_10(self): - return re.match(r'.*Ultra-5_10',self.info['uname_i']) is not None + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None def _is_ultra5(self): - return re.match(r'.*Ultra-5',self.info['uname_i']) is not None + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None def _is_ultra60(self): - return re.match(r'.*Ultra-60',self.info['uname_i']) is not None + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None def _is_ultra80(self): - return re.match(r'.*Ultra-80',self.info['uname_i']) is not None + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None def _is_ultraenterprice(self): - return re.match(r'.*Ultra-Enterprise',self.info['uname_i']) is not None + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None def _is_ultraenterprice10k(self): - return re.match(r'.*Ultra-Enterprise-10000',self.info['uname_i']) is not None + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None def _is_sunfire(self): - return re.match(r'.*Sun-Fire',self.info['uname_i']) is not None + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None def _is_ultra(self): - return re.match(r'.*Ultra',self.info['uname_i']) is not None + return re.match(r'.*Ultra', self.info['uname_i']) is not None def _is_cpusparcv7(self): return self.info['processor']=='sparcv7' @@ -496,22 +496,22 @@ class Win32CPUInfo(CPUInfoBase): import _winreg as winreg prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\ - "\s+stepping\s+(?P<STP>\d+)",re.IGNORECASE) + "\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) pnum=0 while True: try: - proc=winreg.EnumKey(chnd,pnum) + proc=winreg.EnumKey(chnd, pnum) except winreg.error: break else: pnum+=1 info.append({"Processor":proc}) - phnd=winreg.OpenKey(chnd,proc) + phnd=winreg.OpenKey(chnd, proc) pidx=0 while True: try: - 
name,value,vtpe=winreg.EnumValue(phnd,pidx) + name, value, vtpe=winreg.EnumValue(phnd, pidx) except winreg.error: break else: @@ -524,7 +524,7 @@ class Win32CPUInfo(CPUInfoBase): info[-1]["Model"]=int(srch.group("MDL")) info[-1]["Stepping"]=int(srch.group("STP")) except: - print(sys.exc_info()[1],'(ignoring)') + print(sys.exc_info()[1], '(ignoring)') self.__class__.info = info def _not_impl(self): pass @@ -542,11 +542,11 @@ class Win32CPUInfo(CPUInfoBase): def _is_AMDK5(self): return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [0,1,2,3] + and self.info[0]['Model'] in [0, 1, 2, 3] def _is_AMDK6(self): return self.is_AMD() and self.info[0]['Family']==5 \ - and self.info[0]['Model'] in [6,7] + and self.info[0]['Model'] in [6, 7] def _is_AMDK6_2(self): return self.is_AMD() and self.info[0]['Family']==5 \ @@ -596,11 +596,11 @@ class Win32CPUInfo(CPUInfoBase): def _is_PentiumII(self): return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [3,5,6] + and self.info[0]['Model'] in [3, 5, 6] def _is_PentiumIII(self): return self.is_Intel() and self.info[0]['Family']==6 \ - and self.info[0]['Model'] in [7,8,9,10,11] + and self.info[0]['Model'] in [7, 8, 9, 10, 11] def _is_PentiumIV(self): return self.is_Intel() and self.info[0]['Family']==15 @@ -624,20 +624,20 @@ class Win32CPUInfo(CPUInfoBase): def _has_mmx(self): if self.is_Intel(): return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ - or (self.info[0]['Family'] in [6,15]) + or (self.info[0]['Family'] in [6, 15]) elif self.is_AMD(): - return self.info[0]['Family'] in [5,6,15] + return self.info[0]['Family'] in [5, 6, 15] else: return False def _has_sse(self): if self.is_Intel(): return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [7,8,9,10,11]) \ + self.info[0]['Model'] in [7, 8, 9, 10, 11]) \ or self.info[0]['Family']==15 elif self.is_AMD(): return (self.info[0]['Family']==6 and \ - self.info[0]['Model'] in [6,7,8,10]) \ + self.info[0]['Model'] in [6, 7, 8, 10]) \ or self.info[0]['Family']==15 else: return False @@ -652,10 +652,10 @@ class Win32CPUInfo(CPUInfoBase): return False def _has_3dnow(self): - return self.is_AMD() and self.info[0]['Family'] in [5,6,15] + return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] def _has_3dnowext(self): - return self.is_AMD() and self.info[0]['Family'] in [6,15] + return self.is_AMD() and self.info[0]['Family'] in [6, 15] if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) 
cpuinfo = LinuxCPUInfo diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index 0af60ec34..648a98efb 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -46,7 +46,7 @@ Known bugs: """ from __future__ import division, absolute_import, print_function -__all__ = ['exec_command','find_executable'] +__all__ = ['exec_command', 'find_executable'] import os import sys @@ -65,10 +65,10 @@ def temp_file_name(): def get_pythonexe(): pythonexe = sys.executable - if os.name in ['nt','dos']: - fdir,fn = os.path.split(pythonexe) - fn = fn.upper().replace('PYTHONW','PYTHON') - pythonexe = os.path.join(fdir,fn) + if os.name in ['nt', 'dos']: + fdir, fn = os.path.split(pythonexe) + fn = fn.upper().replace('PYTHONW', 'PYTHON') + pythonexe = os.path.join(fdir, fn) assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) return pythonexe @@ -92,7 +92,7 @@ def find_executable(exe, path=None, _cache={}): orig_exe = exe if path is None: - path = os.environ.get('PATH',os.defpath) + path = os.environ.get('PATH', os.defpath) if os.name=='posix': realpath = os.path.realpath else: @@ -102,9 +102,9 @@ def find_executable(exe, path=None, _cache={}): exe = exe[1:-1] suffixes = [''] - if os.name in ['nt','dos','os2']: - fn,ext = os.path.splitext(exe) - extra_suffixes = ['.exe','.com','.bat'] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] if ext.lower() not in extra_suffixes: suffixes = extra_suffixes @@ -138,7 +138,7 @@ def _preserve_environment( names ): def _update_environment( **env ): log.debug('_update_environment(...)') - for name,value in env.items(): + for name, value in env.items(): os.environ[name] = value or '' def _supports_fileno(stream): @@ -248,11 +248,11 @@ def _exec_command_posix( command, if use_tee == 2: filter = r'| tr -cd "\n" | tr "\n" "."; echo' command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\ - % (command_str,stsfile,tmpfile,filter) + % (command_str, stsfile, tmpfile, filter) else: stsfile = temp_file_name() command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\ - % (command_str,stsfile,tmpfile) + % (command_str, stsfile, tmpfile) #command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile) log.debug('Running os.system(%r)' % (command_posix)) @@ -265,13 +265,13 @@ def _exec_command_posix( command, return _exec_command(command, use_shell=use_shell, **env) if stsfile is not None: - f = open_latin1(stsfile,'r') + f = open_latin1(stsfile, 'r') status_text = f.read() status = int(status_text) f.close() os.remove(stsfile) - f = open_latin1(tmpfile,'r') + f = open_latin1(tmpfile, 'r') text = f.read() f.close() os.remove(tmpfile) @@ -291,7 +291,7 @@ def _exec_command_python(command, stsfile = temp_file_name() outfile = temp_file_name() - f = open(cmdfile,'w') + f = open(cmdfile, 'w') f.write('import os\n') f.write('import sys\n') f.write('sys.path.insert(0,%r)\n' % (exec_command_dir)) @@ -310,12 +310,12 @@ def _exec_command_python(command, raise RuntimeError("%r failed" % (cmd,)) os.remove(cmdfile) - f = open_latin1(stsfile,'r') + f = open_latin1(stsfile, 'r') status = int(f.read()) f.close() os.remove(stsfile) - f = open_latin1(outfile,'r') + f = open_latin1(outfile, 'r') text = f.read() f.close() os.remove(outfile) @@ -338,11 +338,11 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ): if use_shell: # We use shell (unless use_shell==0) so that wildcards can be # used. 
- sh = os.environ.get('SHELL','/bin/sh') + sh = os.environ.get('SHELL', '/bin/sh') if is_sequence(command): - argv = [sh,'-c',' '.join(list(command))] + argv = [sh, '-c', ' '.join(list(command))] else: - argv = [sh,'-c',command] + argv = [sh, '-c', command] else: # On NT, DOS we avoid using command.com as it's exit status is # not related to the exit status of a command. @@ -351,16 +351,16 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ): else: argv = shlex.split(command) - if hasattr(os,'spawnvpe'): + if hasattr(os, 'spawnvpe'): spawn_command = os.spawnvpe else: spawn_command = os.spawnve argv[0] = find_executable(argv[0]) or argv[0] if not os.path.isfile(argv[0]): log.warn('Executable %s does not exist' % (argv[0])) - if os.name in ['nt','dos']: + if os.name in ['nt', 'dos']: # argv[0] might be internal command - argv = [os.environ['COMSPEC'],'/C'] + argv + argv = [os.environ['COMSPEC'], '/C'] + argv using_command = 1 _so_has_fileno = _supports_fileno(sys.stdout) @@ -375,13 +375,13 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ): se_dup = os.dup(se_fileno) outfile = temp_file_name() - fout = open(outfile,'w') + fout = open(outfile, 'w') if using_command: errfile = temp_file_name() - ferr = open(errfile,'w') + ferr = open(errfile, 'w') log.debug('Running %s(%s,%r,%r,os.environ)' \ - % (spawn_command.__name__,os.P_WAIT,argv[0],argv)) + % (spawn_command.__name__, os.P_WAIT, argv[0], argv)) argv0 = argv[0] if not using_command: @@ -390,38 +390,38 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ): so_flush() se_flush() if _so_has_fileno: - os.dup2(fout.fileno(),so_fileno) + os.dup2(fout.fileno(), so_fileno) if _se_has_fileno: if using_command: #XXX: disabled for now as it does not work from cmd under win32. 
# Tests fail on msys - os.dup2(ferr.fileno(),se_fileno) + os.dup2(ferr.fileno(), se_fileno) else: - os.dup2(fout.fileno(),se_fileno) + os.dup2(fout.fileno(), se_fileno) try: - status = spawn_command(os.P_WAIT,argv0,argv,os.environ) + status = spawn_command(os.P_WAIT, argv0, argv, os.environ) except OSError: errmess = str(get_exception()) status = 999 - sys.stderr.write('%s: %s'%(errmess,argv[0])) + sys.stderr.write('%s: %s'%(errmess, argv[0])) so_flush() se_flush() if _so_has_fileno: - os.dup2(so_dup,so_fileno) + os.dup2(so_dup, so_fileno) if _se_has_fileno: - os.dup2(se_dup,se_fileno) + os.dup2(se_dup, se_fileno) fout.close() - fout = open_latin1(outfile,'r') + fout = open_latin1(outfile, 'r') text = fout.read() fout.close() os.remove(outfile) if using_command: ferr.close() - ferr = open_latin1(errfile,'r') + ferr = open_latin1(errfile, 'r') errmess = ferr.read() ferr.close() os.remove(errfile) @@ -453,120 +453,120 @@ def test_nt(**kws): if using_cygwin_echo: log.warn('Using cygwin echo in win32 environment is not supported') - s,o=exec_command(pythonexe\ + s, o=exec_command(pythonexe\ +' -c "import os;print os.environ.get(\'AAA\',\'\')"') - assert s==0 and o=='',(s,o) + assert s==0 and o=='', (s, o) - s,o=exec_command(pythonexe\ + s, o=exec_command(pythonexe\ +' -c "import os;print os.environ.get(\'AAA\')"', AAA='Tere') - assert s==0 and o=='Tere',(s,o) + assert s==0 and o=='Tere', (s, o) os.environ['BBB'] = 'Hi' - s,o=exec_command(pythonexe\ + s, o=exec_command(pythonexe\ +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi',(s,o) + assert s==0 and o=='Hi', (s, o) - s,o=exec_command(pythonexe\ + s, o=exec_command(pythonexe\ +' -c "import os;print os.environ.get(\'BBB\',\'\')"', BBB='Hey') - assert s==0 and o=='Hey',(s,o) + assert s==0 and o=='Hey', (s, o) - s,o=exec_command(pythonexe\ + s, o=exec_command(pythonexe\ +' -c "import os;print os.environ.get(\'BBB\',\'\')"') - assert s==0 and o=='Hi',(s,o) + assert s==0 and o=='Hi', (s, o) elif 0: - s,o=exec_command('echo Hello') - assert s==0 and o=='Hello',(s,o) + s, o=exec_command('echo Hello') + assert s==0 and o=='Hello', (s, o) - s,o=exec_command('echo a%AAA%') - assert s==0 and o=='a',(s,o) + s, o=exec_command('echo a%AAA%') + assert s==0 and o=='a', (s, o) - s,o=exec_command('echo a%AAA%',AAA='Tere') - assert s==0 and o=='aTere',(s,o) + s, o=exec_command('echo a%AAA%', AAA='Tere') + assert s==0 and o=='aTere', (s, o) os.environ['BBB'] = 'Hi' - s,o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi',(s,o) + s, o=exec_command('echo a%BBB%') + assert s==0 and o=='aHi', (s, o) - s,o=exec_command('echo a%BBB%',BBB='Hey') - assert s==0 and o=='aHey', (s,o) - s,o=exec_command('echo a%BBB%') - assert s==0 and o=='aHi',(s,o) + s, o=exec_command('echo a%BBB%', BBB='Hey') + assert s==0 and o=='aHey', (s, o) + s, o=exec_command('echo a%BBB%') + assert s==0 and o=='aHi', (s, o) - s,o=exec_command('this_is_not_a_command') - assert s and o!='',(s,o) + s, o=exec_command('this_is_not_a_command') + assert s and o!='', (s, o) - s,o=exec_command('type not_existing_file') - assert s and o!='',(s,o) + s, o=exec_command('type not_existing_file') + assert s and o!='', (s, o) - s,o=exec_command('echo path=%path%') - assert s==0 and o!='',(s,o) + s, o=exec_command('echo path=%path%') + assert s==0 and o!='', (s, o) - s,o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \ + s, o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \ % pythonexe) - assert s==0 and o=='win32',(s,o) + assert s==0 and 
o=='win32', (s, o) - s,o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe) - assert s==1 and o,(s,o) + s, o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe) + assert s==1 and o, (s, o) - s,o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\ + s, o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\ % pythonexe) - assert s==0 and o=='012',(s,o) + assert s==0 and o=='012', (s, o) - s,o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe) - assert s==15 and o=='',(s,o) + s, o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe) + assert s==15 and o=='', (s, o) - s,o=exec_command('%s -c "print \'Heipa\'"' % pythonexe) - assert s==0 and o=='Heipa',(s,o) + s, o=exec_command('%s -c "print \'Heipa\'"' % pythonexe) + assert s==0 and o=='Heipa', (s, o) print ('ok') def test_posix(**kws): - s,o=exec_command("echo Hello",**kws) - assert s==0 and o=='Hello',(s,o) + s, o=exec_command("echo Hello",**kws) + assert s==0 and o=='Hello', (s, o) - s,o=exec_command('echo $AAA',**kws) - assert s==0 and o=='',(s,o) + s, o=exec_command('echo $AAA',**kws) + assert s==0 and o=='', (s, o) - s,o=exec_command('echo "$AAA"',AAA='Tere',**kws) - assert s==0 and o=='Tere',(s,o) + s, o=exec_command('echo "$AAA"',AAA='Tere',**kws) + assert s==0 and o=='Tere', (s, o) - s,o=exec_command('echo "$AAA"',**kws) - assert s==0 and o=='',(s,o) + s, o=exec_command('echo "$AAA"',**kws) + assert s==0 and o=='', (s, o) os.environ['BBB'] = 'Hi' - s,o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi',(s,o) + s, o=exec_command('echo "$BBB"',**kws) + assert s==0 and o=='Hi', (s, o) - s,o=exec_command('echo "$BBB"',BBB='Hey',**kws) - assert s==0 and o=='Hey',(s,o) + s, o=exec_command('echo "$BBB"',BBB='Hey',**kws) + assert s==0 and o=='Hey', (s, o) - s,o=exec_command('echo "$BBB"',**kws) - assert s==0 and o=='Hi',(s,o) + s, o=exec_command('echo "$BBB"',**kws) + assert s==0 and o=='Hi', (s, o) - s,o=exec_command('this_is_not_a_command',**kws) - assert s!=0 and o!='',(s,o) + s, o=exec_command('this_is_not_a_command',**kws) + assert s!=0 and o!='', (s, o) - s,o=exec_command('echo path=$PATH',**kws) - assert s==0 and o!='',(s,o) + s, o=exec_command('echo path=$PATH',**kws) + assert s==0 and o!='', (s, o) - s,o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws) - assert s==0 and o=='posix',(s,o) + s, o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws) + assert s==0 and o=='posix', (s, o) - s,o=exec_command('python -c "raise \'Ignore me.\'"',**kws) - assert s==1 and o,(s,o) + s, o=exec_command('python -c "raise \'Ignore me.\'"',**kws) + assert s==1 and o, (s, o) - s,o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws) - assert s==0 and o=='012',(s,o) + s, o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws) + assert s==0 and o=='012', (s, o) - s,o=exec_command('python -c "import sys;sys.exit(15)"',**kws) - assert s==15 and o=='',(s,o) + s, o=exec_command('python -c "import sys;sys.exit(15)"',**kws) + assert s==15 and o=='', (s, o) - s,o=exec_command('python -c "print \'Heipa\'"',**kws) - assert s==0 and o=='Heipa',(s,o) + s, o=exec_command('python -c "print \'Heipa\'"',**kws) + assert s==0 and o=='Heipa', (s, o) print ('ok') @@ -575,33 +575,33 @@ def test_execute_in(**kws): tmpfile = temp_file_name() fn = 
os.path.basename(tmpfile) tmpdir = os.path.dirname(tmpfile) - f = open(tmpfile,'w') + f = open(tmpfile, 'w') f.write('Hello') f.close() - s,o = exec_command('%s -c "print \'Ignore the following IOError:\','\ - 'open(%r,\'r\')"' % (pythonexe,fn),**kws) - assert s and o!='',(s,o) - s,o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe,fn), + s, o = exec_command('%s -c "print \'Ignore the following IOError:\','\ + 'open(%r,\'r\')"' % (pythonexe, fn),**kws) + assert s and o!='', (s, o) + s, o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe, fn), execute_in = tmpdir,**kws) - assert s==0 and o=='Hello',(s,o) + assert s==0 and o=='Hello', (s, o) os.remove(tmpfile) print ('ok') def test_svn(**kws): - s,o = exec_command(['svn','status'],**kws) - assert s,(s,o) + s, o = exec_command(['svn', 'status'],**kws) + assert s, (s, o) print ('svn ok') def test_cl(**kws): if os.name=='nt': - s,o = exec_command(['cl','/V'],**kws) - assert s,(s,o) + s, o = exec_command(['cl', '/V'],**kws) + assert s, (s, o) print ('cl ok') if os.name=='posix': test = test_posix -elif os.name in ['nt','dos']: +elif os.name in ['nt', 'dos']: test = test_nt else: raise NotImplementedError('exec_command tests for ', os.name) diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index 7fc6e1ae7..344c66da0 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -16,8 +16,8 @@ if sys.version_info[0] >= 3: basestring = str -cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match -fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match +cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match class Extension(old_Extension): def __init__ (self, name, sources, @@ -39,7 +39,7 @@ class Extension(old_Extension): extra_f77_compile_args=None, extra_f90_compile_args=None, ): - old_Extension.__init__(self,name, [], + old_Extension.__init__(self, name, [], include_dirs, define_macros, undef_macros, @@ -72,7 +72,7 @@ class Extension(old_Extension): self.module_dirs = module_dirs or [] self.extra_f77_compile_args = extra_f77_compile_args or [] self.extra_f90_compile_args = extra_f90_compile_args or [] - + return def has_cxx_sources(self): diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py index 37852b0fb..d00228dcd 100644 --- a/numpy/distutils/fcompiler/__init__.py +++ b/numpy/distutils/fcompiler/__init__.py @@ -15,7 +15,7 @@ But note that FCompiler.executables is actually a dictionary of commands. 
""" from __future__ import division, absolute_import, print_function -__all__ = ['FCompiler','new_fcompiler','show_fcompilers', +__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', 'dummy_fortran_file'] import os @@ -144,16 +144,16 @@ class FCompiler(CCompiler): ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist), ) - language_map = {'.f':'f77', - '.for':'f77', - '.F':'f77', # XXX: needs preprocessor - '.ftn':'f77', - '.f77':'f77', - '.f90':'f90', - '.F90':'f90', # XXX: needs preprocessor - '.f95':'f90', + language_map = {'.f': 'f77', + '.for': 'f77', + '.F': 'f77', # XXX: needs preprocessor + '.ftn': 'f77', + '.f77': 'f77', + '.f90': 'f90', + '.F90': 'f90', # XXX: needs preprocessor + '.f95': 'f90', } - language_order = ['f90','f77'] + language_order = ['f90', 'f77'] # These will be set by the subclass @@ -164,14 +164,14 @@ class FCompiler(CCompiler): possible_executables = [] executables = { - 'version_cmd' : ["f77", "-v"], - 'compiler_f77' : ["f77"], - 'compiler_f90' : ["f90"], - 'compiler_fix' : ["f90", "-fixed"], - 'linker_so' : ["f90", "-shared"], - 'linker_exe' : ["f90"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : None, + 'version_cmd': ["f77", "-v"], + 'compiler_f77': ["f77"], + 'compiler_f90': ["f90"], + 'compiler_fix': ["f90", "-fixed"], + 'linker_so': ["f90", "-shared"], + 'linker_exe': ["f90"], + 'archiver': ["ar", "-cr"], + 'ranlib': None, } # If compiler does not support compiling Fortran 90 then it can @@ -196,7 +196,7 @@ class FCompiler(CCompiler): pic_flags = [] # Flags to create position-independent code - src_extensions = ['.for','.ftn','.f77','.f','.f90','.f95','.F','.F90'] + src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90'] obj_extension = ".o" shared_lib_extension = get_shared_lib_extension() @@ -543,10 +543,10 @@ class FCompiler(CCompiler): """Print out the attributes of a compiler instance.""" props = [] for key in list(self.executables.keys()) + \ - ['version','libraries','library_dirs', - 'object_switch','compile_switch']: - if hasattr(self,key): - v = getattr(self,key) + ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch']: + if hasattr(self, key): + v = getattr(self, key) props.append((key, None, '= '+repr(v))) props.sort() @@ -572,17 +572,17 @@ class FCompiler(CCompiler): compiler = self.compiler_f90 if compiler is None: raise DistutilsExecError('f90 not supported by %s needed for %s'\ - % (self.__class__.__name__,src)) + % (self.__class__.__name__, src)) extra_compile_args = self.extra_f90_compile_args or [] else: flavor = ':fix' compiler = self.compiler_fix if compiler is None: raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ - % (self.__class__.__name__,src)) + % (self.__class__.__name__, src)) extra_compile_args = self.extra_f90_compile_args or [] if self.object_switch[-1]==' ': - o_args = [self.object_switch.strip(),obj] + o_args = [self.object_switch.strip(), obj] else: o_args = [self.object_switch.strip()+obj] @@ -593,7 +593,7 @@ class FCompiler(CCompiler): log.info('extra %s options: %r' \ % (flavor[1:], ' '.join(extra_compile_args))) - extra_flags = src_flags.get(self.compiler_type,[]) + extra_flags = src_flags.get(self.compiler_type, []) if extra_flags: log.info('using compile options from source: %r' \ % ' '.join(extra_flags)) @@ -604,7 +604,7 @@ class FCompiler(CCompiler): display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, src) try: - self.spawn(command,display=display) + self.spawn(command, display=display) except DistutilsExecError: msg = 
str(get_exception()) raise CompileError(msg) @@ -613,18 +613,18 @@ class FCompiler(CCompiler): options = [] if self.module_dir_switch is not None: if self.module_dir_switch[-1]==' ': - options.extend([self.module_dir_switch.strip(),module_build_dir]) + options.extend([self.module_dir_switch.strip(), module_build_dir]) else: options.append(self.module_dir_switch.strip()+module_build_dir) else: print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) - print('XXX: Fix module_dir_switch for ',self.__class__.__name__) + print('XXX: Fix module_dir_switch for ', self.__class__.__name__) if self.module_include_switch is not None: for d in [module_build_dir]+module_dirs: options.append('%s%s' % (self.module_include_switch, d)) else: print('XXX: module_dirs=%r option ignored' % (module_dirs)) - print('XXX: Fix module_include_switch for ',self.__class__.__name__) + print('XXX: Fix module_include_switch for ', self.__class__.__name__) return options def library_option(self, lib): @@ -650,7 +650,7 @@ class FCompiler(CCompiler): if self._need_link(objects, output_filename): if self.library_switch[-1]==' ': - o_args = [self.library_switch.strip(),output_filename] + o_args = [self.library_switch.strip(), output_filename] else: o_args = [self.library_switch.strip()+output_filename] @@ -705,19 +705,19 @@ class FCompiler(CCompiler): _default_compilers = ( # sys.platform mappings - ('win32', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95', + ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', 'intelvem', 'intelem')), - ('cygwin.*', ('gnu','intelv','absoft','compaqv','intelev','gnu95','g95')), - ('linux.*', ('gnu95','intel','lahey','pg','absoft','nag','vast','compaq', - 'intele','intelem','gnu','g95','pathf95')), + ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), + ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', + 'intele', 'intelem', 'gnu', 'g95', 'pathf95')), ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), - ('sunos.*', ('sun','gnu','gnu95','g95')), - ('irix.*', ('mips','gnu','gnu95',)), - ('aix.*', ('ibm','gnu','gnu95',)), + ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), + ('irix.*', ('mips', 'gnu', 'gnu95',)), + ('aix.*', ('ibm', 'gnu', 'gnu95',)), # os.name mappings - ('posix', ('gnu','gnu95',)), - ('nt', ('gnu','gnu95',)), - ('mac', ('gnu95','gnu','pg')), + ('posix', ('gnu', 'gnu95',)), + ('nt', ('gnu', 'gnu95',)), + ('mac', ('gnu95', 'gnu', 'pg')), ) fcompiler_class = None @@ -925,18 +925,18 @@ def dummy_fortran_file(): return name[:-2] -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search -_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]',re.I).match +is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search +_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search +_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match def is_free_format(file): """Check if file is in free format Fortran.""" # f90 allows both fixed and free format, assuming fixed unless # signs of free format are detected. 
result = 0 - f = open_latin1(file,'r') + f = open_latin1(file, 'r') line = f.readline() n = 10000 # the number of non-comment lines to scan for hints if _has_f_header(line): @@ -956,12 +956,12 @@ def is_free_format(file): return result def has_f90_header(src): - f = open_latin1(src,'r') + f = open_latin1(src, 'r') line = f.readline() f.close() return _has_f90_header(line) or _has_fix_header(line) -_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)',re.I) +_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I) def get_f77flags(src): """ Search the first 20 lines of fortran 77 code for line pattern @@ -969,7 +969,7 @@ def get_f77flags(src): Return a dictionary {<fcompiler type>:<f77 flags>}. """ flags = {} - f = open_latin1(src,'r') + f = open_latin1(src, 'r') i = 0 for line in f: i += 1 diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py index 03430fb0e..bde0529be 100644 --- a/numpy/distutils/fcompiler/absoft.py +++ b/numpy/distutils/fcompiler/absoft.py @@ -61,12 +61,12 @@ class AbsoftFCompiler(FCompiler): elif self.get_version() >= '9.0': opt = ['-shared'] else: - opt = ["-K","shared"] + opt = ["-K", "shared"] return opt def library_dir_option(self, dir): if os.name=='nt': - return ['-link','/PATH:"%s"' % (dir)] + return ['-link', '/PATH:"%s"' % (dir)] return "-L" + dir def library_option(self, lib): @@ -97,9 +97,9 @@ class AbsoftFCompiler(FCompiler): elif self.get_version() >= '10.0': opt.extend(['af90math', 'afio', 'af77math', 'U77']) elif self.get_version() >= '8.0': - opt.extend(['f90math','fio','f77math','U77']) + opt.extend(['f90math', 'fio', 'f77math', 'U77']) else: - opt.extend(['fio','f90math','fmath','U77']) + opt.extend(['fio', 'f90math', 'fmath', 'U77']) if os.name =='nt': opt.append('COMDLG32') return opt @@ -115,11 +115,11 @@ class AbsoftFCompiler(FCompiler): def get_flags_f77(self): opt = FCompiler.get_flags_f77(self) - opt.extend(['-N22','-N90','-N110']) + opt.extend(['-N22', '-N90', '-N110']) v = self.get_version() if os.name == 'nt': if v and v>='8.0': - opt.extend(['-f','-N15']) + opt.extend(['-f', '-N15']) else: opt.append('-f') if v: @@ -133,8 +133,8 @@ class AbsoftFCompiler(FCompiler): def get_flags_f90(self): opt = FCompiler.get_flags_f90(self) - opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX", - "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"]) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) if self.get_version(): if self.get_version()>'4.6': opt.extend(["-YDEALLOC=ALL"]) @@ -142,9 +142,9 @@ class AbsoftFCompiler(FCompiler): def get_flags_fix(self): opt = FCompiler.get_flags_fix(self) - opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX", - "-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"]) - opt.extend(["-f","fixed"]) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + opt.extend(["-f", "fixed"]) return opt def get_flags_opt(self): diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py index 3831f88f7..5162b168c 100644 --- a/numpy/distutils/fcompiler/compaq.py +++ b/numpy/distutils/fcompiler/compaq.py @@ -29,7 +29,7 @@ class CompaqFCompiler(FCompiler): executables = { 'version_cmd' : ['<F90>', "-version"], - 'compiler_f77' : [fc_exe, "-f77rtl","-fixed"], + 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], 'compiler_fix' : [fc_exe, "-fixed"], 
'compiler_f90' : [fc_exe], 'linker_so' : ['<F90>'], @@ -41,18 +41,18 @@ class CompaqFCompiler(FCompiler): module_include_switch = '-I' def get_flags(self): - return ['-assume no2underscore','-nomixed_str_len_arg'] + return ['-assume no2underscore', '-nomixed_str_len_arg'] def get_flags_debug(self): - return ['-g','-check bounds'] + return ['-g', '-check bounds'] def get_flags_opt(self): - return ['-O4','-align dcommons','-assume bigarrays', - '-assume nozsize','-math_library fast'] + return ['-O4', '-align dcommons', '-assume bigarrays', + '-assume nozsize', '-math_library fast'] def get_flags_arch(self): return ['-arch host', '-tune host'] def get_flags_linker_so(self): if sys.platform[:5]=='linux': return ['-shared'] - return ['-shared','-Wl,-expect_unresolved,*'] + return ['-shared', '-Wl,-expect_unresolved,*'] class CompaqVisualFCompiler(FCompiler): @@ -101,7 +101,7 @@ class CompaqVisualFCompiler(FCompiler): executables = { 'version_cmd' : ['<F90>', "/what"], - 'compiler_f77' : [fc_exe, "/f77rtl","/fixed"], + 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], 'compiler_fix' : [fc_exe, "/fixed"], 'compiler_f90' : [fc_exe], 'linker_so' : ['<F90>'], @@ -110,10 +110,10 @@ class CompaqVisualFCompiler(FCompiler): } def get_flags(self): - return ['/nologo','/MD','/WX','/iface=(cref,nomixed_str_len_arg)', - '/names:lowercase','/assume:underscore'] + return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', + '/names:lowercase', '/assume:underscore'] def get_flags_opt(self): - return ['/Ox','/fast','/optimize:5','/unroll:0','/math_library:fast'] + return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] def get_flags_arch(self): return ['/threads'] def get_flags_debug(self): diff --git a/numpy/distutils/fcompiler/g95.py b/numpy/distutils/fcompiler/g95.py index 20c2c0d03..26f73b530 100644 --- a/numpy/distutils/fcompiler/g95.py +++ b/numpy/distutils/fcompiler/g95.py @@ -22,7 +22,7 @@ class G95FCompiler(FCompiler): 'compiler_f77' : ["g95", "-ffixed-form"], 'compiler_fix' : ["g95", "-ffixed-form"], 'compiler_f90' : ["g95"], - 'linker_so' : ["<F90>","-shared"], + 'linker_so' : ["<F90>", "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py index 3402319e0..5ee3df2dc 100644 --- a/numpy/distutils/fcompiler/gnu.py +++ b/numpy/distutils/fcompiler/gnu.py @@ -170,7 +170,7 @@ class GnuFCompiler(FCompiler): if d is not None: g2c = self.g2c + '-pic' f = self.static_lib_format % (g2c, self.static_lib_extension) - if not os.path.isfile(os.path.join(d,f)): + if not os.path.isfile(os.path.join(d, f)): g2c = self.g2c else: g2c = self.g2c diff --git a/numpy/distutils/fcompiler/hpux.py b/numpy/distutils/fcompiler/hpux.py index 5560e199a..9004961e1 100644 --- a/numpy/distutils/fcompiler/hpux.py +++ b/numpy/distutils/fcompiler/hpux.py @@ -31,10 +31,10 @@ class HPUXFCompiler(FCompiler): def get_library_dirs(self): opt = ['/usr/lib/hpux64'] return opt - def get_version(self, force=0, ok_status=[256,0,1]): + def get_version(self, force=0, ok_status=[256, 0, 1]): # XXX status==256 may indicate 'unrecognized option' or # 'no input file'. So, version_cmd needs more work. 
- return FCompiler.get_version(self,force,ok_status) + return FCompiler.get_version(self, force, ok_status) if __name__ == '__main__': from distutils import log diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py index e5061bd18..cc65df972 100644 --- a/numpy/distutils/fcompiler/ibm.py +++ b/numpy/distutils/fcompiler/ibm.py @@ -35,7 +35,7 @@ class IBMFCompiler(FCompiler): lslpp = find_executable('lslpp') xlf = find_executable('xlf') if os.path.exists(xlf) and os.path.exists(lslpp): - s,o = exec_command(lslpp + ' -Lc xlfcmp') + s, o = exec_command(lslpp + ' -Lc xlfcmp') m = re.search('xlfcmp:(?P<version>\d+([.]\d+)+)', o) if m: version = m.group('version') @@ -47,7 +47,7 @@ class IBMFCompiler(FCompiler): # let's try another method: l = sorted(os.listdir(xlf_dir)) l.reverse() - l = [d for d in l if os.path.isfile(os.path.join(xlf_dir,d,'xlf.cfg'))] + l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] if l: from distutils.version import LooseVersion self.version = version = LooseVersion(l[0]) @@ -65,7 +65,7 @@ class IBMFCompiler(FCompiler): opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') else: opt.append('-bshared') - version = self.get_version(ok_status=[0,40]) + version = self.get_version(ok_status=[0, 40]) if version is not None: if sys.platform.startswith('aix'): xlf_cfg = '/etc/xlf.cfg' @@ -73,7 +73,7 @@ class IBMFCompiler(FCompiler): xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version fo, new_cfg = make_temp_file(suffix='_xlf.cfg') log.info('Creating '+new_cfg) - fi = open(xlf_cfg,'r') + fi = open(xlf_cfg, 'r') crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match for line in fi: m = crt1_match(line) diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py index f6aa687a8..21a3c5eaf 100644 --- a/numpy/distutils/fcompiler/intel.py +++ b/numpy/distutils/fcompiler/intel.py @@ -137,8 +137,8 @@ class IntelVisualFCompiler(BaseIntelFCompiler): executables = { 'version_cmd' : None, - 'compiler_f77' : [None,"-FI","-w90","-w95"], - 'compiler_fix' : [None,"-FI","-4L72","-w"], + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], 'compiler_f90' : [None], 'linker_so' : ['<F90>', "-shared"], 'archiver' : [ar_exe, "/verbose", "/OUT:"], @@ -152,14 +152,14 @@ class IntelVisualFCompiler(BaseIntelFCompiler): module_include_switch = '/I' def get_flags(self): - opt = ['/nologo','/MD','/nbs','/Qlowercase','/us'] + opt = ['/nologo', '/MD', '/nbs', '/Qlowercase', '/us'] return opt def get_flags_free(self): return ["-FR"] def get_flags_debug(self): - return ['/4Yb','/d2'] + return ['/4Yb', '/d2'] def get_flags_opt(self): return ['/O2'] @@ -178,10 +178,10 @@ class IntelItaniumVisualFCompiler(IntelVisualFCompiler): executables = { 'version_cmd' : None, - 'compiler_f77' : [None,"-FI","-w90","-w95"], - 'compiler_fix' : [None,"-FI","-4L72","-w"], + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], 'compiler_f90' : [None], - 'linker_so' : ['<F90>',"-shared"], + 'linker_so' : ['<F90>', "-shared"], 'archiver' : [ar_exe, "/verbose", "/OUT:"], 'ranlib' : None } diff --git a/numpy/distutils/fcompiler/lahey.py b/numpy/distutils/fcompiler/lahey.py index afca8e422..7a33b4b63 100644 --- a/numpy/distutils/fcompiler/lahey.py +++ b/numpy/distutils/fcompiler/lahey.py @@ -17,7 +17,7 @@ class LaheyFCompiler(FCompiler): 'compiler_f77' : ["lf95", "--fix"], 'compiler_fix' : ["lf95", "--fix"], 'compiler_f90' : ["lf95"], - 'linker_so' : 
["lf95","-shared"], + 'linker_so' : ["lf95", "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } @@ -28,12 +28,12 @@ class LaheyFCompiler(FCompiler): def get_flags_opt(self): return ['-O'] def get_flags_debug(self): - return ['-g','--chk','--chkglobal'] + return ['-g', '--chk', '--chkglobal'] def get_library_dirs(self): opt = [] d = os.environ.get('LAHEY') if d: - opt.append(os.path.join(d,'lib')) + opt.append(os.path.join(d, 'lib')) return opt def get_libraries(self): opt = [] diff --git a/numpy/distutils/fcompiler/mips.py b/numpy/distutils/fcompiler/mips.py index 21fa00642..6a8d23099 100644 --- a/numpy/distutils/fcompiler/mips.py +++ b/numpy/distutils/fcompiler/mips.py @@ -16,7 +16,7 @@ class MIPSFCompiler(FCompiler): 'compiler_f77' : ["f77", "-f77"], 'compiler_fix' : ["f90", "-fixedform"], 'compiler_f90' : ["f90"], - 'linker_so' : ["f90","-shared"], + 'linker_so' : ["f90", "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : None } @@ -31,7 +31,7 @@ class MIPSFCompiler(FCompiler): def get_flags_arch(self): opt = [] for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): - if getattr(cpu,'is_IP%s'%a)(): + if getattr(cpu, 'is_IP%s'%a)(): opt.append('-TARG:platform=IP%s' % a) break return opt diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py index b02cf43a6..ae1b96faf 100644 --- a/numpy/distutils/fcompiler/nag.py +++ b/numpy/distutils/fcompiler/nag.py @@ -23,7 +23,7 @@ class NAGFCompiler(FCompiler): def get_flags_linker_so(self): if sys.platform=='darwin': - return ['-unsharedf95','-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] return ["-Wl,-shared"] def get_flags_opt(self): return ['-O4'] @@ -34,7 +34,7 @@ class NAGFCompiler(FCompiler): else: return [''] def get_flags_debug(self): - return ['-g','-gline','-g90','-nan','-C'] + return ['-g', '-gline', '-g90', '-nan', '-C'] if __name__ == '__main__': from distutils import log diff --git a/numpy/distutils/fcompiler/none.py b/numpy/distutils/fcompiler/none.py index 039957233..6f602d734 100644 --- a/numpy/distutils/fcompiler/none.py +++ b/numpy/distutils/fcompiler/none.py @@ -9,14 +9,14 @@ class NoneFCompiler(FCompiler): compiler_type = 'none' description = 'Fake Fortran compiler' - executables = {'compiler_f77' : None, - 'compiler_f90' : None, - 'compiler_fix' : None, - 'linker_so' : None, - 'linker_exe' : None, - 'archiver' : None, - 'ranlib' : None, - 'version_cmd' : None, + executables = {'compiler_f77': None, + 'compiler_f90': None, + 'compiler_fix': None, + 'linker_so': None, + 'linker_exe': None, + 'archiver': None, + 'ranlib': None, + 'version_cmd': None, } def find_executables(self): diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py index 1cc201f22..f3f5ea22b 100644 --- a/numpy/distutils/fcompiler/pg.py +++ b/numpy/distutils/fcompiler/pg.py @@ -29,7 +29,7 @@ class PGroupFCompiler(FCompiler): 'compiler_f77' : ["pgfortran"], 'compiler_fix' : ["pgfortran", "-Mfixed"], 'compiler_f90' : ["pgfortran"], - 'linker_so' : ["pgfortran","-shared","-fpic"], + 'linker_so' : ["pgfortran", "-shared", "-fpic"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } @@ -40,7 +40,7 @@ class PGroupFCompiler(FCompiler): module_include_switch = '-I' def get_flags(self): - opt = ['-Minform=inform','-Mnosecond_underscore'] + opt = ['-Minform=inform', '-Mnosecond_underscore'] return self.pic_flags + opt def get_flags_opt(self): return ['-fast'] diff --git a/numpy/distutils/fcompiler/sun.py 
b/numpy/distutils/fcompiler/sun.py index 07d4eeb8e..0955f14a1 100644 --- a/numpy/distutils/fcompiler/sun.py +++ b/numpy/distutils/fcompiler/sun.py @@ -19,7 +19,7 @@ class SunFCompiler(FCompiler): 'compiler_f77' : ["f90"], 'compiler_fix' : ["f90", "-fixed"], 'compiler_f90' : ["f90"], - 'linker_so' : ["<F90>","-Bdynamic","-G"], + 'linker_so' : ["<F90>", "-Bdynamic", "-G"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } @@ -35,12 +35,12 @@ class SunFCompiler(FCompiler): ret.append("-fixed") return ret def get_opt(self): - return ['-fast','-dalign'] + return ['-fast', '-dalign'] def get_arch(self): return ['-xtarget=generic'] def get_libraries(self): opt = [] - opt.extend(['fsu','sunmath','mvec']) + opt.extend(['fsu', 'sunmath', 'mvec']) return opt if __name__ == '__main__': diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py index 9052cf74e..d10b50218 100644 --- a/numpy/distutils/from_template.py +++ b/numpy/distutils/from_template.py @@ -47,15 +47,15 @@ process_file(filename) """ from __future__ import division, absolute_import, print_function -__all__ = ['process_str','process_file'] +__all__ = ['process_str', 'process_file'] import os import sys import re -routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',re.I) -routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)',re.I) -function_start_re = re.compile(r'\n (\$|\*)\s*function\b',re.I) +routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) +routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) +function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) def parse_structure(astr): """ Return a list of tuples for each function or subroutine each @@ -66,22 +66,22 @@ def parse_structure(astr): spanlist = [] ind = 0 while True: - m = routine_start_re.search(astr,ind) + m = routine_start_re.search(astr, ind) if m is None: break start = m.start() - if function_start_re.match(astr,start,m.end()): + if function_start_re.match(astr, start, m.end()): while True: - i = astr.rfind('\n',ind,start) + i = astr.rfind('\n', ind, start) if i==-1: break start = i if astr[i:i+7]!='\n $': break start += 1 - m = routine_end_re.search(astr,m.end()) + m = routine_end_re.search(astr, m.end()) ind = end = m and m.end()-1 or len(astr) - spanlist.append((start,end)) + spanlist.append((start, end)) return spanlist template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") @@ -93,7 +93,7 @@ def find_repl_patterns(astr): names = {} for rep in reps: name = rep[0].strip() or unique_key(names) - repl = rep[1].replace('\,','@comma@') + repl = rep[1].replace('\,', '@comma@') thelist = conv(repl) names[name] = thelist return names @@ -124,14 +124,14 @@ def unique_key(adict): template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') -def expand_sub(substr,names): - substr = substr.replace('\>','@rightarrow@') - substr = substr.replace('\<','@leftarrow@') +def expand_sub(substr, names): + substr = substr.replace('\>', '@rightarrow@') + substr = substr.replace('\<', '@leftarrow@') lnames = find_repl_patterns(substr) - substr = named_re.sub(r"<\1>",substr) # get rid of definition templates + substr = named_re.sub(r"<\1>", substr) # get rid of definition templates def listrepl(mobj): - thelist = conv(mobj.group(1).replace('\,','@comma@')) + thelist = conv(mobj.group(1).replace('\,', '@comma@')) if template_name_re.match(thelist): return "<%s>" % (thelist) name = None @@ -151,12 +151,12 @@ def expand_sub(substr,names): rules = {} for r in 
template_re.findall(substr): if r not in rules: - thelist = lnames.get(r,names.get(r,None)) + thelist = lnames.get(r, names.get(r, None)) if thelist is None: raise ValueError('No replicates found for <%s>' % (r)) if r not in names and not thelist.startswith('_'): names[r] = thelist - rule = [i.replace('@comma@',',') for i in thelist.split(',')] + rule = [i.replace('@comma@', ',') for i in thelist.split(',')] num = len(rule) if numsubs is None: @@ -168,20 +168,20 @@ def expand_sub(substr,names): else: print("Mismatch in number of replacements (base <%s=%s>)" " for <%s=%s>. Ignoring." % - (base_rule, ','.join(rules[base_rule]), r,thelist)) + (base_rule, ','.join(rules[base_rule]), r, thelist)) if not rules: return substr def namerepl(mobj): name = mobj.group(1) - return rules.get(name,(k+1)*[name])[k] + return rules.get(name, (k+1)*[name])[k] newstr = '' for k in range(numsubs): newstr += template_re.sub(namerepl, substr) + '\n\n' - newstr = newstr.replace('@rightarrow@','>') - newstr = newstr.replace('@leftarrow@','<') + newstr = newstr.replace('@rightarrow@', '>') + newstr = newstr.replace('@leftarrow@', '<') return newstr def process_str(allstr): @@ -196,13 +196,13 @@ def process_str(allstr): for sub in struct: writestr += newstr[oldend:sub[0]] names.update(find_repl_patterns(newstr[oldend:sub[0]])) - writestr += expand_sub(newstr[sub[0]:sub[1]],names) + writestr += expand_sub(newstr[sub[0]:sub[1]], names) oldend = sub[1] writestr += newstr[oldend:] return writestr -include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]",re.I) +include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I) def resolve_includes(source): d = os.path.dirname(source) @@ -213,7 +213,7 @@ def resolve_includes(source): if m: fn = m.group('name') if not os.path.isabs(fn): - fn = os.path.join(d,fn) + fn = os.path.join(d, fn) if os.path.isfile(fn): print('Including file', fn) lines.extend(resolve_includes(fn)) @@ -246,10 +246,10 @@ if __name__ == "__main__": fid = sys.stdin outfile = sys.stdout else: - fid = open(file,'r') + fid = open(file, 'r') (base, ext) = os.path.splitext(file) newname = base - outfile = open(newname,'w') + outfile = open(newname, 'w') allstr = fid.read() writestr = process_str(allstr) diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index 16fe3a5fb..1d8dcd9fd 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -10,7 +10,7 @@ class IntelCCompiler(UnixCCompiler): cc_args = 'fPIC' def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose,dry_run, force) + UnixCCompiler.__init__ (self, verbose, dry_run, force) self.cc_exe = 'icc -fPIC' compiler = self.cc_exe self.set_executables(compiler=compiler, @@ -24,7 +24,7 @@ class IntelItaniumCCompiler(IntelCCompiler): # On Itanium, the Intel Compiler used to be called ecc, let's search for # it (now it's also icc, so ecc is last in the search). 
- for cc_exe in map(find_executable,['icc','ecc']): + for cc_exe in map(find_executable, ['icc', 'ecc']): if cc_exe: break @@ -35,7 +35,7 @@ class IntelEM64TCCompiler(UnixCCompiler): cc_exe = 'icc -m64 -fPIC' cc_args = "-fPIC" def __init__ (self, verbose=0, dry_run=0, force=0): - UnixCCompiler.__init__ (self, verbose,dry_run, force) + UnixCCompiler.__init__ (self, verbose, dry_run, force) self.cc_exe = 'icc -m64 -fPIC' compiler = self.cc_exe self.set_executables(compiler=compiler, diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py index fe1efdca6..5ecb104ff 100644 --- a/numpy/distutils/line_endings.py +++ b/numpy/distutils/line_endings.py @@ -26,16 +26,16 @@ def dos2unix(file): else: print(file, 'ok') -def dos2unix_one_dir(modified_files,dir_name,file_names): +def dos2unix_one_dir(modified_files, dir_name, file_names): for file in file_names: - full_path = os.path.join(dir_name,file) + full_path = os.path.join(dir_name, file) file = dos2unix(full_path) if file is not None: modified_files.append(file) def dos2unix_dir(dir_name): modified_files = [] - os.path.walk(dir_name,dos2unix_one_dir,modified_files) + os.path.walk(dir_name, dos2unix_one_dir, modified_files) return modified_files #---------------------------------- @@ -60,16 +60,16 @@ def unix2dos(file): else: print(file, 'ok') -def unix2dos_one_dir(modified_files,dir_name,file_names): +def unix2dos_one_dir(modified_files, dir_name, file_names): for file in file_names: - full_path = os.path.join(dir_name,file) + full_path = os.path.join(dir_name, file) unix2dos(full_path) if file is not None: modified_files.append(file) def unix2dos_dir(dir_name): modified_files = [] - os.path.walk(dir_name,unix2dos_one_dir,modified_files) + os.path.walk(dir_name, unix2dos_one_dir, modified_files) return modified_files if __name__ == "__main__": diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py index d77f98c63..37f9fe5dd 100644 --- a/numpy/distutils/log.py +++ b/numpy/distutils/log.py @@ -16,9 +16,9 @@ else: def _fix_args(args,flag=1): if is_string(args): - return args.replace('%','%%') + return args.replace('%', '%%') if flag and is_sequence(args): - return tuple([_fix_args(a,flag=0) for a in args]) + return tuple([_fix_args(a, flag=0) for a in args]) return args @@ -78,7 +78,7 @@ def set_verbosity(v, force=False): set_threshold(INFO, force) elif v >= 2: set_threshold(DEBUG, force) - return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1) + return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) _global_color_map = { diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 75687be84..c720d142a 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -54,7 +54,7 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): force=0): distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, - verbose,dry_run, force) + verbose, dry_run, force) # we need to support 3.2 which doesn't match the standard # get_versions methods regex @@ -64,7 +64,7 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): stdout=subprocess.PIPE) out_string = p.stdout.read() p.stdout.close() - result = re.search('(\d+\.\d+)',out_string) + result = re.search('(\d+\.\d+)', out_string) if result: self.gcc_version = StrictVersion(result.group(1)) @@ -215,11 +215,11 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): # added these lines to strip off windows drive letters # without it, .o files are placed next to 
.c files # instead of the build directory - drv,base = os.path.splitdrive(base) + drv, base = os.path.splitdrive(base) if drv: base = base[1:] - if ext not in (self.src_extensions + ['.rc','.res']): + if ext not in (self.src_extensions + ['.rc', '.res']): raise UnknownFileError( "unknown file type '%s' (from '%s')" % \ (ext, src_name)) @@ -391,7 +391,7 @@ def _build_import_library_amd64(): return def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix,'libs',def_name) + def_file = os.path.join(sys.prefix, 'libs', def_name) log.info('Building import library (arch=AMD64): "%s" (from %s)' \ % (out_file, dll_file)) @@ -405,9 +405,9 @@ def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) - lib_file = os.path.join(sys.prefix,'libs',lib_name) + lib_file = os.path.join(sys.prefix, 'libs', lib_name) out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) - out_file = os.path.join(sys.prefix,'libs',out_name) + out_file = os.path.join(sys.prefix, 'libs', out_name) if not os.path.isfile(lib_file): log.warn('Cannot build import library: "%s" not found' % (lib_file)) return @@ -419,14 +419,14 @@ def _build_import_library_x86(): from numpy.distutils import lib2def def_name = "python%d%d.def" % tuple(sys.version_info[:2]) - def_file = os.path.join(sys.prefix,'libs',def_name) + def_file = os.path.join(sys.prefix, 'libs', def_name) nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) nm_output = lib2def.getnm(nm_cmd) dlist, flist = lib2def.parse_nm(nm_output) lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) dll_name = "python%d%d.dll" % tuple(sys.version_info[:2]) - args = (dll_name,def_file,out_file) + args = (dll_name, def_file, out_file) cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args status = os.system(cmd) # for now, fail silently @@ -465,7 +465,7 @@ if sys.platform == 'win32': # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 on Windows XP: _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): - major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".",2) + major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION del major, minor, rest except ImportError: diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index bc073aee4..f77e52592 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -25,11 +25,11 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'dict_append', 'appendpath', 'generate_config_py', 'get_cmd', 'allpath', 'get_mathlibs', 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', - 'blue_text', 'cyan_text', 'cyg2win32','mingw32','all_strings', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', 'has_f_sources', 'has_cxx_sources', 'filter_sources', 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', 'get_script_files', 'get_lib_source_files', 'get_data_files', - 'dot_join', 'get_frame', 'minrelpath','njoin', + 'dot_join', 'get_frame', 'minrelpath', 'njoin', 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info'] @@ -85,7 +85,7 @@ def rel_path(path, parent_path): if apath==pd: return '' if pd == apath[:len(pd)]: - assert apath[len(pd)] in [os.sep],repr((path,apath[len(pd)])) + assert apath[len(pd)] in [os.sep], 
repr((path, apath[len(pd)])) path = apath[len(pd)+1:] return path @@ -144,19 +144,19 @@ def njoin(*path): # njoin('a', 'b') joined = os.path.join(*path) if os.path.sep != '/': - joined = joined.replace('/',os.path.sep) + joined = joined.replace('/', os.path.sep) return minrelpath(joined) def get_mathlibs(path=None): """Return the MATHLIB line from numpyconfig.h """ if path is not None: - config_file = os.path.join(path,'_numpyconfig.h') + config_file = os.path.join(path, '_numpyconfig.h') else: # Look for the file in each of the numpy include directories. dirs = get_numpy_include_dirs() for path in dirs: - fn = os.path.join(path,'_numpyconfig.h') + fn = os.path.join(path, '_numpyconfig.h') if os.path.exists(fn): config_file = fn break @@ -185,34 +185,34 @@ def minrelpath(path): l = path.split(os.sep) while l: try: - i = l.index('.',1) + i = l.index('.', 1) except ValueError: break del l[i] j = 1 while l: try: - i = l.index('..',j) + i = l.index('..', j) except ValueError: break if l[i-1]=='..': j += 1 else: - del l[i],l[i-1] + del l[i], l[i-1] j = 1 if not l: return '' return os.sep.join(l) -def _fix_paths(paths,local_path,include_non_existing): +def _fix_paths(paths, local_path, include_non_existing): assert is_sequence(paths), repr(type(paths)) new_paths = [] - assert not is_string(paths),repr(paths) + assert not is_string(paths), repr(paths) for n in paths: if is_string(n): if '*' in n or '?' in n: p = glob.glob(n) - p2 = glob.glob(njoin(local_path,n)) + p2 = glob.glob(njoin(local_path, n)) if p2: new_paths.extend(p2) elif p: @@ -221,9 +221,9 @@ def _fix_paths(paths,local_path,include_non_existing): if include_non_existing: new_paths.append(n) print('could not resolve pattern in %r: %r' % - (local_path,n)) + (local_path, n)) else: - n2 = njoin(local_path,n) + n2 = njoin(local_path, n) if os.path.exists(n2): new_paths.append(n2) else: @@ -233,10 +233,10 @@ def _fix_paths(paths,local_path,include_non_existing): new_paths.append(n) if not os.path.exists(n): print('non-existing path in %r: %r' % - (local_path,n)) + (local_path, n)) elif is_sequence(n): - new_paths.extend(_fix_paths(n,local_path,include_non_existing)) + new_paths.extend(_fix_paths(n, local_path, include_non_existing)) else: new_paths.append(n) return [minrelpath(p) for p in new_paths] @@ -246,7 +246,7 @@ def gpaths(paths, local_path='', include_non_existing=True): """ if is_string(paths): paths = (paths,) - return _fix_paths(paths,local_path, include_non_existing) + return _fix_paths(paths, local_path, include_non_existing) _temporary_directory = None @@ -285,7 +285,7 @@ def terminal_has_colors(): # curses.version is 2.2 # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) return 0 - if hasattr(sys.stdout,'isatty') and sys.stdout.isatty(): + if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): try: import curses curses.setupterm() @@ -346,9 +346,9 @@ def mingw32(): """Return true when using mingw32 environment. 
""" if sys.platform=='win32': - if os.environ.get('OSTYPE','')=='msys': + if os.environ.get('OSTYPE', '')=='msys': return True - if os.environ.get('MSYSTEM','')=='MINGW32': + if os.environ.get('MSYSTEM', '')=='MINGW32': return True return False @@ -357,11 +357,11 @@ def msvc_runtime_library(): msc_pos = sys.version.find('MSC v.') if msc_pos != -1: msc_ver = sys.version[msc_pos+6:msc_pos+10] - lib = {'1300' : 'msvcr70', # MSVC 7.0 - '1310' : 'msvcr71', # MSVC 7.1 - '1400' : 'msvcr80', # MSVC 8 - '1500' : 'msvcr90', # MSVC 9 (VS 2008) - '1600' : 'msvcr100', # MSVC 10 (aka 2010) + lib = {'1300': 'msvcr70', # MSVC 7.0 + '1310': 'msvcr71', # MSVC 7.1 + '1400': 'msvcr80', # MSVC 8 + '1500': 'msvcr90', # MSVC 9 (VS 2008) + '1600': 'msvcr100', # MSVC 10 (aka 2010) }.get(msc_ver, None) else: lib = None @@ -371,10 +371,10 @@ def msvc_runtime_library(): ######################### #XXX need support for .C that is also C++ -cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match -fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match -f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match -f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)',re.I).match +cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match +fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z', re.I).match +f90_ext_match = re.compile(r'.*[.](f90|f95)\Z', re.I).match +f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match def _get_f90_modules(source): """Return a list of Fortran f90 module names that given source file defines. @@ -382,7 +382,7 @@ def _get_f90_modules(source): if not f90_ext_match(source): return [] modules = [] - f = open(source,'r') + f = open(source, 'r') for line in f: m = f90_module_name_match(line) if m: @@ -474,7 +474,7 @@ def _get_headers(directory_list): # get *.h files from list of directories headers = [] for d in directory_list: - head = glob.glob(os.path.join(d,"*.h")) #XXX: *.hpp files?? + head = glob.glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? headers.extend(head) return headers @@ -497,7 +497,7 @@ def is_local_src_dir(directory): if not is_string(directory): return False abs_dir = os.path.abspath(directory) - c = os.path.commonprefix([os.getcwd(),abs_dir]) + c = os.path.commonprefix([os.getcwd(), abs_dir]) new_dir = abs_dir[len(c):].split(os.sep) if new_dir and not new_dir[0]: new_dir = new_dir[1:] @@ -520,7 +520,7 @@ def general_source_directories_files(top_path): """Return a directory name relative to top_path and files contained. 
""" - pruned_directories = ['CVS','.svn','build'] + pruned_directories = ['CVS', '.svn', 'build'] prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): pruned = [ d for d in dirnames if d not in pruned_directories ] @@ -530,13 +530,13 @@ def general_source_directories_files(top_path): rpath = rel_path(dpath, top_path) files = [] for f in os.listdir(dpath): - fn = os.path.join(dpath,f) + fn = os.path.join(dpath, f) if os.path.isfile(fn) and not prune_file_pat.search(fn): files.append(fn) yield rpath, files dpath = top_path rpath = rel_path(dpath, top_path) - filenames = [os.path.join(dpath,f) for f in os.listdir(dpath) \ + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ if not prune_file_pat.search(f)] files = [f for f in filenames if os.path.isfile(f)] yield rpath, files @@ -561,11 +561,11 @@ def get_script_files(scripts): def get_lib_source_files(lib): filenames = [] - sources = lib[1].get('sources',[]) + sources = lib[1].get('sources', []) sources = [_m for _m in sources if is_string(_m)] filenames.extend(sources) filenames.extend(get_dependencies(sources)) - depends = lib[1].get('depends',[]) + depends = lib[1].get('depends', []) for d in depends: if is_local_src_dir(d): filenames.extend(list(general_source_files(d))) @@ -701,8 +701,8 @@ class Configuration(object): self.local_path = '' if package_path is None: package_path = self.local_path - elif os.path.isdir(njoin(self.local_path,package_path)): - package_path = njoin(self.local_path,package_path) + elif os.path.isdir(njoin(self.local_path, package_path)): + package_path = njoin(self.local_path, package_path) if not os.path.isdir(package_path or '.'): raise ValueError("%r is not a directory" % (package_path,)) self.top_path = top_path @@ -727,7 +727,7 @@ class Configuration(object): if n in known_keys: continue a = attrs[n] - setattr(self,n,a) + setattr(self, n, a) if isinstance(a, list): self.list_keys.append(n) elif isinstance(a, dict): @@ -735,7 +735,7 @@ class Configuration(object): else: self.extra_keys.append(n) - if os.path.exists(njoin(package_path,'__init__.py')): + if os.path.exists(njoin(package_path, '__init__.py')): self.packages.append(self.name) self.package_dir[self.name] = package_path @@ -747,13 +747,13 @@ class Configuration(object): ) caller_instance = None - for i in range(1,3): + for i in range(1, 3): try: f = get_frame(i) except ValueError: break try: - caller_instance = eval('self',f.f_globals,f.f_locals) + caller_instance = eval('self', f.f_globals, f.f_locals) break except NameError: pass @@ -777,7 +777,7 @@ class Configuration(object): d = {} known_keys = self.list_keys + self.dict_keys + self.extra_keys for n in known_keys: - a = getattr(self,n) + a = getattr(self, n) if a: d[n] = a return d @@ -819,7 +819,7 @@ class Configuration(object): dirs = [_m for _m in glob.glob(subpackage_path) if os.path.isdir(_m)] config_list = [] for d in dirs: - if not os.path.isfile(njoin(d,'__init__.py')): + if not os.path.isfile(njoin(d, '__init__.py')): continue if 'build' in d.split(os.sep): continue @@ -836,17 +836,17 @@ class Configuration(object): parent_name, caller_level = 1): # In case setup_py imports local modules: - sys.path.insert(0,os.path.dirname(setup_py)) + sys.path.insert(0, os.path.dirname(setup_py)) try: fo_setup_py = open(setup_py, 'U') setup_name = os.path.splitext(os.path.basename(setup_py))[0] - n = dot_join(self.name,subpackage_name,setup_name) + n = dot_join(self.name, subpackage_name, setup_name) setup_module = 
imp.load_module('_'.join(n.split('.')), fo_setup_py, setup_py, ('.py', 'U', 1)) fo_setup_py.close() - if not hasattr(setup_module,'configuration'): + if not hasattr(setup_module, 'configuration'): if not self.options['assume_default_configuration']: self.warn('Assuming default configuration '\ '(%s does not define configuration())'\ @@ -870,9 +870,9 @@ class Configuration(object): else: args = fix_args_py3(args) config = setup_module.configuration(*args) - if config.name!=dot_join(parent_name,subpackage_name): + if config.name!=dot_join(parent_name, subpackage_name): self.warn('Subpackage %r configuration returned as %r' % \ - (dot_join(parent_name,subpackage_name), config.name)) + (dot_join(parent_name, subpackage_name), config.name)) finally: del sys.path[0] return config @@ -907,7 +907,7 @@ class Configuration(object): return self._wildcard_get_subpackage(subpackage_name, parent_name, caller_level = caller_level+1) - assert '*' not in subpackage_name,repr((subpackage_name, subpackage_path,parent_name)) + assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) if subpackage_path is None: subpackage_path = njoin([self.local_path] + l) else: @@ -961,7 +961,7 @@ class Configuration(object): parent_name = None else: parent_name = self.name - config_list = self.get_subpackage(subpackage_name,subpackage_path, + config_list = self.get_subpackage(subpackage_name, subpackage_path, parent_name = parent_name, caller_level = 2) if not config_list: @@ -970,7 +970,7 @@ class Configuration(object): d = config if isinstance(config, Configuration): d = config.todict() - assert isinstance(d,dict),repr(type(d)) + assert isinstance(d, dict), repr(type(d)) self.info('Appending %s configuration to %s' \ % (d.get('name'), self.name)) @@ -981,7 +981,7 @@ class Configuration(object): self.warn('distutils distribution has been initialized,'\ ' it may be too late to add a subpackage '+ subpackage_name) - def add_data_dir(self,data_path): + def add_data_dir(self, data_path): """Recursively add files under data_path to data_files list. 
Recursively add files under data_path to the list of data_files to be @@ -1040,7 +1040,7 @@ class Configuration(object): else: d = None if is_sequence(data_path): - [self.add_data_dir((d,p)) for p in data_path] + [self.add_data_dir((d, p)) for p in data_path] return if not is_string(data_path): raise TypeError("not a string: %r" % (data_path,)) @@ -1075,19 +1075,19 @@ class Configuration(object): % (d, path)) target_list.append(path_list[i]) else: - assert s==path_list[i],repr((s,path_list[i],data_path,d,path,rpath)) + assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) target_list.append(s) i += 1 if path_list[i:]: self.warn('mismatch of pattern_list=%s and path_list=%s'\ - % (pattern_list,path_list)) + % (pattern_list, path_list)) target_list.reverse() - self.add_data_dir((os.sep.join(target_list),path)) + self.add_data_dir((os.sep.join(target_list), path)) else: for path in paths: - self.add_data_dir((d,path)) + self.add_data_dir((d, path)) return - assert not is_glob_pattern(d),repr(d) + assert not is_glob_pattern(d), repr(d) dist = self.get_distribution() if dist is not None and dist.data_files is not None: @@ -1096,18 +1096,18 @@ class Configuration(object): data_files = self.data_files for path in paths: - for d1,f in list(general_source_directories_files(path)): - target_path = os.path.join(self.path_in_package,d,d1) + for d1, f in list(general_source_directories_files(path)): + target_path = os.path.join(self.path_in_package, d, d1) data_files.append((target_path, f)) def _optimize_data_files(self): data_dict = {} - for p,files in self.data_files: + for p, files in self.data_files: if p not in data_dict: data_dict[p] = set() for f in files: data_dict[p].add(f) - self.data_files[:] = [(p,list(files)) for p,files in data_dict.items()] + self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] def add_data_files(self,*files): """Add data files to configuration data_files. 
@@ -1203,7 +1203,7 @@ class Configuration(object): return assert len(files)==1 if is_sequence(files[0]): - d,files = files[0] + d, files = files[0] else: d = None if is_string(files): @@ -1213,7 +1213,7 @@ class Configuration(object): filepat = files[0] else: for f in files: - self.add_data_files((d,f)) + self.add_data_files((d, f)) return else: raise TypeError(repr(type(files))) @@ -1225,7 +1225,7 @@ class Configuration(object): d = '' else: d = os.path.dirname(filepat) - self.add_data_files((d,files)) + self.add_data_files((d, files)) return paths = self.paths(filepat, include_non_existing=False) @@ -1248,9 +1248,9 @@ class Configuration(object): target_list.reverse() self.add_data_files((os.sep.join(target_list), path)) else: - self.add_data_files((d,paths)) + self.add_data_files((d, paths)) return - assert not is_glob_pattern(d),repr((d,filepat)) + assert not is_glob_pattern(d), repr((d, filepat)) dist = self.get_distribution() if dist is not None and dist.data_files is not None: @@ -1258,7 +1258,7 @@ class Configuration(object): else: data_files = self.data_files - data_files.append((os.path.join(self.path_in_package,d),paths)) + data_files.append((os.path.join(self.path_in_package, d), paths)) ### XXX Implement add_py_modules @@ -1319,11 +1319,11 @@ class Configuration(object): headers = [] for path in files: if is_string(path): - [headers.append((self.name,p)) for p in self.paths(path)] + [headers.append((self.name, p)) for p in self.paths(path)] else: if not isinstance(path, (tuple, list)) or len(path) != 2: raise TypeError(repr(path)) - [headers.append((path[0],p)) for p in self.paths(path[1])] + [headers.append((path[0], p)) for p in self.paths(path[1])] dist = self.get_distribution() if dist is not None: if dist.headers is None: @@ -1342,16 +1342,16 @@ class Configuration(object): path-names be relative to the source directory. """ - include_non_existing = kws.get('include_non_existing',True) + include_non_existing = kws.get('include_non_existing', True) return gpaths(paths, local_path = self.local_path, include_non_existing=include_non_existing) - def _fix_paths_dict(self,kw): + def _fix_paths_dict(self, kw): for k in kw.keys(): v = kw[k] - if k in ['sources','depends','include_dirs','library_dirs', - 'module_dirs','extra_objects']: + if k in ['sources', 'depends', 'include_dirs', 'library_dirs', + 'module_dirs', 'extra_objects']: new_v = self.paths(v) kw[k] = new_v @@ -1404,7 +1404,7 @@ class Configuration(object): paths. 
""" ext_args = copy.copy(kw) - ext_args['name'] = dot_join(self.name,name) + ext_args['name'] = dot_join(self.name, name) ext_args['sources'] = sources if 'extra_info' in ext_args: @@ -1419,26 +1419,26 @@ class Configuration(object): self._fix_paths_dict(ext_args) # Resolve out-of-tree dependencies - libraries = ext_args.get('libraries',[]) + libraries = ext_args.get('libraries', []) libnames = [] ext_args['libraries'] = [] for libname in libraries: - if isinstance(libname,tuple): + if isinstance(libname, tuple): self._fix_paths_dict(libname[1]) # Handle library names of the form libname@relative/path/to/library if '@' in libname: - lname,lpath = libname.split('@',1) - lpath = os.path.abspath(njoin(self.local_path,lpath)) + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) if os.path.isdir(lpath): - c = self.get_subpackage(None,lpath, + c = self.get_subpackage(None, lpath, caller_level = 2) - if isinstance(c,Configuration): + if isinstance(c, Configuration): c = c.todict() - for l in [l[0] for l in c.get('libraries',[])]: - llname = l.split('__OF__',1)[0] + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] if llname == lname: - c.pop('name',None) + c.pop('name', None) dict_append(ext_args,**c) break continue @@ -1653,23 +1653,23 @@ class Configuration(object): def dict_append(self,**dict): for key in self.list_keys: - a = getattr(self,key) - a.extend(dict.get(key,[])) + a = getattr(self, key) + a.extend(dict.get(key, [])) for key in self.dict_keys: - a = getattr(self,key) - a.update(dict.get(key,{})) + a = getattr(self, key) + a.update(dict.get(key, {})) known_keys = self.list_keys + self.dict_keys + self.extra_keys for key in dict.keys(): if key not in known_keys: a = getattr(self, key, None) if a and a==dict[key]: continue self.warn('Inheriting attribute %r=%r from %r' \ - % (key,dict[key],dict.get('name','?'))) - setattr(self,key,dict[key]) + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) self.extra_keys.append(key) elif key in self.extra_keys: self.info('Ignoring attempt to set %r (from %r to %r)' \ - % (key, getattr(self,key), dict[key])) + % (key, getattr(self, key), dict[key])) elif key in known_keys: # key is already processed above pass @@ -1683,9 +1683,9 @@ class Configuration(object): s += 'Configuration of '+self.name+':\n' known_keys.sort() for k in known_keys: - a = getattr(self,k,None) + a = getattr(self, k, None) if a: - s += '%s = %s\n' % (k,pformat(a)) + s += '%s = %s\n' % (k, pformat(a)) s += 5*'-' + '>' return s @@ -1699,7 +1699,7 @@ class Configuration(object): cmd.noisy = 0 old_path = os.environ.get('PATH') if old_path: - path = os.pathsep.join(['.',old_path]) + path = os.pathsep.join(['.', old_path]) os.environ['PATH'] = path return cmd @@ -1728,7 +1728,7 @@ class Configuration(object): end ''' config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77') + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') return flag def have_f90c(self): @@ -1747,7 +1747,7 @@ class Configuration(object): end ''' config_cmd = self.get_config_cmd() - flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90') + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') return flag def append_to(self, extlib): @@ -1760,11 +1760,11 @@ class Configuration(object): include_dirs=self.include_dirs) else: from numpy.distutils.core import Extension - assert isinstance(extlib,Extension), repr(extlib) + assert 
isinstance(extlib, Extension), repr(extlib) extlib.libraries.extend(self.libraries) extlib.include_dirs.extend(self.include_dirs) - def _get_svn_revision(self,path): + def _get_svn_revision(self, path): """Return path's SVN revision number. """ revision = None @@ -1783,16 +1783,16 @@ class Configuration(object): if m: revision = int(m.group('revision')) return revision - if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None): - entries = njoin(path,'_svn','entries') + if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): + entries = njoin(path, '_svn', 'entries') else: - entries = njoin(path,'.svn','entries') + entries = njoin(path, '.svn', 'entries') if os.path.isfile(entries): f = open(entries) fstr = f.read() f.close() if fstr[:5] == '<?xml': # pre 1.4 - m = re.search(r'revision="(?P<revision>\d+)"',fstr) + m = re.search(r'revision="(?P<revision>\d+)"', fstr) if m: revision = int(m.group('revision')) else: # non-xml entries file --- check to be sure that @@ -1801,7 +1801,7 @@ class Configuration(object): revision = int(m.group('revision')) return revision - def _get_hg_revision(self,path): + def _get_hg_revision(self, path): """Return path's Mercurial revision number. """ revision = None @@ -1820,8 +1820,8 @@ class Configuration(object): if m: revision = int(m.group('revision')) return revision - branch_fn = njoin(path,'.hg','branch') - branch_cache_fn = njoin(path,'.hg','branch.cache') + branch_fn = njoin(path, '.hg', 'branch') + branch_cache_fn = njoin(path, '.hg', 'branch.cache') if os.path.isfile(branch_fn): branch0 = None @@ -1857,7 +1857,7 @@ class Configuration(object): __svn_version__.py for string variables version, __version\__, and <packagename>_version, until a version number is found. """ - version = getattr(self,'version',None) + version = getattr(self, 'version', None) if version is not None: return version @@ -1877,11 +1877,11 @@ class Configuration(object): else: version_vars = [version_variable] for f in files: - fn = njoin(self.local_path,f) + fn = njoin(self.local_path, f) if os.path.isfile(fn): - info = (open(fn),fn,('.py','U',1)) + info = (open(fn), fn, ('.py', 'U', 1)) name = os.path.splitext(os.path.basename(fn))[0] - n = dot_join(self.name,name) + n = dot_join(self.name, name) try: version_module = imp.load_module('_'.join(n.split('.')),*info) except ImportError: @@ -1892,7 +1892,7 @@ class Configuration(object): continue for a in version_vars: - version = getattr(version_module,a,None) + version = getattr(version_module, a, None) if version is not None: break if version is not None: @@ -1929,7 +1929,7 @@ class Configuration(object): intended for working with source directories that are in an SVN repository. """ - target = njoin(self.local_path,'__svn_version__.py') + target = njoin(self.local_path, '__svn_version__.py') revision = self._get_svn_revision(self.local_path) if os.path.isfile(target) or revision is None: return @@ -1937,8 +1937,8 @@ class Configuration(object): def generate_svn_version_py(): if not os.path.isfile(target): version = str(revision) - self.info('Creating %s (version=%r)' % (target,version)) - f = open(target,'w') + self.info('Creating %s (version=%r)' % (target, version)) + f = open(target, 'w') f.write('version = %r\n' % (version)) f.close() @@ -1971,7 +1971,7 @@ class Configuration(object): This is intended for working with source directories that are in an Mercurial repository. 
""" - target = njoin(self.local_path,'__hg_version__.py') + target = njoin(self.local_path, '__hg_version__.py') revision = self._get_hg_revision(self.local_path) if os.path.isfile(target) or revision is None: return @@ -1979,8 +1979,8 @@ class Configuration(object): def generate_hg_version_py(): if not os.path.isfile(target): version = str(revision) - self.info('Creating %s (version=%r)' % (target,version)) - f = open(target,'w') + self.info('Creating %s (version=%r)' % (target, version)) + f = open(target, 'w') f.write('version = %r\n' % (version)) f.close() @@ -2006,7 +2006,7 @@ class Configuration(object): package installation directory. """ - self.py_modules.append((self.name,name,generate_config_py)) + self.py_modules.append((self.name, name, generate_config_py)) def get_info(self,*names): @@ -2183,7 +2183,7 @@ def dict_append(d, **kws): for k, v in kws.items(): if k in d: ov = d[k] - if isinstance(ov,str): + if isinstance(ov, str): d[k] = v else: d[k].extend(v) diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py index e4dbe240b..82a53bd08 100644 --- a/numpy/distutils/setup.py +++ b/numpy/distutils/setup.py @@ -3,7 +3,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('distutils',parent_package,top_path) + config = Configuration('distutils', parent_package, top_path) config.add_subpackage('command') config.add_subpackage('fcompiler') config.add_data_dir('tests') diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 13f52801e..8ec1b8446 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1829,7 +1829,7 @@ class agg2_info(system_info): info = {'libraries': [('agg2_src', {'sources': agg2_srcs, - 'include_dirs':[os.path.join(src_dir, 'include')], + 'include_dirs': [os.path.join(src_dir, 'include')], } )], 'include_dirs': [os.path.join(src_dir, 'include')], diff --git a/numpy/distutils/tests/f2py_ext/setup.py b/numpy/distutils/tests/f2py_ext/setup.py index 658b01bb6..bb7d4bc1c 100644 --- a/numpy/distutils/tests/f2py_ext/setup.py +++ b/numpy/distutils/tests/f2py_ext/setup.py @@ -3,8 +3,8 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('f2py_ext',parent_package,top_path) - config.add_extension('fib2', ['src/fib2.pyf','src/fib1.f']) + config = Configuration('f2py_ext', parent_package, top_path) + config.add_extension('fib2', ['src/fib2.pyf', 'src/fib1.f']) config.add_data_dir('tests') return config diff --git a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py index bee1b9870..5252db283 100644 --- a/numpy/distutils/tests/f2py_ext/tests/test_fib2.py +++ b/numpy/distutils/tests/f2py_ext/tests/test_fib2.py @@ -7,7 +7,7 @@ from f2py_ext import fib2 class TestFib2(TestCase): def test_fib(self): - assert_array_equal(fib2.fib(6),[0,1,1,2,3,5]) + assert_array_equal(fib2.fib(6), [0, 1, 1, 2, 3, 5]) if __name__ == "__main__": run_module_suite() diff --git a/numpy/distutils/tests/f2py_f90_ext/setup.py b/numpy/distutils/tests/f2py_f90_ext/setup.py index 4b4c2390c..7cca81637 100644 --- a/numpy/distutils/tests/f2py_f90_ext/setup.py +++ b/numpy/distutils/tests/f2py_f90_ext/setup.py @@ -3,7 +3,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from 
numpy.distutils.misc_util import Configuration - config = Configuration('f2py_f90_ext',parent_package,top_path) + config = Configuration('f2py_f90_ext', parent_package, top_path) config.add_extension('foo', ['src/foo_free.f90'], include_dirs=['include'], diff --git a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py index 92e559291..9653b9023 100644 --- a/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py +++ b/numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py @@ -6,7 +6,7 @@ from f2py_f90_ext import foo class TestFoo(TestCase): def test_foo_free(self): - assert_equal(foo.foo_free.bar13(),13) + assert_equal(foo.foo_free.bar13(), 13) if __name__ == "__main__": run_module_suite() diff --git a/numpy/distutils/tests/gen_ext/setup.py b/numpy/distutils/tests/gen_ext/setup.py index ccab2a45e..de6b941e0 100644 --- a/numpy/distutils/tests/gen_ext/setup.py +++ b/numpy/distutils/tests/gen_ext/setup.py @@ -28,16 +28,16 @@ C END FILE FIB3.F def source_func(ext, build_dir): import os from distutils.dep_util import newer - target = os.path.join(build_dir,'fib3.f') + target = os.path.join(build_dir, 'fib3.f') if newer(__file__, target): - f = open(target,'w') + f = open(target, 'w') f.write(fib3_f) f.close() return [target] def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('gen_ext',parent_package,top_path) + config = Configuration('gen_ext', parent_package, top_path) config.add_extension('fib3', [source_func] ) diff --git a/numpy/distutils/tests/gen_ext/tests/test_fib3.py b/numpy/distutils/tests/gen_ext/tests/test_fib3.py index 7bab5a6be..5fd9be439 100644 --- a/numpy/distutils/tests/gen_ext/tests/test_fib3.py +++ b/numpy/distutils/tests/gen_ext/tests/test_fib3.py @@ -6,7 +6,7 @@ from gen_ext import fib3 class TestFib3(TestCase): def test_fib(self): - assert_array_equal(fib3.fib(6),[0,1,1,2,3,5]) + assert_array_equal(fib3.fib(6), [0, 1, 1, 2, 3, 5]) if __name__ == "__main__": run_module_suite() diff --git a/numpy/distutils/tests/pyrex_ext/setup.py b/numpy/distutils/tests/pyrex_ext/setup.py index ad1f5207a..819dd3154 100644 --- a/numpy/distutils/tests/pyrex_ext/setup.py +++ b/numpy/distutils/tests/pyrex_ext/setup.py @@ -3,7 +3,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('pyrex_ext',parent_package,top_path) + config = Configuration('pyrex_ext', parent_package, top_path) config.add_extension('primes', ['primes.pyx']) config.add_data_dir('tests') diff --git a/numpy/distutils/tests/setup.py b/numpy/distutils/tests/setup.py index 61f6ba751..135de7c47 100644 --- a/numpy/distutils/tests/setup.py +++ b/numpy/distutils/tests/setup.py @@ -3,7 +3,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('testnumpydistutils',parent_package,top_path) + config = Configuration('testnumpydistutils', parent_package, top_path) config.add_subpackage('pyrex_ext') config.add_subpackage('f2py_ext') #config.add_subpackage('f2py_f90_ext') diff --git a/numpy/distutils/tests/swig_ext/setup.py b/numpy/distutils/tests/swig_ext/setup.py index 7ce3061a1..f6e07303b 100644 --- a/numpy/distutils/tests/swig_ext/setup.py +++ b/numpy/distutils/tests/swig_ext/setup.py @@ -3,12 +3,12 @@ from __future__ import division, print_function def 
configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('swig_ext',parent_package,top_path) + config = Configuration('swig_ext', parent_package, top_path) config.add_extension('_example', - ['src/example.i','src/example.c'] + ['src/example.i', 'src/example.c'] ) config.add_extension('_example2', - ['src/zoo.i','src/zoo.cc'], + ['src/zoo.i', 'src/zoo.cc'], depends=['src/zoo.h'], include_dirs=['src'] ) diff --git a/numpy/distutils/tests/swig_ext/tests/test_example.py b/numpy/distutils/tests/swig_ext/tests/test_example.py index 3164c0831..e81f98b1d 100644 --- a/numpy/distutils/tests/swig_ext/tests/test_example.py +++ b/numpy/distutils/tests/swig_ext/tests/test_example.py @@ -6,12 +6,12 @@ from swig_ext import example class TestExample(TestCase): def test_fact(self): - assert_equal(example.fact(10),3628800) + assert_equal(example.fact(10), 3628800) def test_cvar(self): - assert_equal(example.cvar.My_variable,3.0) + assert_equal(example.cvar.My_variable, 3.0) example.cvar.My_variable = 5 - assert_equal(example.cvar.My_variable,5.0) + assert_equal(example.cvar.My_variable, 5.0) if __name__ == "__main__": diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py index 33b6b1213..fd6af638f 100644 --- a/numpy/distutils/tests/test_misc_util.py +++ b/numpy/distutils/tests/test_misc_util.py @@ -11,49 +11,49 @@ ajoin = lambda *paths: join(*((sep,)+paths)) class TestAppendpath(TestCase): def test_1(self): - assert_equal(appendpath('prefix','name'),join('prefix','name')) - assert_equal(appendpath('/prefix','name'),ajoin('prefix','name')) - assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name')) - assert_equal(appendpath('prefix','/name'),join('prefix','name')) + assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) + assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) + assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) + assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) def test_2(self): - assert_equal(appendpath('prefix/sub','name'), - join('prefix','sub','name')) - assert_equal(appendpath('prefix/sub','sup/name'), - join('prefix','sub','sup','name')) - assert_equal(appendpath('/prefix/sub','/prefix/name'), - ajoin('prefix','sub','name')) + assert_equal(appendpath('prefix/sub', 'name'), + join('prefix', 'sub', 'name')) + assert_equal(appendpath('prefix/sub', 'sup/name'), + join('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub', '/prefix/name'), + ajoin('prefix', 'sub', 'name')) def test_3(self): - assert_equal(appendpath('/prefix/sub','/prefix/sup/name'), - ajoin('prefix','sub','sup','name')) - assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'), - ajoin('prefix','sub','sub2','sup','sup2','name')) - assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'), - ajoin('prefix','sub','sub2','sup','name')) + assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), + ajoin('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) class TestMinrelpath(TestCase): def test_1(self): - n = lambda path: path.replace('/',sep) - assert_equal(minrelpath(n('aa/bb')),n('aa/bb')) - assert_equal(minrelpath('..'),'..') - assert_equal(minrelpath(n('aa/..')),'') - 
assert_equal(minrelpath(n('aa/../bb')),'bb') - assert_equal(minrelpath(n('aa/bb/..')),'aa') - assert_equal(minrelpath(n('aa/bb/../..')),'') - assert_equal(minrelpath(n('aa/bb/../cc/../dd')),n('aa/dd')) - assert_equal(minrelpath(n('.././..')),n('../..')) - assert_equal(minrelpath(n('aa/bb/.././../dd')),n('dd')) + n = lambda path: path.replace('/', sep) + assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) + assert_equal(minrelpath('..'), '..') + assert_equal(minrelpath(n('aa/..')), '') + assert_equal(minrelpath(n('aa/../bb')), 'bb') + assert_equal(minrelpath(n('aa/bb/..')), 'aa') + assert_equal(minrelpath(n('aa/bb/../..')), '') + assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) + assert_equal(minrelpath(n('.././..')), n('../..')) + assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) class TestGpaths(TestCase): def test_gpaths(self): - local_path = minrelpath(join(dirname(__file__),'..')) + local_path = minrelpath(join(dirname(__file__), '..')) ls = gpaths('command/*.py', local_path) - assert_(join(local_path,'command','build_src.py') in ls,repr(ls)) + assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) f = gpaths('system_info.py', local_path) - assert_(join(local_path,'system_info.py')==f[0],repr(f)) + assert_(join(local_path, 'system_info.py')==f[0], repr(f)) class TestSharedExtension(TestCase): diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py index 95b676ecf..955407aa0 100644 --- a/numpy/distutils/unixccompiler.py +++ b/numpy/distutils/unixccompiler.py @@ -31,7 +31,7 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts ccomp += ['-AA'] self.compiler_so = ccomp - display = '%s: %s' % (os.path.basename(self.compiler_so[0]),src) + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) try: self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs, display = display) diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py index 75f0995d8..86a3984c2 100644 --- a/numpy/doc/basics.py +++ b/numpy/doc/basics.py @@ -144,4 +144,3 @@ methods arrays do. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py index 3826a70b3..43b725b83 100644 --- a/numpy/doc/broadcasting.py +++ b/numpy/doc/broadcasting.py @@ -176,4 +176,3 @@ for illustrations of broadcasting concepts. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py index 7baa648a1..8632e9794 100644 --- a/numpy/doc/byteswapping.py +++ b/numpy/doc/byteswapping.py @@ -136,4 +136,3 @@ False """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py index e00c26e54..7979b51aa 100644 --- a/numpy/doc/creation.py +++ b/numpy/doc/creation.py @@ -142,4 +142,3 @@ diagonal). """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py index b9053cc07..3770f5761 100644 --- a/numpy/doc/glossary.py +++ b/numpy/doc/glossary.py @@ -416,4 +416,3 @@ Glossary """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/howtofind.py b/numpy/doc/howtofind.py index 89f8ac9b9..e080d263a 100644 --- a/numpy/doc/howtofind.py +++ b/numpy/doc/howtofind.py @@ -8,4 +8,3 @@ How to find things in NumPy. 
""" from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py index 997aee1b7..0439a7a76 100644 --- a/numpy/doc/indexing.py +++ b/numpy/doc/indexing.py @@ -406,4 +406,3 @@ converted to an array as a list would be. As an example: :: """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py index 73ec51332..6bd6b1ae9 100644 --- a/numpy/doc/internals.py +++ b/numpy/doc/internals.py @@ -161,4 +161,3 @@ it is more in line with Python semantics and the natural order of the data. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/io.py b/numpy/doc/io.py index aeffdaca0..e45bfc9b3 100644 --- a/numpy/doc/io.py +++ b/numpy/doc/io.py @@ -8,4 +8,3 @@ Placeholder for array I/O documentation. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/jargon.py b/numpy/doc/jargon.py index 56b25ad7a..3fcbc7d23 100644 --- a/numpy/doc/jargon.py +++ b/numpy/doc/jargon.py @@ -8,4 +8,3 @@ Placeholder for computer science, engineering and other jargon. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/methods_vs_functions.py b/numpy/doc/methods_vs_functions.py index 89c26b841..4149000bc 100644 --- a/numpy/doc/methods_vs_functions.py +++ b/numpy/doc/methods_vs_functions.py @@ -8,4 +8,3 @@ Placeholder for Methods vs. Functions documentation. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py index 79c16a56e..311e6e0c8 100644 --- a/numpy/doc/misc.py +++ b/numpy/doc/misc.py @@ -227,4 +227,3 @@ Interfacing to C++: """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/performance.py b/numpy/doc/performance.py index 6b0b55042..b0c158bf3 100644 --- a/numpy/doc/performance.py +++ b/numpy/doc/performance.py @@ -8,4 +8,3 @@ Placeholder for Improving Performance documentation. """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py index 304184d0d..0444bdf90 100644 --- a/numpy/doc/structured_arrays.py +++ b/numpy/doc/structured_arrays.py @@ -194,7 +194,7 @@ Notice that `x` is created with a list of tuples. :: The fields are returned in the order they are asked for.:: >>> x[['y','x']] - array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)], + array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)], dtype=[('y', '<f4'), ('x', '<f4')]) Filling structured arrays @@ -221,4 +221,3 @@ You can find some more information on recarrays and structured arrays """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py index 31bf6e211..a62fc2d6d 100644 --- a/numpy/doc/subclassing.py +++ b/numpy/doc/subclassing.py @@ -558,4 +558,3 @@ how this can work, have a look at the ``memmap`` class in """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index 0b94e0537..0132202ad 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -136,4 +136,3 @@ results in an error. 
There are two alternatives: """ from __future__ import division, absolute_import, print_function - diff --git a/numpy/dual.py b/numpy/dual.py index 00bdcb395..1517d8421 100644 --- a/numpy/dual.py +++ b/numpy/dual.py @@ -17,9 +17,9 @@ from __future__ import division, absolute_import, print_function # otherwise. # Usage --- from numpy.dual import fft, inv -__all__ = ['fft','ifft','fftn','ifftn','fft2','ifft2', - 'norm','inv','svd','solve','det','eig','eigvals', - 'eigh','eigvalsh','lstsq', 'pinv','cholesky','i0'] +__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2', + 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals', + 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0'] import numpy.linalg as linpkg import numpy.fft as fftpkg diff --git a/numpy/f2py/NEWS.txt b/numpy/f2py/NEWS.txt index a4a254405..c36bd49b5 100644 --- a/numpy/f2py/NEWS.txt +++ b/numpy/f2py/NEWS.txt @@ -1,2 +1,2 @@ -Read docs/HISTORY.txt
\ No newline at end of file +Read docs/HISTORY.txt diff --git a/numpy/f2py/TODO.txt b/numpy/f2py/TODO.txt index 093f0119e..a883f75d0 100644 --- a/numpy/f2py/TODO.txt +++ b/numpy/f2py/TODO.txt @@ -26,7 +26,7 @@ subroutine fun end subroutine fun ========================================================================= -Users Guide needs major revision. +Users Guide needs major revision. [DONE] ========================================================================= diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index 26a63daa8..ccdbd4e0b 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -1,7 +1,7 @@ #!/usr/bin/env python from __future__ import division, absolute_import, print_function -__all__ = ['run_main','compile','f2py_testing'] +__all__ = ['run_main', 'compile', 'f2py_testing'] import os import sys @@ -32,13 +32,13 @@ def compile(source, else: fname = source_fn - f = open(fname,'w') + f = open(fname, 'w') f.write(source) f.close() - args = ' -c -m %s %s %s'%(modulename,fname,extra_args) - c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable,args) - s,o = exec_command(c) + args = ' -c -m %s %s %s'%(modulename, fname, extra_args) + c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable, args) + s, o = exec_command(c) if source_fn is None: try: os.remove(fname) except OSError: pass diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index e835090f7..2e016e186 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -37,7 +37,7 @@ wrapfuncs = 1 def outmess(t): - if options.get('verbose',1): + if options.get('verbose', 1): sys.stdout.write(t) def debugcapi(var): @@ -68,7 +68,7 @@ def isscalar(var): return not (isarray(var) or isstring(var) or isexternal(var)) def iscomplex(var): - return isscalar(var) and var.get('typespec') in ['complex','double complex'] + return isscalar(var) and var.get('typespec') in ['complex', 'double complex'] def islogical(var): return isscalar(var) and var.get('typespec')=='logical' @@ -91,7 +91,7 @@ def get_kind(var): def islong_long(var): if not isscalar(var): return 0 - if var.get('typespec') not in ['integer','logical']: + if var.get('typespec') not in ['integer', 'logical']: return 0 return get_kind(var)=='8' @@ -143,7 +143,7 @@ def islong_complex(var): return get_kind(var)=='32' def iscomplexarray(var): - return isarray(var) and var.get('typespec') in ['complex','double complex'] + return isarray(var) and var.get('typespec') in ['complex', 'double complex'] def isint1array(var): return isarray(var) and var.get('typespec')=='integer' \ @@ -216,7 +216,7 @@ def hasassumedshape(rout): if rout.get('hasassumedshape'): return True for a in rout['args']: - for d in rout['vars'].get(a,{}).get('dimension',[]): + for d in rout['vars'].get(a, {}).get('dimension', []): if d==':': rout['hasassumedshape'] = True return True @@ -332,53 +332,53 @@ def isintent_inout(var): return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent'] def isintent_out(var): - return 'out' in var.get('intent',[]) + return 'out' in var.get('intent', []) def isintent_hide(var): - return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var))))) + return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout, 
isintent_inplace)(var))))) def isintent_nothide(var): return not isintent_hide(var) def isintent_c(var): - return 'c' in var.get('intent',[]) + return 'c' in var.get('intent', []) # def isintent_f(var): # return not isintent_c(var) def isintent_cache(var): - return 'cache' in var.get('intent',[]) + return 'cache' in var.get('intent', []) def isintent_copy(var): - return 'copy' in var.get('intent',[]) + return 'copy' in var.get('intent', []) def isintent_overwrite(var): - return 'overwrite' in var.get('intent',[]) + return 'overwrite' in var.get('intent', []) def isintent_callback(var): - return 'callback' in var.get('intent',[]) + return 'callback' in var.get('intent', []) def isintent_inplace(var): - return 'inplace' in var.get('intent',[]) + return 'inplace' in var.get('intent', []) def isintent_aux(var): - return 'aux' in var.get('intent',[]) + return 'aux' in var.get('intent', []) def isintent_aligned4(var): - return 'aligned4' in var.get('intent',[]) + return 'aligned4' in var.get('intent', []) def isintent_aligned8(var): - return 'aligned8' in var.get('intent',[]) + return 'aligned8' in var.get('intent', []) def isintent_aligned16(var): - return 'aligned16' in var.get('intent',[]) - -isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT', - isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE', - isintent_cache:'INTENT_CACHE', - isintent_c:'INTENT_C',isoptional:'OPTIONAL', - isintent_inplace:'INTENT_INPLACE', - isintent_aligned4:'INTENT_ALIGNED4', - isintent_aligned8:'INTENT_ALIGNED8', - isintent_aligned16:'INTENT_ALIGNED16', + return 'aligned16' in var.get('intent', []) + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', } def isprivate(var): @@ -390,7 +390,7 @@ def hasinitvalue(var): def hasinitvalueasstring(var): if not hasinitvalue(var): return 0 - return var['='][0] in ['"',"'"] + return var['='][0] in ['"', "'"] def hasnote(var): return 'note' in var @@ -444,25 +444,25 @@ class F2PYError(Exception): pass class throw_error: - def __init__(self,mess): + def __init__(self, mess): self.mess = mess - def __call__(self,var): - mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess) + def __call__(self, var): + mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) raise F2PYError(mess) def l_and(*f): - l,l2='lambda v',[] + l, l2='lambda v', [] for i in range(len(f)): - l='%s,f%d=f[%d]'%(l,i,i) + l='%s,f%d=f[%d]'%(l, i, i) l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l,' and '.join(l2))) + return eval('%s:%s'%(l, ' and '.join(l2))) def l_or(*f): - l,l2='lambda v',[] + l, l2='lambda v', [] for i in range(len(f)): - l='%s,f%d=f[%d]'%(l,i,i) + l='%s,f%d=f[%d]'%(l, i, i) l2.append('f%d(v)'%(i)) - return eval('%s:%s'%(l,' or '.join(l2))) + return eval('%s:%s'%(l, ' or '.join(l2))) def l_not(f): return eval('lambda v,f=f:not f(v)') @@ -508,22 +508,22 @@ def getmultilineblock(rout,blockname,comment=1,counter=0): r = r[:-3] else: errmess("%s multiline block should end with `'''`: %s\n" \ - % (blockname,repr(r))) + % (blockname, repr(r))) return r def getcallstatement(rout): - return getmultilineblock(rout,'callstatement') + return getmultilineblock(rout, 'callstatement') def getcallprotoargument(rout,cb_map={}): - r = 
getmultilineblock(rout,'callprotoargument',comment=0) + r = getmultilineblock(rout, 'callprotoargument', comment=0) if r: return r if hascallstatement(rout): outmess('warning: callstatement is defined without callprotoargument\n') return from .capi_maps import getctype - arg_types,arg_types2 = [],[] - if l_and(isstringfunction,l_not(isfunction_wrap))(rout): - arg_types.extend(['char*','size_t']) + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) for n in rout['args']: var = rout['vars'][n] if isintent_callback(var): @@ -532,7 +532,7 @@ def getcallprotoargument(rout,cb_map={}): ctype = cb_map[n]+'_typedef' else: ctype = getctype(var) - if l_and(isintent_c,l_or(isscalar,iscomplex))(var): + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): pass elif isstring(var): pass @@ -550,16 +550,16 @@ def getcallprotoargument(rout,cb_map={}): return proto_args def getusercode(rout): - return getmultilineblock(rout,'usercode') + return getmultilineblock(rout, 'usercode') def getusercode1(rout): - return getmultilineblock(rout,'usercode',counter=1) + return getmultilineblock(rout, 'usercode', counter=1) def getpymethoddef(rout): - return getmultilineblock(rout,'pymethoddef') + return getmultilineblock(rout, 'pymethoddef') def getargs(rout): - sortargs,args=[],[] + sortargs, args=[], [] if 'args' in rout: args=rout['args'] if 'sortvars' in rout: @@ -569,10 +569,10 @@ def getargs(rout): if a not in sortargs: sortargs.append(a) else: sortargs=rout['args'] - return args,sortargs + return args, sortargs def getargs2(rout): - sortargs,args=[],rout.get('args',[]) + sortargs, args=[], rout.get('args', []) auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\ and a not in args] args = auxvars + args @@ -583,23 +583,23 @@ def getargs2(rout): if a not in sortargs: sortargs.append(a) else: sortargs=auxvars + rout['args'] - return args,sortargs + return args, sortargs def getrestdoc(rout): if 'f2pymultilines' not in rout: return None k = None if rout['block']=='python module': - k = rout['block'],rout['name'] - return rout['f2pymultilines'].get(k,None) + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) def gentitle(name): l=(80-len(name)-6)//2 - return '/*%s %s %s*/'%(l*'*',name,l*'*') + return '/*%s %s %s*/'%(l*'*', name, l*'*') def flatlist(l): if isinstance(l, list): - return reduce(lambda x,y,f=flatlist:x+f(y),l,[]) + return reduce(lambda x,y,f=flatlist:x+f(y), l, []) return [l] def stripcomma(s): @@ -619,15 +619,15 @@ def replace(str,d,defaultsep=''): else: sep=defaultsep if isinstance(d[k], list): - str=str.replace('#%s#'%(k),sep.join(flatlist(d[k]))) + str=str.replace('#%s#'%(k), sep.join(flatlist(d[k]))) else: - str=str.replace('#%s#'%(k),d[k]) + str=str.replace('#%s#'%(k), d[k]) return str -def dictappend(rd,ar): +def dictappend(rd, ar): if isinstance(ar, list): for a in ar: - rd=dictappend(rd,a) + rd=dictappend(rd, a) return rd for k in ar.keys(): if k[0]=='_': @@ -647,7 +647,7 @@ def dictappend(rd,ar): if k1 not in rd[k]: rd[k][k1]=ar[k][k1] else: - rd[k]=dictappend(rd[k],ar[k]) + rd[k]=dictappend(rd[k], ar[k]) else: rd[k]=ar[k] return rd @@ -656,15 +656,15 @@ def applyrules(rules,d,var={}): ret={} if isinstance(rules, list): for r in rules: - rr=applyrules(r,d,var) - ret=dictappend(ret,rr) + rr=applyrules(r, d, var) + ret=dictappend(ret, rr) if '_break' in rr: break return ret if '_check' in rules and (not rules['_check'](var)): return ret if 'need' in rules: - res = 
applyrules({'needs':rules['need']},d,var) + res = applyrules({'needs':rules['need']}, d, var) if 'needs' in res: cfuncs.append_needs(res['needs']) @@ -672,11 +672,11 @@ def applyrules(rules,d,var={}): if k=='separatorsfor': ret[k]=rules[k]; continue if isinstance(rules[k], str): - ret[k]=replace(rules[k],d) + ret[k]=replace(rules[k], d) elif isinstance(rules[k], list): ret[k]=[] for i in rules[k]: - ar=applyrules({k:i},d,var) + ar=applyrules({k:i}, d, var) if k in ar: ret[k].append(ar[k]) elif k[0]=='_': @@ -688,19 +688,19 @@ def applyrules(rules,d,var={}): if isinstance(rules[k][k1], list): for i in rules[k][k1]: if isinstance(i, dict): - res=applyrules({'supertext':i},d,var) + res=applyrules({'supertext':i}, d, var) if 'supertext' in res: i=res['supertext'] else: i='' - ret[k].append(replace(i,d)) + ret[k].append(replace(i, d)) else: i=rules[k][k1] if isinstance(i, dict): - res=applyrules({'supertext':i},d) + res=applyrules({'supertext':i}, d) if 'supertext' in res: i=res['supertext'] else: i='' - ret[k].append(replace(i,d)) + ret[k].append(replace(i, d)) else: errmess('applyrules: ignoring rule %s.\n'%repr(rules[k])) if isinstance(ret[k], list): diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 41bca45d2..536a576dd 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -34,22 +34,22 @@ lcb_map={} lcb2_map={} # forced casting: mainly caused by the fact that Python or Numeric # C/APIs do not support the corresponding C types. -c2py_map={'double':'float', - 'float':'float', # forced casting - 'long_double':'float', # forced casting - 'char':'int', # forced casting - 'signed_char':'int', # forced casting - 'unsigned_char':'int', # forced casting - 'short':'int', # forced casting - 'unsigned_short':'int', # forced casting - 'int':'int', # (forced casting) - 'long':'int', - 'long_long':'long', - 'unsigned':'int', # forced casting - 'complex_float':'complex', # forced casting - 'complex_double':'complex', - 'complex_long_double':'complex', # forced casting - 'string':'string', +c2py_map={'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # (forced casting) + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', } c2capi_map={'double':'NPY_DOUBLE', 'float':'NPY_FLOAT', @@ -71,24 +71,24 @@ c2capi_map={'double':'NPY_DOUBLE', #These new maps aren't used anyhere yet, but should be by default # unless building numeric or numarray extensions. 
if using_newcore: - c2capi_map={'double':'NPY_DOUBLE', - 'float':'NPY_FLOAT', - 'long_double':'NPY_LONGDOUBLE', - 'char':'NPY_BYTE', - 'unsigned_char':'NPY_UBYTE', - 'signed_char':'NPY_BYTE', - 'short':'NPY_SHORT', - 'unsigned_short':'NPY_USHORT', - 'int':'NPY_INT', - 'unsigned':'NPY_UINT', - 'long':'NPY_LONG', - 'unsigned_long':'NPY_ULONG', - 'long_long':'NPY_LONGLONG', - 'unsigned_long_long':'NPY_ULONGLONG', - 'complex_float':'NPY_CFLOAT', - 'complex_double':'NPY_CDOUBLE', - 'complex_long_double':'NPY_CDOUBLE', - 'string':'NPY_CHAR', # f2py 2e is not ready for NPY_STRING (must set itemisize etc) + c2capi_map={'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_CHAR', # f2py 2e is not ready for NPY_STRING (must set itemisize etc) #'string':'NPY_STRING' } @@ -175,10 +175,10 @@ if os.path.isfile('.f2py_f2cmap'): # in type specifications. try: outmess('Reading .f2py_f2cmap ...\n') - f = open('.f2py_f2cmap','r') - d = eval(f.read(),{},{}) + f = open('.f2py_f2cmap', 'r') + d = eval(f.read(), {}, {}) f.close() - for k,d1 in d.items(): + for k, d1 in d.items(): for k1 in d1.keys(): d1[k1.lower()] = d1[k1] d[k.lower()] = d[k] @@ -188,31 +188,31 @@ if os.path.isfile('.f2py_f2cmap'): for k1 in d[k].keys(): if d[k][k1] in c2py_map: if k1 in f2cmap_all[k]: - outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k,k1,f2cmap_all[k][k1],d[k][k1])) + outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1])) f2cmap_all[k][k1] = d[k][k1] - outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k,k1,d[k][k1])) + outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1])) else: - errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k,k1,d[k][k1],d[k][k1],list(c2py_map.keys()))) + errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') except Exception as msg: errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. 
Skipping.\n' % (msg)) -cformat_map={'double':'%g', - 'float':'%g', - 'long_double':'%Lg', - 'char':'%d', - 'signed_char':'%d', - 'unsigned_char':'%hhu', - 'short':'%hd', - 'unsigned_short':'%hu', - 'int':'%d', - 'unsigned':'%u', - 'long':'%ld', - 'unsigned_long':'%lu', - 'long_long':'%ld', - 'complex_float':'(%g,%g)', - 'complex_double':'(%g,%g)', - 'complex_long_double':'(%Lg,%Lg)', - 'string':'%s', +cformat_map={'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '%s', } ############### Auxiliary functions @@ -241,7 +241,7 @@ def getctype(var): try: ctype=f2cmap[var['kindselector']['*']] except KeyError: - errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'],'*',var['kindselector']['*'])) + errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'], '*', var['kindselector']['*'])) elif 'kind' in var['kindselector']: if typespec+'kind' in f2cmap_all: f2cmap=f2cmap_all[typespec+'kind'] @@ -254,8 +254,8 @@ def getctype(var): ctype=f2cmap[str(var['kindselector']['kind'])] except KeyError: errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'\ - %(typespec,var['kindselector']['kind'], ctype, - typespec,var['kindselector']['kind'], os.getcwd())) + %(typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) else: if not isexternal(var): @@ -281,7 +281,7 @@ def getstrlength(var): len=a['*'] elif 'len' in a: len=a['len'] - if re.match(r'\(\s*([*]|[:])\s*\)',len) or re.match(r'([*]|[:])',len): + if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): #if len in ['(*)','*','(:)',':']: if isintent_hide(var): errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var))) @@ -314,35 +314,35 @@ def getarrdims(a,var,verbose=0): if dim[i] in depargs: v=[dim[i]] else: for va in depargs: - if re.match(r'.*?\b%s\b.*'%va,dim[i]): + if re.match(r'.*?\b%s\b.*'%va, dim[i]): v.append(va) for va in v: if depargs.index(va)>depargs.index(a): dim[i]='*' break - ret['setdims'],i='',-1 + ret['setdims'], i='', -1 for d in dim: i=i+1 - if d not in ['*',':','(*)','(:)']: - ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'],i,d) + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'], i, d) if ret['setdims']: ret['setdims']=ret['setdims'][:-1] - ret['cbsetdims'],i='',-1 + ret['cbsetdims'], i='', -1 for d in var['dimension']: i=i+1 - if d not in ['*',':','(*)','(:)']: - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,d) + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, d) elif isintent_in(var): outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \ % (d)) - ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'],i,0) + ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, 0) elif verbose : - errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a),repr(d))) + errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a), repr(d))) if 
ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1] # if not isintent_c(var): # var['dimension'].reverse() return ret -def getpydocsign(a,var): +def getpydocsign(a, var): global lcb_map if isfunction(var): if 'result' in var: @@ -350,11 +350,11 @@ def getpydocsign(a,var): else: af=var['name'] if af in var['vars']: - return getpydocsign(af,var['vars'][af]) + return getpydocsign(af, var['vars'][af]) else: errmess('getctype: function %s has no return value?!\n'%af) - return '','' - sig,sigout=a,a + return '', '' + sig, sigout=a, a opt='' if isintent_in(var): opt='input' elif isintent_inout(var): opt='in/output' @@ -368,63 +368,63 @@ def getpydocsign(a,var): ctype=getctype(var) if hasinitvalue(var): - init,showinit=getinit(a,var) + init, showinit=getinit(a, var) init = ', optional\\n Default: %s' % showinit if isscalar(var): if isintent_inout(var): - sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a,opt,c2py_map[ctype], + sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a, opt, c2py_map[ctype], c2pycode_map[ctype], init) else: - sig='%s : %s %s%s'%(a,opt,c2py_map[ctype],init) - sigout='%s : %s'%(out_a,c2py_map[ctype]) + sig='%s : %s %s%s'%(a, opt, c2py_map[ctype], init) + sigout='%s : %s'%(out_a, c2py_map[ctype]) elif isstring(var): if isintent_inout(var): - sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a,opt,getstrlength(var),init) + sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a, opt, getstrlength(var), init) else: - sig='%s : %s string(len=%s)%s'%(a,opt,getstrlength(var),init) - sigout='%s : string(len=%s)'%(out_a,getstrlength(var)) + sig='%s : %s string(len=%s)%s'%(a, opt, getstrlength(var), init) + sigout='%s : string(len=%s)'%(out_a, getstrlength(var)) elif isarray(var): dim=var['dimension'] rank=repr(len(dim)) - sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a,opt,rank, + sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a, opt, rank, c2pycode_map[ctype], ','.join(dim), init) if a==out_a: sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\ - %(a,rank,c2pycode_map[ctype],','.join(dim)) + %(a, rank, c2pycode_map[ctype], ','.join(dim)) else: sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ - %(out_a,rank,c2pycode_map[ctype],','.join(dim),a) + %(out_a, rank, c2pycode_map[ctype], ','.join(dim), a) elif isexternal(var): ua='' if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: ua=lcb2_map[lcb_map[a]]['argname'] if not ua==a: ua=' => %s'%ua else: ua='' - sig='%s : call-back function%s'%(a,ua) + sig='%s : call-back function%s'%(a, ua) sigout=sig else: errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a) - return sig,sigout + return sig, sigout -def getarrdocsign(a,var): +def getarrdocsign(a, var): ctype=getctype(var) if isstring(var) and (not isarray(var)): - sig='%s : rank-0 array(string(len=%s),\'c\')'%(a,getstrlength(var)) + sig='%s : rank-0 array(string(len=%s),\'c\')'%(a, getstrlength(var)) elif isscalar(var): - sig='%s : rank-0 array(%s,\'%s\')'%(a,c2py_map[ctype], + sig='%s : rank-0 array(%s,\'%s\')'%(a, c2py_map[ctype], c2pycode_map[ctype],) elif isarray(var): dim=var['dimension'] rank=repr(len(dim)) - sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a,rank, + sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a, rank, c2pycode_map[ctype], ','.join(dim)) return sig -def getinit(a,var): - if isstring(var): init,showinit='""',"''" - else: init,showinit='','' +def getinit(a, var): + if isstring(var): init, showinit='""', "''" + else: init, showinit='', '' if hasinitvalue(var): init=var['='] 
showinit=init @@ -434,22 +434,22 @@ def getinit(a,var): try: v = var["="] if ',' in v: - ret['init.r'],ret['init.i']=markoutercomma(v[1:-1]).split('@,@') + ret['init.r'], ret['init.i']=markoutercomma(v[1:-1]).split('@,@') else: - v = eval(v,{},{}) - ret['init.r'],ret['init.i']=str(v.real),str(v.imag) + v = eval(v, {}, {}) + ret['init.r'], ret['init.i']=str(v.real), str(v.imag) except: raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) if isarray(var): - init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'],ret['init.i']) + init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'], ret['init.i']) elif isstring(var): - if not init: init,showinit='""',"''" + if not init: init, showinit='""', "''" if init[0]=="'": - init='"%s"'%(init[1:-1].replace('"','\\"')) + init='"%s"'%(init[1:-1].replace('"', '\\"')) if init[0]=='"': showinit="'%s'"%(init[1:-1]) - return init,showinit + return init, showinit -def sign2map(a,var): +def sign2map(a, var): """ varname,ctype,atype init,init.r,init.i,pytype @@ -457,7 +457,7 @@ def sign2map(a,var): varrfromat intent """ - global lcb_map,cb_map + global lcb_map, cb_map out_a = a if isintent_out(var): for k in var['intent']: @@ -467,7 +467,7 @@ def sign2map(a,var): ret={'varname':a,'outvarname':out_a} ret['ctype']=getctype(var) intent_flags = [] - for f,s in isintent_dict.items(): + for f, s in isintent_dict.items(): if f(var): intent_flags.append('F2PY_%s'%s) if intent_flags: #XXX: Evaluate intent_flags here. @@ -478,9 +478,9 @@ def sign2map(a,var): elif ret['ctype'] in c2buildvalue_map: ret['varrformat']=c2buildvalue_map[ret['ctype']] else: ret['varrformat']='O' - ret['init'],ret['showinit']=getinit(a,var) + ret['init'], ret['showinit']=getinit(a, var) if hasinitvalue(var) and iscomplex(var) and not isarray(var): - ret['init.r'],ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') + ret['init.r'], ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') if isexternal(var): ret['cbnamekey']=a if a in lcb_map: @@ -491,59 +491,59 @@ def sign2map(a,var): ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr'] else: ret['cbname']=a - errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a,list(lcb_map.keys()))) + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a, list(lcb_map.keys()))) if isstring(var): ret['length']=getstrlength(var) if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) + ret=dictappend(ret, getarrdims(a, var)) dim=copy.copy(var['dimension']) if ret['ctype'] in c2capi_map: ret['atype']=c2capi_map[ret['ctype']] # Debug info if debugcapi(var): - il=[isintent_in,'input',isintent_out,'output', - isintent_inout,'inoutput',isrequired,'required', - isoptional,'optional',isintent_hide,'hidden', - iscomplex,'complex scalar', - l_and(isscalar,l_not(iscomplex)),'scalar', - isstring,'string',isarray,'array', - iscomplexarray,'complex array',isstringarray,'string array', - iscomplexfunction,'complex function', - l_and(isfunction,l_not(iscomplexfunction)),'function', - isexternal,'callback', - isintent_callback,'callback', - isintent_aux,'auxiliary', + il=[isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, 
l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', #ismutable,'mutable',l_not(ismutable),'immutable', ] rl=[] - for i in range(0,len(il),2): + for i in range(0, len(il), 2): if il[i](var): rl.append(il[i+1]) if isstring(var): - rl.append('slen(%s)=%s'%(a,ret['length'])) + rl.append('slen(%s)=%s'%(a, ret['length'])) if isarray(var): # if not isintent_c(var): # var['dimension'].reverse() - ddim=','.join(map(lambda x,y:'%s|%s'%(x,y),var['dimension'],dim)) + ddim=','.join(map(lambda x, y:'%s|%s'%(x, y), var['dimension'], dim)) rl.append('dims(%s)'%ddim) # if not isintent_c(var): # var['dimension'].reverse() if isexternal(var): - ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a,ret['cbname'],','.join(rl)) + ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a, ret['cbname'], ','.join(rl)) else: - ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'],a,ret['showinit'],','.join(rl)) + ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'], a, ret['showinit'], ','.join(rl)) if isscalar(var): if ret['ctype'] in cformat_map: - ret['vardebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']]) + ret['vardebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) if isstring(var): - ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) + ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) if isexternal(var): ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a) if ret['ctype'] in cformat_map: - ret['varshowvalue']='#name#:%s=%s'%(a,cformat_map[ret['ctype']]) + ret['varshowvalue']='#name#:%s=%s'%(a, cformat_map[ret['ctype']]) ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isstring(var): - ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) + ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) if hasnote(var): ret['note']=var['note'] return ret @@ -557,17 +557,17 @@ def routsign2map(rout): global lcb_map name = rout['name'] fname = getfortranname(rout) - ret={'name':name, - 'texname':name.replace('_','\\_'), - 'name_lower':name.lower(), - 'NAME':name.upper(), - 'begintitle':gentitle(name), - 'endtitle':gentitle('end of %s'%name), - 'fortranname':fname, - 'FORTRANNAME':fname.upper(), - 'callstatement':getcallstatement(rout) or '', - 'usercode':getusercode(rout) or '', - 'usercode1':getusercode1(rout) or '', + ret={'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle('end of %s'%name), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', } if '_' in fname: ret['F_FUNC'] = 'F_FUNC_US' @@ -590,15 +590,15 @@ def routsign2map(rout): #else: # errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u)) elif 'externals' in rout and rout['externals']: - errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'],repr(rout['externals']))) - ret['callprotoargument'] = getcallprotoargument(rout,lcb_map) or '' + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' if isfunction(rout): if 'result' in 
rout: a=rout['result'] else: a=rout['name'] ret['rname']=a - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) ret['ctype']=getctype(rout['vars'][a]) if hasresultnote(rout): ret['resultnote']=rout['vars'][a]['note'] @@ -610,9 +610,9 @@ def routsign2map(rout): errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype']))) if debugcapi(rout): if ret['ctype'] in cformat_map: - ret['routdebugshowvalue']='debug-capi:%s=%s'%(a,cformat_map[ret['ctype']]) + ret['routdebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) if isstringfunction(rout): - ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a,a) + ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) if isstringfunction(rout): ret['rlength']=getstrlength(rout['vars'][a]) if ret['rlength']=='-1': @@ -630,11 +630,11 @@ def modsign2map(m): if ismodule(m): ret={'f90modulename':m['name'], 'F90MODULENAME':m['name'].upper(), - 'texf90modulename':m['name'].replace('_','\\_')} + 'texf90modulename':m['name'].replace('_', '\\_')} else: ret={'modulename':m['name'], 'MODULENAME':m['name'].upper(), - 'texmodulename':m['name'].replace('_','\\_')} + 'texmodulename':m['name'].replace('_', '\\_')} ret['restdoc'] = getrestdoc(m) or [] if hasnote(m): ret['note']=m['note'] @@ -664,19 +664,19 @@ def cb_sign2map(a,var,index=None): if ret['ctype'] in cformat_map: ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) + ret=dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) if hasnote(var): ret['note']=var['note'] var['note']=['See elsewhere.'] return ret -def cb_routsign2map(rout,um): +def cb_routsign2map(rout, um): """ name,begintitle,endtitle,argname ctype,rctype,maxnofargs,nofoptargs,returncptr """ - ret={'name':'cb_%s_in_%s'%(rout['name'],um), + ret={'name':'cb_%s_in_%s'%(rout['name'], um), 'returncptr':''} if isintent_callback(rout): if '_' in rout['name']: @@ -722,7 +722,7 @@ return_value= ret['note']=rout['vars'][a]['note'] rout['vars'][a]['note']=['See elsewhere.'] ret['rname']=a - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,rout) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) if iscomplexfunction(rout): ret['rctype']=""" #ifdef F2PY_CB_RETURNCOMPLEX @@ -740,7 +740,7 @@ void if 'args' in rout and 'vars' in rout: for a in rout['args']: var=rout['vars'][a] - if l_or(isintent_in,isintent_inout)(var): + if l_or(isintent_in, isintent_inout)(var): nofargs=nofargs+1 if isoptional(var): nofoptargs=nofoptargs+1 @@ -751,7 +751,7 @@ void rout['note']=['See elsewhere.'] return ret -def common_sign2map(a,var): # obsolute +def common_sign2map(a, var): # obsolute ret={'varname':a} ret['ctype']=getctype(var) if isstringarray(var): @@ -761,13 +761,13 @@ def common_sign2map(a,var): # obsolute if ret['ctype'] in cformat_map: ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isarray(var): - ret=dictappend(ret,getarrdims(a,var)) + ret=dictappend(ret, getarrdims(a, var)) elif isstring(var): ret['size']=getstrlength(var) ret['rank']='1' - ret['pydocsign'],ret['pydocsignout']=getpydocsign(a,var) + ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) if hasnote(var): ret['note']=var['note'] var['note']=['See elsewhere.'] - ret['arrdocstr']=getarrdocsign(a,var) # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr']=getarrdocsign(a, 
var) # for strings this returns 0-rank but actually is 1-rank return ret diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index f80ab06f0..f3bf848a7 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -127,7 +127,7 @@ capi_return_pt: } #endtitle# """, - 'need':['setjmp.h','CFUNCSMESS'], + 'need':['setjmp.h', 'CFUNCSMESS'], 'maxnofargs':'#maxnofargs#', 'nofoptargs':'#nofoptargs#', 'docstr':"""\ @@ -142,42 +142,42 @@ capi_return_pt: } cb_rout_rules=[ {# Init - 'separatorsfor':{'decl':'\n', - 'args':',','optargs':'','pyobjfrom':'\n','freemem':'\n', - 'args_td':',','optargs_td':'', - 'args_nm':',','optargs_nm':'', - 'frompyobj':'\n','setdims':'\n', - 'docstrsigns':'\\n"\n"', - 'latexdocstrsigns':'\n', - 'latexdocstrreq':'\n','latexdocstropt':'\n', - 'latexdocstrout':'\n','latexdocstrcbs':'\n', + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', }, - 'decl':'/*decl*/','pyobjfrom':'/*pyobjfrom*/','frompyobj':'/*frompyobj*/', - 'args':[],'optargs':'','return':'','strarglens':'','freemem':'/*freemem*/', - 'args_td':[],'optargs_td':'','strarglens_td':'', - 'args_nm':[],'optargs_nm':'','strarglens_nm':'', - 'noargs':'', - 'setdims':'/*setdims*/', - 'docstrsigns':'','latexdocstrsigns':'', - 'docstrreq':'\tRequired arguments:', - 'docstropt':'\tOptional arguments:', - 'docstrout':'\tReturn objects:', - 'docstrcbs':'\tCall-back functions:', - 'docreturn':'','docsign':'','docsignopt':'', - 'latexdocstrreq':'\\noindent Required arguments:', - 'latexdocstropt':'\\noindent Optional arguments:', - 'latexdocstrout':'\\noindent Return objects:', - 'latexdocstrcbs':'\\noindent Call-back functions:', - 'routnote':{hasnote:'--- #note#',l_not(hasnote):''}, - },{ # Function + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\tRequired arguments:', + 'docstropt': '\tOptional arguments:', + 'docstrout': '\tReturn objects:', + 'docstrcbs': '\tCall-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote:'--- #note#',l_not(hasnote):''}, + }, { # Function 'decl':'\t#ctype# return_value;', 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'}, '\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");', {debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'} ], - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},'GETSCALARFROMPYTUPLE'], + 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], 'return':'\treturn return_value;', - '_check':l_and(isfunction,l_not(isstringfunction),l_not(iscomplexfunction)) + 
'_check':l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) }, {# String function 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, @@ -189,8 +189,8 @@ cb_rout_rules=[ \t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""", {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} ], - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'}, - 'string.h','GETSTRFROMPYTUPLE'], + 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], 'return':'return;', '_check':isstringfunction }, @@ -240,8 +240,8 @@ return_value \treturn; #endif """, - 'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'}, - 'string.h','GETSCALARFROMPYTUPLE','#ctype#'], + 'need':['#ctype#_from_pyobj', {debugcapi:'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], '_check':iscomplexfunction }, {'docstrout':'\t\t#pydocsignout#', @@ -254,59 +254,59 @@ return_value cb_arg_rules=[ { # Doc - 'docstropt':{l_and(isoptional,isintent_nothide):'\t\t#pydocsign#'}, - 'docstrreq':{l_and(isrequired,isintent_nothide):'\t\t#pydocsign#'}, + 'docstropt':{l_and(isoptional, isintent_nothide):'\t\t#pydocsign#'}, + 'docstrreq':{l_and(isrequired, isintent_nothide):'\t\t#pydocsign#'}, 'docstrout':{isintent_out:'\t\t#pydocsignout#'}, - 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + 'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, - 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + 'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote,isintent_hide):'--- #note#', - l_and(hasnote,isintent_nothide):'--- See above.'}]}, - 'docsign':{l_and(isrequired,isintent_nothide):'#varname#,'}, - 'docsignopt':{l_and(isoptional,isintent_nothide):'#varname#,'}, + {l_and(hasnote, isintent_hide):'--- #note#', + l_and(hasnote, isintent_nothide):'--- See above.'}]}, + 'docsign':{l_and(isrequired, isintent_nothide):'#varname#,'}, + 'docsignopt':{l_and(isoptional, isintent_nothide):'#varname#,'}, 'depend':'' }, { - 'args':{ - l_and (isscalar,isintent_c):'#ctype# #varname_i#', - l_and (isscalar,l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi', + 'args': { + l_and (isscalar, isintent_c):'#ctype# #varname_i#', + l_and (isscalar, l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi', isarray:'#ctype# *#varname_i#', isstring:'#ctype# #varname_i#' }, - 'args_nm':{ - l_and (isscalar,isintent_c):'#varname_i#', - l_and (isscalar,l_not(isintent_c)):'#varname_i#_cb_capi', + 'args_nm': { + l_and (isscalar, isintent_c):'#varname_i#', + l_and (isscalar, l_not(isintent_c)):'#varname_i#_cb_capi', isarray:'#varname_i#', isstring:'#varname_i#' }, - 'args_td':{ - l_and (isscalar,isintent_c):'#ctype#', - l_and (isscalar,l_not(isintent_c)):'#ctype# *', + 'args_td': { + l_and (isscalar, isintent_c):'#ctype#', + l_and (isscalar, l_not(isintent_c)):'#ctype# *', isarray:'#ctype# *', isstring:'#ctype#' }, - 'strarglens':{isstring:',int #varname_i#_cb_len'}, # untested with multiple args - 'strarglens_td':{isstring:',int'}, # untested with multiple args - 'strarglens_nm':{isstring:',#varname_i#_cb_len'}, # untested with multiple args + 'strarglens': {isstring:',int #varname_i#_cb_len'}, # untested with multiple args + 'strarglens_td': {isstring:',int'}, 
# untested with multiple args + 'strarglens_nm': {isstring:',#varname_i#_cb_len'}, # untested with multiple args }, { # Scalars 'decl':{l_not(isintent_c):'\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'}, - 'error': {l_and(isintent_c,isintent_out, + 'error': {l_and(isintent_c, isintent_out, throw_error('intent(c,out) is forbidden for callback scalar arguments')):\ ''}, 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, {isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, - {l_and(debugcapi,l_and(l_not(iscomplex),isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, - {l_and(debugcapi,l_and(l_not(iscomplex),l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, - {l_and(debugcapi,l_and(iscomplex,isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, - {l_and(debugcapi,l_and(iscomplex,l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, ], - 'need':[{isintent_out:['#ctype#_from_pyobj','GETSCALARFROMPYTUPLE']}, + 'need':[{isintent_out:['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, {debugcapi:'CFUNCSMESS'}], '_check':isscalar - },{ + }, { 'pyobjfrom':[{isintent_in:"""\ \tif (#name#_nofargs>capi_i) \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#))) @@ -318,18 +318,18 @@ cb_arg_rules=[ 'need':[{isintent_in:'pyobj_from_#ctype#1'}, {isintent_inout:'pyarr_from_p_#ctype#1'}, {iscomplex:'#ctype#'}], - '_check':l_and(isscalar,isintent_nothide), + '_check':l_and(isscalar, isintent_nothide), '_optional':'' - },{# String + }, {# String 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'}, """\tif (capi_j>capi_i) \t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", {debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, ], - 'need':['#ctype#','GETSTRFROMPYTUPLE', - {debugcapi:'CFUNCSMESS'},'string.h'], - '_check':l_and(isstring,isintent_out) - },{ + 'need':['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi:'CFUNCSMESS'}, 'string.h'], + '_check':l_and(isstring, isintent_out) + }, { 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, {isintent_in:"""\ \tif (#name#_nofargs>capi_i) @@ -343,7 +343,7 @@ cb_arg_rules=[ \t}"""}], 'need':[{isintent_in:'pyobj_from_#ctype#1size'}, {isintent_inout:'pyarr_from_p_#ctype#1'}], - '_check':l_and(isstring,isintent_nothide), + '_check':l_and(isstring, isintent_nothide), '_optional':'' }, # Array ... 
@@ -354,12 +354,12 @@ cb_arg_rules=[ '_depend':'' }, { - 'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, - {isintent_c:"""\ + 'pyobjfrom': [{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ \tif (#name#_nofargs>capi_i) { \t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ """, - l_not(isintent_c):"""\ + l_not(isintent_c): """\ \tif (#name#_nofargs>capi_i) { \t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ """, @@ -370,9 +370,9 @@ cb_arg_rules=[ \t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) \t\t\tgoto capi_fail; }"""], - '_check':l_and(isarray,isintent_nothide,l_or(isintent_in,isintent_inout)), - '_optional':'', - },{ + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { 'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'}, """\tif (capi_j>capi_i) { \t\tPyArrayObject *rv_cb_arr = NULL; @@ -391,9 +391,9 @@ cb_arg_rules=[ \t}""", {debugcapi:'\tfprintf(stderr,"<-.\\n");'}, ], - 'need':['MEMCOPY',{iscomplexarray:'#ctype#'}], - '_check':l_and(isarray,isintent_out) - },{ + 'need':['MEMCOPY', {iscomplexarray:'#ctype#'}], + '_check':l_and(isarray, isintent_out) + }, { 'docreturn':'#varname#,', '_check':isintent_out } @@ -408,28 +408,28 @@ def buildcallbacks(m): if bi['block']=='interface': for b in bi['body']: if b: - buildcallback(b,m['name']) + buildcallback(b, m['name']) else: errmess('warning: empty body for %s\n' % (m['name'])) -def buildcallback(rout,um): +def buildcallback(rout, um): global cb_map from . 
import capi_maps - outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'],um)) - args,depargs=getargs(rout) + outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'], um)) + args, depargs=getargs(rout) capi_maps.depargs=depargs var=rout['vars'] - vrd=capi_maps.cb_routsign2map(rout,um) - rd=dictappend({},vrd) - cb_map[um].append([rout['name'],rd['name']]) + vrd=capi_maps.cb_routsign2map(rout, um) + rd=dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) for r in cb_rout_rules: if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r,vrd,rout) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, rout) + rd=dictappend(rd, ar) savevrd={} - for i,a in enumerate(args): - vrd=capi_maps.cb_sign2map(a,var[a], index=i) + for i, a in enumerate(args): + vrd=capi_maps.cb_sign2map(a, var[a], index=i) savevrd[a]=vrd for r in cb_arg_rules: if '_depend' in r: @@ -437,8 +437,8 @@ def buildcallback(rout,um): if '_optional' in r and isoptional(var[a]): continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) if '_break' in r: break for a in args: @@ -449,8 +449,8 @@ def buildcallback(rout,um): if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) if '_break' in r: break for a in depargs: @@ -461,8 +461,8 @@ def buildcallback(rout,um): if '_optional' in r: continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) if '_break' in r: break if 'args' in rd and 'optargs' in rd: @@ -483,22 +483,22 @@ def buildcallback(rout,um): #endif """] if isinstance(rd['docreturn'], list): - rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']})) + rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']})) optargs=stripcomma(replace('#docsignopt#', {'docsignopt':rd['docsignopt']} )) if optargs=='': - rd['docsignature']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']})) + rd['docsignature']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) else: rd['docsignature']=replace('#docsign#[#docsignopt#]', - {'docsign':rd['docsign'], - 'docsignopt':optargs, + {'docsign': rd['docsign'], + 'docsignopt': optargs, }) - rd['latexdocsignature']=rd['docsignature'].replace('_','\\_') - rd['latexdocsignature']=rd['latexdocsignature'].replace(',',', ') + rd['latexdocsignature']=rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature']=rd['latexdocsignature'].replace(',', ', ') rd['docstrsigns']=[] rd['latexdocstrsigns']=[] - for k in ['docstrreq','docstropt','docstrout','docstrcbs']: + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: if k in rd and isinstance(rd[k], list): rd['docstrsigns']=rd['docstrsigns']+rd[k] k='latex'+k @@ -513,7 +513,7 @@ def buildcallback(rout,um): if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): rd['noargs'] = 'void' - ar=applyrules(cb_routine_rules,rd) + ar=applyrules(cb_routine_rules, rd) cfuncs.callbacks[rd['name']]=ar['body'] if isinstance(ar['need'], str): ar['need']=[ar['need']] diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 2229c3e24..c8b529056 100644 --- 
a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -39,12 +39,12 @@ typedefs_generated={'typedefs_generated':'/*need_typedefs_generated*/'} cppmacros={'cppmacros':'/*need_cppmacros*/'} cfuncs={'cfuncs':'/*need_cfuncs*/'} callbacks={'callbacks':'/*need_callbacks*/'} -f90modhooks={'f90modhooks':'/*need_f90modhooks*/', - 'initf90modhooksstatic':'/*initf90modhooksstatic*/', - 'initf90modhooksdynamic':'/*initf90modhooksdynamic*/', +f90modhooks={'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', } -commonhooks={'commonhooks':'/*need_commonhooks*/', - 'initcommonhooks':'/*need_initcommonhooks*/', +commonhooks={'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', } ############ Includes ################### @@ -419,7 +419,7 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\ ## """ -needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN','PRINTPYOBJERR'] +needs['GETSTRFROMPYTUPLE']=['STRINGCOPYN', 'PRINTPYOBJERR'] cppmacros['GETSTRFROMPYTUPLE']="""\ #define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ \t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ @@ -587,7 +587,7 @@ static int *nextforcomb(void) { if (forcombcache.tr) return i_tr; return i; }""" -needs['try_pyarr_from_string']=['STRINGCOPYN','PRINTPYOBJERR','string'] +needs['try_pyarr_from_string']=['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] cfuncs['try_pyarr_from_string']="""\ static int try_pyarr_from_string(PyObject *obj,const string str) { \tPyArrayObject *arr = NULL; @@ -600,7 +600,7 @@ capi_fail: \treturn 0; } """ -needs['string_from_pyobj']=['string','STRINGMALLOC','STRINGCOPYN'] +needs['string_from_pyobj']=['string', 'STRINGMALLOC', 'STRINGCOPYN'] cfuncs['string_from_pyobj']="""\ static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { \tPyArrayObject *arr = NULL; @@ -680,7 +680,7 @@ static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { \treturn 0; } """ -needs['signed_char_from_pyobj']=['int_from_pyobj','signed_char'] +needs['signed_char_from_pyobj']=['int_from_pyobj', 'signed_char'] cfuncs['signed_char_from_pyobj']="""\ static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { \tint i=0; @@ -803,7 +803,7 @@ static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) \treturn 0; } """ -needs['long_double_from_pyobj']=['double_from_pyobj','long_double'] +needs['long_double_from_pyobj']=['double_from_pyobj', 'long_double'] cfuncs['long_double_from_pyobj']="""\ static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { \tdouble d=0; @@ -875,7 +875,7 @@ static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { \treturn 0; } """ -needs['complex_long_double_from_pyobj']=['complex_long_double','long_double', +needs['complex_long_double_from_pyobj']=['complex_long_double', 'long_double', 'complex_double_from_pyobj'] cfuncs['complex_long_double_from_pyobj']="""\ static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { @@ -976,7 +976,7 @@ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char \treturn 0; } """ -needs['complex_float_from_pyobj']=['complex_float','complex_double_from_pyobj'] +needs['complex_float_from_pyobj']=['complex_float', 'complex_double_from_pyobj'] cfuncs['complex_float_from_pyobj']="""\ static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char 
*errmess) { \tcomplex_double cd={0.0,0.0}; @@ -988,30 +988,30 @@ static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *e \treturn 0; } """ -needs['try_pyarr_from_char']=['pyobj_from_char1','TRYPYARRAYTEMPLATE'] +needs['try_pyarr_from_char']=['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] cfuncs['try_pyarr_from_char']='static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','unsigned_char'] +needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE', 'unsigned_char'] cfuncs['try_pyarr_from_unsigned_char']='static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' -needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE','signed_char'] +needs['try_pyarr_from_signed_char']=['TRYPYARRAYTEMPLATE', 'signed_char'] cfuncs['try_pyarr_from_signed_char']='static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' -needs['try_pyarr_from_short']=['pyobj_from_short1','TRYPYARRAYTEMPLATE'] +needs['try_pyarr_from_short']=['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] cfuncs['try_pyarr_from_short']='static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n' -needs['try_pyarr_from_int']=['pyobj_from_int1','TRYPYARRAYTEMPLATE'] +needs['try_pyarr_from_int']=['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] cfuncs['try_pyarr_from_int']='static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n' -needs['try_pyarr_from_long']=['pyobj_from_long1','TRYPYARRAYTEMPLATE'] +needs['try_pyarr_from_long']=['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] cfuncs['try_pyarr_from_long']='static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n' -needs['try_pyarr_from_long_long']=['pyobj_from_long_long1','TRYPYARRAYTEMPLATE','long_long'] +needs['try_pyarr_from_long_long']=['pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] cfuncs['try_pyarr_from_long_long']='static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' -needs['try_pyarr_from_float']=['pyobj_from_float1','TRYPYARRAYTEMPLATE'] +needs['try_pyarr_from_float']=['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] cfuncs['try_pyarr_from_float']='static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n' -needs['try_pyarr_from_double']=['pyobj_from_double1','TRYPYARRAYTEMPLATE'] +needs['try_pyarr_from_double']=['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] cfuncs['try_pyarr_from_double']='static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n' -needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1','TRYCOMPLEXPYARRAYTEMPLATE','complex_float'] +needs['try_pyarr_from_complex_float']=['pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] cfuncs['try_pyarr_from_complex_float']='static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' -needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1','TRYCOMPLEXPYARRAYTEMPLATE','complex_double'] +needs['try_pyarr_from_complex_double']=['pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] cfuncs['try_pyarr_from_complex_double']='static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) 
{\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' -needs['create_cb_arglist']=['CFUNCSMESS','PRINTPYOBJERR','MINMAX'] +needs['create_cb_arglist']=['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] cfuncs['create_cb_arglist']="""\ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { \tPyObject *tmp = NULL; @@ -1118,7 +1118,7 @@ def buildcfuncs(): from .capi_maps import c2capi_map for k in c2capi_map.keys(): m='pyarr_from_p_%s1'%k - cppmacros[m]='#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))'%(m,c2capi_map[k]) + cppmacros[m]='#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))'%(m, c2capi_map[k]) k='string' m='pyarr_from_p_%s1'%k cppmacros[m]='#define %s(v,dims) (PyArray_SimpleNewFromData(1,dims,NPY_CHAR,(char *)v))'%(m) @@ -1127,10 +1127,10 @@ def buildcfuncs(): ############ Auxiliary functions for sorting needs ################### def append_needs(need,flag=1): - global outneeds,needs + global outneeds, needs if isinstance(need, list): for n in need: - append_needs(n,flag) + append_needs(n, flag) elif isinstance(need, str): if not need: return if need in includes0: @@ -1159,7 +1159,7 @@ def append_needs(need,flag=1): tmp={} if need in needs: for nn in needs[need]: - t=append_needs(nn,0) + t=append_needs(nn, 0) if isinstance(t, dict): for nnn in t.keys(): if nnn in tmp: @@ -1175,7 +1175,7 @@ def append_needs(need,flag=1): tmp={} if need in needs: for nn in needs[need]: - t=append_needs(nn,flag) + t=append_needs(nn, flag) if isinstance(t, dict): for nnn in t.keys(): if nnn in tmp: @@ -1190,7 +1190,7 @@ def append_needs(need,flag=1): errmess('append_needs: expected list or string but got :%s\n'%(repr(need))) def get_needs(): - global outneeds,needs + global outneeds, needs res={} for n in outneeds.keys(): out=[] @@ -1210,9 +1210,9 @@ def get_needs(): else: out.append(outneeds[n][0]) del outneeds[n][0] - if saveout and (0 not in map(lambda x,y:x==y, saveout, outneeds[n])) \ + if saveout and (0 not in map(lambda x, y:x==y, saveout, outneeds[n])) \ and outneeds[n] != []: - print(n,saveout) + print(n, saveout) errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') out=out+saveout break diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index dd44e3326..d3b7f6dc2 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -39,10 +39,10 @@ def findcommonblocks(block,top=1): vars={} for v in block['common'][n]: vars[v]=block['vars'][v] - ret.append((n,block['common'][n],vars)) + ret.append((n, block['common'][n], vars)) elif hasbody(block): for b in block['body']: - ret=ret+findcommonblocks(b,0) + ret=ret+findcommonblocks(b, 0) if top: tret=[] names=[] @@ -56,31 +56,31 @@ def findcommonblocks(block,top=1): def buildhooks(m): ret = {'commonhooks':[],'initcommonhooks':[],'docs':['"COMMON blocks:\\n"']} fwrap = [''] - def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0],line) + def fadd(line,s=fwrap): s[0] = '%s\n %s'%(s[0], line) chooks = [''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line) + def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) ihooks = [''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line) + def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line) - for (name,vnames,vars) in findcommonblocks(m): + def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) + for (name, vnames, vars) in findcommonblocks(m): lower_name = 
name.lower() - hnames,inames = [],[] + hnames, inames = [], [] for n in vnames: if isintent_hide(vars[n]): hnames.append(n) else: inames.append(n) if hnames: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name,','.join(inames),','.join(hnames))) + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n'%(name, ','.join(inames), ','.join(hnames))) else: - outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name,','.join(inames))) + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n'%(name, ','.join(inames))) fadd('subroutine f2pyinit%s(setupfunc)'%name) fadd('external setupfunc') for n in vnames: - fadd(func2subr.var2fixfortran(vars,n)) + fadd(func2subr.var2fixfortran(vars, n)) if name=='_BLNK_': fadd('common %s'%(','.join(vnames))) else: - fadd('common /%s/ %s'%(name,','.join(vnames))) + fadd('common /%s/ %s'%(name, ','.join(vnames))) fadd('call setupfunc(%s)'%(','.join(inames))) fadd('end\n') cadd('static FortranDataDef f2py_%s_def[] = {'%(name)) @@ -88,45 +88,45 @@ def buildhooks(m): for n in inames: ct = capi_maps.getctype(vars[n]) at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n,vars[n]) + dm = capi_maps.getarrdims(n, vars[n]) if dm['dims']: idims.append('(%s)'%(dm['dims'])) else: idims.append('') dms=dm['dims'].strip() if not dms: dms='-1' - cadd('\t{\"%s\",%s,{{%s}},%s},'%(n,dm['rank'],dms,at)) + cadd('\t{\"%s\",%s,{{%s}},%s},'%(n, dm['rank'], dms, at)) cadd('\t{NULL}\n};') inames1 = rmbadname(inames) inames1_tps = ','.join(['char *'+s for s in inames1]) - cadd('static void f2py_setup_%s(%s) {'%(name,inames1_tps)) + cadd('static void f2py_setup_%s(%s) {'%(name, inames1_tps)) cadd('\tint i_f2py=0;') for n in inames1: - cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name,n)) + cadd('\tf2py_%s_def[i_f2py++].data = %s;'%(name, n)) cadd('}') if '_' in lower_name: F_FUNC='F_FUNC_US' else: F_FUNC='F_FUNC' cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\ - %(F_FUNC,lower_name,name.upper(), + %(F_FUNC, lower_name, name.upper(), ','.join(['char*']*len(inames1)))) cadd('static void f2py_init_%s(void) {'%name) cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC,lower_name,name.upper(),name)) + %(F_FUNC, lower_name, name.upper(), name)) cadd('}\n') - iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name,name,name)) - tname = name.replace('_','\\_') + iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(name, name, name)) + tname = name.replace('_', '\\_') dadd('\\subsection{Common block \\texttt{%s}}\n'%(tname)) dadd('\\begin{description}') for n in inames: - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,vars[n]))) + dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, vars[n]))) if hasnote(vars[n]): note = vars[n]['note'] if isinstance(note, list): note='\n'.join(note) dadd('--- %s'%(note)) dadd('\\end{description}') - ret['docs'].append('"\t/%s/ %s\\n"'%(name,','.join(map(lambda v,d:v+d,inames,idims)))) + ret['docs'].append('"\t/%s/ %s\\n"'%(name, ','.join(map(lambda v, d:v+d, inames, idims)))) ret['commonhooks']=chooks ret['initcommonhooks']=ihooks ret['latexdoc']=doc[0] if len(ret['docs'])<=1: ret['docs']='' - return ret,fwrap[0] + return ret, fwrap[0] diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 2ee8eb9a1..34b6192e4 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -204,21 +204,21 @@ for c in 
"ijklmn": defaultimplicitrules[c]={'typespec':'integer'} del c badnames={} invbadnames={} -for n in ['int','double','float','char','short','long','void','case','while', - 'return','signed','unsigned','if','for','typedef','sizeof','union', - 'struct','static','register','new','break','do','goto','switch', - 'continue','else','inline','extern','delete','const','auto', - 'len','rank','shape','index','slen','size','_i', +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', 'max', 'min', - 'flen','fshape', - 'string','complex_double','float_double','stdin','stderr','stdout', - 'type','default']: + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: badnames[n]=n+'_bn' invbadnames[n+'_bn']=n def rmbadname1(name): if name in badnames: - errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name,badnames[name])) + errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name, badnames[name])) return badnames[name] return name @@ -227,7 +227,7 @@ def rmbadname(names): return [rmbadname1(_m) for _m in names] def undo_rmbadname1(name): if name in invbadnames: errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\ - %(name,invbadnames[name])) + %(name, invbadnames[name])) return invbadnames[name] return name @@ -240,17 +240,17 @@ def getextension(name): if '/' in name[i:]: return '' return name[i+1:] -is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match -_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search -_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search -_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search -_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match +is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match +_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search +_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search +_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match def is_free_format(file): """Check if file is in free format Fortran.""" # f90 allows both fixed and free format, assuming fixed unless # signs of free format are detected. result = 0 - f = open(file,'r') + f = open(file, 'r') line = f.readline() n = 15 # the number of non-comment lines to scan for hints if _has_f_header(line): @@ -277,23 +277,23 @@ def readfortrancode(ffile,dowithline=show,istop=1): 2) Call dowithline(line) on every line. 3) Recursively call itself when statement \"include '<filename>'\" is met. 
""" - global gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase,include_paths + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase, include_paths if not istop: - saveglobals=gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase + saveglobals=gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase if ffile==[]: return localdolowercase = dolowercase cont=0 finalline='' ll='' commentline=re.compile(r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)') - includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")',re.I) + includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I) cont1=re.compile(r'(?P<line>.*)&\s*\Z') cont2=re.compile(r'(\s*&|)(?P<line>.*)') mline_mark = re.compile(r".*?'''") - if istop: dowithline('',-1) - ll,l1='','' + if istop: dowithline('', -1) + ll, l1='', '' spacedigits=[' '] + [str(_m) for _m in range(10)] filepositiontext='' fin=fileinput.FileInput(ffile) @@ -316,10 +316,10 @@ def readfortrancode(ffile,dowithline=show,istop=1): if strictf77: beginpattern=beginpattern77 else: beginpattern=beginpattern90 outmess('\tReading file %s (format:%s%s)\n'\ - %(repr(currentfilename),sourcecodeform, + %(repr(currentfilename), sourcecodeform, strictf77 and ',strict' or '')) - l=l.expandtabs().replace('\xa0',' ') + l=l.expandtabs().replace('\xa0', ' ') while not l=='': # Get rid of newline characters if l[-1] not in "\n\r\f": break l=l[:-1] @@ -337,7 +337,7 @@ def readfortrancode(ffile,dowithline=show,istop=1): cont=0 continue if sourcecodeform=='fix': - if l[0] in ['*','c','!','C','#']: + if l[0] in ['*', 'c', '!', 'C', '#']: if l[1:5].lower()=='f2py': # f2py directive l=' '+l[5:] else: # Skip comment line @@ -404,20 +404,20 @@ def readfortrancode(ffile,dowithline=show,istop=1): cont=(r is not None) else: raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform)) - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1) + filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1) m=includeline.match(origfinalline) if m: fn=m.group('name') if os.path.isfile(fn): - readfortrancode(fn,dowithline=dowithline,istop=0) + readfortrancode(fn, dowithline=dowithline, istop=0) else: include_dirs = [os.path.dirname(currentfilename)] + include_paths foundfile = 0 for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir,fn) + fn1 = os.path.join(inc_dir, fn) if os.path.isfile(fn1): foundfile = 1 - readfortrancode(fn1,dowithline=dowithline,istop=0) + readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs))) @@ -428,20 +428,20 @@ def readfortrancode(ffile,dowithline=show,istop=1): finalline=ll.lower() else: finalline=ll origfinalline = ll - filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1) + filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1) m=includeline.match(origfinalline) if m: fn=m.group('name') if os.path.isfile(fn): - readfortrancode(fn,dowithline=dowithline,istop=0) + readfortrancode(fn, dowithline=dowithline, istop=0) else: include_dirs = [os.path.dirname(currentfilename)] + include_paths foundfile = 0 for inc_dir in include_dirs: - fn1 = os.path.join(inc_dir,fn) + fn1 = os.path.join(inc_dir, fn) if os.path.isfile(fn1): foundfile = 1 - readfortrancode(fn1,dowithline=dowithline,istop=0) + readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs))) @@ -449,10 +449,10 @@ def readfortrancode(ffile,dowithline=show,istop=1): dowithline(finalline) filepositiontext='' fin.close() - if istop: dowithline('',1) + if istop: dowithline('', 1) else: - gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\ - beginpattern,quiet,verbose,dolowercase=saveglobals + gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase=saveglobals ########### Crack line beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \ @@ -460,55 +460,55 @@ beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \ r'\s*(?P<after>%s)\s*\Z' ## fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' -typespattern=re.compile(beforethisafter%('',fortrantypes,fortrantypes,'.*'),re.I),'type' -typespattern4implicit=re.compile(beforethisafter%('',fortrantypes+'|static|automatic|undefined',fortrantypes+'|static|automatic|undefined','.*'),re.I) +typespattern=re.compile(beforethisafter%('', fortrantypes, fortrantypes, '.*'), re.I), 'type' +typespattern4implicit=re.compile(beforethisafter%('', fortrantypes+'|static|automatic|undefined', fortrantypes+'|static|automatic|undefined', '.*'), re.I) # -functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)','function','function','.*'),re.I),'begin' -subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?','subroutine','subroutine','.*'),re.I),'begin' +functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' +subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' #modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' # groupbegins77=r'program|block\s*data' -beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin' +beginpattern77=re.compile(beforethisafter%('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' -beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin' +beginpattern90=re.compile(beforethisafter%('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' -endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end' 
+endpattern=re.compile(beforethisafter%('', groupends, groupends, '[\w\s]*'), re.I), 'end' #endifs='end\s*(if|do|where|select|while|forall)' endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' -endifpattern=re.compile(beforethisafter%('[\w]*?',endifs,endifs,'[\w\s]*'),re.I),'endif' +endifpattern=re.compile(beforethisafter%('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif' # -implicitpattern=re.compile(beforethisafter%('','implicit','implicit','.*'),re.I),'implicit' -dimensionpattern=re.compile(beforethisafter%('','dimension|virtual','dimension|virtual','.*'),re.I),'dimension' -externalpattern=re.compile(beforethisafter%('','external','external','.*'),re.I),'external' -optionalpattern=re.compile(beforethisafter%('','optional','optional','.*'),re.I),'optional' -requiredpattern=re.compile(beforethisafter%('','required','required','.*'),re.I),'required' -publicpattern=re.compile(beforethisafter%('','public','public','.*'),re.I),'public' -privatepattern=re.compile(beforethisafter%('','private','private','.*'),re.I),'private' -intrisicpattern=re.compile(beforethisafter%('','intrisic','intrisic','.*'),re.I),'intrisic' -intentpattern=re.compile(beforethisafter%('','intent|depend|note|check','intent|depend|note|check','\s*\(.*?\).*'),re.I),'intent' -parameterpattern=re.compile(beforethisafter%('','parameter','parameter','\s*\(.*'),re.I),'parameter' -datapattern=re.compile(beforethisafter%('','data','data','.*'),re.I),'data' -callpattern=re.compile(beforethisafter%('','call','call','.*'),re.I),'call' -entrypattern=re.compile(beforethisafter%('','entry','entry','.*'),re.I),'entry' -callfunpattern=re.compile(beforethisafter%('','callfun','callfun','.*'),re.I),'callfun' -commonpattern=re.compile(beforethisafter%('','common','common','.*'),re.I),'common' -usepattern=re.compile(beforethisafter%('','use','use','.*'),re.I),'use' -containspattern=re.compile(beforethisafter%('','contains','contains',''),re.I),'contains' -formatpattern=re.compile(beforethisafter%('','format','format','.*'),re.I),'format' +implicitpattern=re.compile(beforethisafter%('', 'implicit', 'implicit', '.*'), re.I), 'implicit' +dimensionpattern=re.compile(beforethisafter%('', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' +externalpattern=re.compile(beforethisafter%('', 'external', 'external', '.*'), re.I), 'external' +optionalpattern=re.compile(beforethisafter%('', 'optional', 'optional', '.*'), re.I), 'optional' +requiredpattern=re.compile(beforethisafter%('', 'required', 'required', '.*'), re.I), 'required' +publicpattern=re.compile(beforethisafter%('', 'public', 'public', '.*'), re.I), 'public' +privatepattern=re.compile(beforethisafter%('', 'private', 'private', '.*'), re.I), 'private' +intrisicpattern=re.compile(beforethisafter%('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic' +intentpattern=re.compile(beforethisafter%('', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent' +parameterpattern=re.compile(beforethisafter%('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter' +datapattern=re.compile(beforethisafter%('', 'data', 'data', '.*'), re.I), 'data' +callpattern=re.compile(beforethisafter%('', 'call', 'call', '.*'), re.I), 'call' +entrypattern=re.compile(beforethisafter%('', 'entry', 'entry', '.*'), re.I), 'entry' +callfunpattern=re.compile(beforethisafter%('', 'callfun', 'callfun', '.*'), re.I), 'callfun' +commonpattern=re.compile(beforethisafter%('', 'common', 'common', '.*'), re.I), 'common' 
+usepattern=re.compile(beforethisafter%('', 'use', 'use', '.*'), re.I), 'use' +containspattern=re.compile(beforethisafter%('', 'contains', 'contains', ''), re.I), 'contains' +formatpattern=re.compile(beforethisafter%('', 'format', 'format', '.*'), re.I), 'format' ## Non-fortran and f2py-specific statements -f2pyenhancementspattern=re.compile(beforethisafter%('','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','.*'),re.I|re.S),'f2pyenhancements' -multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z",re.S),'multiline' +f2pyenhancementspattern=re.compile(beforethisafter%('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I|re.S), 'f2pyenhancements' +multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline' ## def _simplifyargs(argsline): a = [] for n in markoutercomma(argsline).split('@,@'): for r in '(),': - n = n.replace(r,'_') + n = n.replace(r, '_') a.append(n) return ','.join(a) -crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*',re.I) +crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I) def crackline(line,reset=0): """ reset=-1 --- initialize @@ -517,14 +517,14 @@ def crackline(line,reset=0): Cracked data is saved in grouplist[0]. """ - global beginpattern,groupcounter,groupname,groupcache,grouplist,gotnextfile,\ - filepositiontext,currentfilename,neededmodule,expectbegin,skipblocksuntil,\ - skipemptyends,previous_context + global beginpattern, groupcounter, groupname, groupcache, grouplist, gotnextfile,\ + filepositiontext, currentfilename, neededmodule, expectbegin, skipblocksuntil,\ + skipemptyends, previous_context if ';' in line and not (f2pyenhancementspattern[0].match(line) or multilinepattern[0].match(line)): for l in line.split(';'): - assert reset==0,repr(reset) # XXX: non-zero reset values need testing - crackline(l,reset) + assert reset==0, repr(reset) # XXX: non-zero reset values need testing + crackline(l, reset) return if reset<0: groupcounter=0 @@ -542,7 +542,7 @@ def crackline(line,reset=0): fl=0 if f77modulename and neededmodule==groupcounter: fl=2 while groupcounter>fl: - outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter),repr(groupname))) + outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter), repr(groupname))) outmess('crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') grouplist[groupcounter-1].append(groupcache[groupcounter]) grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] @@ -561,15 +561,15 @@ def crackline(line,reset=0): return if line=='': return flag=0 - for pat in [dimensionpattern,externalpattern,intentpattern,optionalpattern, + for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, requiredpattern, - parameterpattern,datapattern,publicpattern,privatepattern, + parameterpattern, datapattern, publicpattern, privatepattern, intrisicpattern, - endifpattern,endpattern, + endifpattern, endpattern, formatpattern, - beginpattern,functionpattern,subroutinepattern, - implicitpattern,typespattern,commonpattern, - callpattern,usepattern,containspattern, + beginpattern, functionpattern, subroutinepattern, + implicitpattern, typespattern, commonpattern, + callpattern, usepattern, containspattern, entrypattern, f2pyenhancementspattern, multilinepattern @@ -587,18 +587,18 @@ def crackline(line,reset=0): name=invbadnames[name] if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: continue - m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name,markouterparen(line),re.I) + m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name, markouterparen(line), re.I) if m1: m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) if m2: - line='callfun %s(%s) result (%s)'%(name,a,m2.group('result')) - else: line='callfun %s(%s)'%(name,a) + line='callfun %s(%s) result (%s)'%(name, a, m2.group('result')) + else: line='callfun %s(%s)'%(name, a) m = callfunpattern[0].match(line) if not m: outmess('crackline: could not resolve function call for line=%s.\n'%repr(line)) return - analyzeline(m,'callfun',line) + analyzeline(m, 'callfun', line) return if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')): previous_context = None @@ -632,7 +632,7 @@ def crackline(line,reset=0): groupcounter=groupcounter+1 return gotnextfile=0 - analyzeline(m,pat[1],line) + analyzeline(m, pat[1], line) expectbegin=0 elif pat[1]=='endif': pass @@ -642,7 +642,7 @@ def crackline(line,reset=0): skipblocksuntil=groupcounter else: if 0<=skipblocksuntil<=groupcounter:return - analyzeline(m,pat[1],line) + analyzeline(m, pat[1], line) def markouterparen(line): l='';f=0 @@ -673,10 +673,10 @@ def markoutercomma(line,comma=','): l=l+'@'+comma+'@' continue l=l+c - assert not f,repr((f,line,l,cc)) + assert not f, repr((f, line, l, cc)) return l def unmarkouterparen(line): - r = line.replace('@(@','(').replace('@)@',')') + r = line.replace('@(@', '(').replace('@)@', ')') return r def appenddecl(decl,decl2,force=1): if not decl: decl={} @@ -688,32 +688,32 @@ def appenddecl(decl,decl2,force=1): decl[k]=decl2[k] elif k=='attrspec': for l in decl2[k]: - decl=setattrspec(decl,l,force) + decl=setattrspec(decl, l, force) elif k=='kindselector': - decl=setkindselector(decl,decl2[k],force) + decl=setkindselector(decl, decl2[k], force) elif k=='charselector': - decl=setcharselector(decl,decl2[k],force) - elif k in ['=','typename']: + decl=setcharselector(decl, decl2[k], force) + elif k in ['=', 'typename']: if force or k not in decl: decl[k]=decl2[k] elif k=='note': pass - elif k in ['intent','check','dimension','optional','required']: + elif k in ['intent', 'check', 'dimension', 'optional', 'required']: errmess('appenddecl: "%s" not implemented.\n'%k) else: raise Exception('appenddecl: Unknown variable definition key:' + \ 
str(k)) return decl -selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z',re.I) -nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z',re.I) -callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z',re.I) +selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I) +nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I) +callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I) real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') -_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b',re.I) +_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) def _is_intent_callback(vdecl): - for a in vdecl.get('attrspec',[]): + for a in vdecl.get('attrspec', []): if _intentcallbackpattern.match(a): return 1 return 0 @@ -721,21 +721,21 @@ def _is_intent_callback(vdecl): def _resolvenameargspattern(line): line = markouterparen(line) m1=nameargspattern.match(line) - if m1: - return m1.group('name'),m1.group('args'),m1.group('result'), m1.group('bind') + if m1: + return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') m1=callnameargspattern.match(line) if m1: - return m1.group('name'),m1.group('args'),None, None - return None,[],None, None + return m1.group('name'), m1.group('args'), None, None + return None, [], None, None -def analyzeline(m,case,line): - global groupcounter,groupname,groupcache,grouplist,filepositiontext,\ - currentfilename,f77modulename,neededinterface,neededmodule,expectbegin,\ - gotnextfile,previous_context +def analyzeline(m, case, line): + global groupcounter, groupname, groupcache, grouplist, filepositiontext,\ + currentfilename, f77modulename, neededinterface, neededmodule, expectbegin,\ + gotnextfile, previous_context block=m.group('this') if case != 'multiline': previous_context = None - if expectbegin and case not in ['begin','call','callfun','type'] \ + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ and not skipemptyends and groupcounter<1: newname=os.path.basename(currentfilename).split('.')[0] outmess('analyzeline: no group yet. 
Creating program group with name "%s".\n'%newname) @@ -750,21 +750,21 @@ def analyzeline(m,case,line): groupcache[groupcounter]['name']=newname groupcache[groupcounter]['from']='fromsky' expectbegin=0 - if case in ['begin','call','callfun']: + if case in ['begin', 'call', 'callfun']: # Crack line => block,name,args,result block = block.lower() - if re.match(r'block\s*data',block,re.I): block='block data' - if re.match(r'python\s*module',block,re.I): block='python module' - name,args,result,bind = _resolvenameargspattern(m.group('after')) + if re.match(r'block\s*data', block, re.I): block='block data' + if re.match(r'python\s*module', block, re.I): block='python module' + name, args, result, bind = _resolvenameargspattern(m.group('after')) if name is None: if block=='block data': name = '_BLOCK_DATA_' else: name = '' - if block not in ['interface','block data']: + if block not in ['interface', 'block data']: outmess('analyzeline: No name/args pattern found for line.\n') - previous_context = (block,name,groupcounter) + previous_context = (block, name, groupcounter) if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) else: args=[] if '' in args: @@ -776,7 +776,7 @@ def analyzeline(m,case,line): needmodule=0 needinterface=0 - if case in ['call','callfun']: + if case in ['call', 'callfun']: needinterface=1 if 'args' not in groupcache[groupcounter]: return @@ -799,7 +799,7 @@ def analyzeline(m,case,line): grouplist[groupcounter]=[] if needmodule: if verbose>1: - outmess('analyzeline: Creating module block %s\n'%repr(f77modulename),0) + outmess('analyzeline: Creating module block %s\n'%repr(f77modulename), 0) groupname[groupcounter]='module' groupcache[groupcounter]['block']='python module' groupcache[groupcounter]['name']=f77modulename @@ -813,11 +813,11 @@ def analyzeline(m,case,line): grouplist[groupcounter]=[] if needinterface: if verbose>1: - outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter),0) + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter), 0) groupname[groupcounter]='interface' groupcache[groupcounter]['block']='interface' groupcache[groupcounter]['name']='unknown_interface' - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name']) + groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name']) groupcache[groupcounter]['body']=[] groupcache[groupcounter]['externals']=[] groupcache[groupcounter]['interfaced']=[] @@ -835,9 +835,9 @@ def analyzeline(m,case,line): groupcache[groupcounter]['from']=currentfilename else: if f77modulename and groupcounter==3: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename) + groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], currentfilename) else: - groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name']) + groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name']) for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] @@ -852,7 +852,7 @@ def analyzeline(m,case,line): if block=='type': groupcache[groupcounter]['varnames'] = [] - if case in ['call','callfun']: # set parents variables + if case in ['call', 'callfun']: # set parents variables if name not in 
groupcache[groupcounter-2]['externals']: groupcache[groupcounter-2]['externals'].append(name) groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars']) @@ -860,23 +860,23 @@ def analyzeline(m,case,line): #except: pass try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] except: pass - if block in ['function','subroutine']: # set global attributes - try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter-2]['vars']['']) + if block in ['function', 'subroutine']: # set global attributes + try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter-2]['vars']['']) except: pass if case=='callfun': # return type if result and result in groupcache[groupcounter]['vars']: if not name==result: - groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter]['vars'][result]) + groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) #if groupcounter>1: # name is interfaced try: groupcache[groupcounter-2]['interfaced'].append(name) except: pass if block=='function': t=typespattern[0].match(m.group('before')+' '+name) if t: - typespec,selector,attr,edecl=cracktypespec0(t.group('this'),t.group('after')) - updatevars(typespec,selector,attr,edecl) + typespec, selector, attr, edecl=cracktypespec0(t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) - if case in ['call','callfun']: + if case in ['call', 'callfun']: grouplist[groupcounter-1].append(groupcache[groupcounter]) grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter] del grouplist[groupcounter] @@ -887,20 +887,20 @@ def analyzeline(m,case,line): groupcounter=groupcounter-1 # end interface elif case=='entry': - name,args,result,bind=_resolvenameargspattern(m.group('after')) + name, args, result, bind=_resolvenameargspattern(m.group('after')) if name is not None: if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) else: args=[] - assert result is None,repr(result) + assert result is None, repr(result) groupcache[groupcounter]['entry'][name] = args - previous_context = ('entry',name,groupcounter) + previous_context = ('entry', name, groupcounter) elif case=='type': - typespec,selector,attr,edecl=cracktypespec0(block,m.group('after')) - last_name = updatevars(typespec,selector,attr,edecl) + typespec, selector, attr, edecl=cracktypespec0(block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) if last_name is not None: - previous_context = ('variable',last_name,groupcounter) - elif case in ['dimension','intent','optional','required','external','public','private','intrisic']: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: edecl=groupcache[groupcounter]['vars'] ll=m.group('after').strip() i=ll.find('::') @@ -909,7 +909,7 @@ def analyzeline(m,case,line): ll=ll[:i+1]+'::'+ll[i+1:] i=ll.find('::') if ll[i:]=='::' and 'args' in groupcache[groupcounter]: - outmess('All arguments will have attribute %s%s\n'%(m.group('this'),ll[:i])) + outmess('All arguments will have attribute %s%s\n'%(m.group('this'), ll[:i])) ll = ll + ','.join(groupcache[groupcounter]['args']) if i<0:i=0;pl='' else: pl=ll[:i].strip();ll=ll[i+2:] @@ 
-922,10 +922,10 @@ def analyzeline(m,case,line): for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: m1=namepattern.match(e) if not m1: - if case in ['public','private']: k='' + if case in ['public', 'private']: k='' else: print(m.groupdict()) - outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,repr(e))) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case, repr(e))) continue else: k=rmbadname1(m1.group('name')) @@ -942,14 +942,14 @@ def analyzeline(m,case,line): outmess('analyzeline: missing __user__ module (could be nothing)\n') if k!=groupcache[groupcounter]['name']: # fixes ticket 1693 outmess('analyzeline: appending intent(callback) %s'\ - ' to %s arguments\n' % (k,groupcache[groupcounter]['name'])) + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) groupcache[groupcounter]['args'].append(k) else: errmess('analyzeline: intent(callback) %s is ignored' % (k)) else: errmess('analyzeline: intent(callback) %s is already'\ ' in argument list' % (k)) - if case in ['optional','required','public','external','private','intrisic']: + if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: ap=case if 'attrspec' in edecl[k]: edecl[k]['attrspec'].append(ap) @@ -968,23 +968,23 @@ def analyzeline(m,case,line): last_name = k groupcache[groupcounter]['vars']=edecl if last_name is not None: - previous_context = ('variable',last_name,groupcounter) + previous_context = ('variable', last_name, groupcounter) elif case=='parameter': edecl=groupcache[groupcounter]['vars'] ll=m.group('after').strip()[1:-1] last_name = None for e in markoutercomma(ll).split('@,@'): try: - k,initexpr=[x.strip() for x in e.split('=')] + k, initexpr=[x.strip() for x in e.split('=')] except: - outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e,ll));continue + outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e, ll));continue params = get_parameters(edecl) k=rmbadname1(k) if k not in edecl: edecl[k]={} if '=' in edecl[k] and (not edecl[k]['=']==initexpr): - outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k,edecl[k]['='],initexpr)) - t = determineexprtype(initexpr,params) + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) if t: if t.get('typespec')=='real': tt = list(initexpr) @@ -993,11 +993,11 @@ def analyzeline(m,case,line): initexpr[m.start():m.end()].lower().replace('d', 'e')) initexpr = ''.join(tt) elif t.get('typespec')=='complex': - initexpr = initexpr[1:].lower().replace('d','e').\ - replace(',','+1j*(') + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') try: - v = eval(initexpr,{},params) - except (SyntaxError,NameError,TypeError) as msg: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n'\ % (initexpr, msg)) continue @@ -1008,7 +1008,7 @@ def analyzeline(m,case,line): last_name = k groupcache[groupcounter]['vars']=edecl if last_name is not None: - previous_context = ('variable',last_name,groupcounter) + previous_context = ('variable', last_name, groupcounter) elif case=='implicit': if m.group('after').strip().lower()=='none': groupcache[groupcounter]['implicit']=None @@ -1021,14 +1021,14 @@ def analyzeline(m,case,line): impl={} for e in markoutercomma(m.group('after')).split('@,@'): decl={} - m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z',e,re.I) + m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) if not m1: outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue m2=typespattern4implicit.match(m1.group('this')) if not m2: outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue - typespec,selector,attr,edecl=cracktypespec0(m2.group('this'),m2.group('after')) - kindselect,charselect,typename=cracktypespec(typespec,selector) + typespec, selector, attr, edecl=cracktypespec0(m2.group('this'), m2.group('after')) + kindselect, charselect, typename=cracktypespec(typespec, selector) decl['typespec']=typespec decl['kindselector']=kindselect decl['charselector']=charselect @@ -1037,13 +1037,13 @@ def analyzeline(m,case,line): if not decl[k]: del decl[k] for r in markoutercomma(m1.group('after')).split('@,@'): if '-' in r: - try: begc,endc=[x.strip() for x in r.split('-')] + try: begc, endc=[x.strip() for x in r.split('-')] except: outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n'%r);continue else: begc=endc=r.strip() if not len(begc)==len(endc)==1: outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n'%r);continue - for o in range(ord(begc),ord(endc)+1): + for o in range(ord(begc), ord(endc)+1): impl[chr(o)]=decl groupcache[groupcounter]['implicit']=impl elif case=='data': @@ -1061,13 +1061,13 @@ def analyzeline(m,case,line): dl = dl.strip() if dl.startswith(','): dl = dl[1:].strip() - ll.append([dl,il]) + ll.append([dl, il]) dl=c;il='';f=0 if f==2: dl = dl.strip() if dl.startswith(','): dl = dl[1:].strip() - ll.append([dl,il]) + ll.append([dl, il]) vars={} if 'vars' in groupcache[groupcounter]: vars=groupcache[groupcounter]['vars'] @@ -1097,13 +1097,13 @@ def analyzeline(m,case,line): if v not in vars: vars[v]={} if '=' in vars[v] and not vars[v]['=']==l[1][j:i-1]: - outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v,vars[v]['='],l[1][j:i-1])) + outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v, vars[v]['='], l[1][j:i-1])) vars[v]['=']=l[1][j:i-1] j=i last_name = v groupcache[groupcounter]['vars']=vars if last_name is not None: - previous_context = ('variable',last_name,groupcounter) + previous_context = ('variable', last_name, groupcounter) elif case=='common': line=m.group('after').strip() if not line[0]=='/':line='//'+line @@ -1114,13 +1114,13 @@ def analyzeline(m,case,line): if f>=3: bn = bn.strip() if not bn: bn='_BLNK_' - cl.append([bn,ol]) + cl.append([bn, ol]) f=f-2;bn='';ol='' if f%2: bn=bn+c else: ol=ol+c bn = bn.strip() if not bn: bn='_BLNK_' - cl.append([bn,ol]) + cl.append([bn, ol]) commonkey={} if 'common' in groupcache[groupcounter]: commonkey=groupcache[groupcounter]['common'] @@ -1132,9 +1132,9 @@ def analyzeline(m,case,line): for i in [x.strip() for x in 
markoutercomma(c[1]).split('@,@')]: if i: commonkey[c[0]].append(i) groupcache[groupcounter]['common']=commonkey - previous_context = ('common',bn,groupcounter) + previous_context = ('common', bn, groupcounter) elif case=='use': - m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z',m.group('after'),re.I) + m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I) if m1: mm=m1.groupdict() if 'use' not in groupcache[groupcounter]: @@ -1150,7 +1150,7 @@ def analyzeline(m,case,line): rl={} for l in ll: if '=' in l: - m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z',l,re.I) + m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I) if m2: rl[m2.group('local').strip()]=m2.group('use').strip() else: outmess('analyzeline: Not local=>use pattern found in %s\n'%repr(l)) @@ -1187,7 +1187,7 @@ def analyzeline(m,case,line): print(m.groupdict()) outmess('analyzeline: No code implemented for line.\n') -def appendmultiline(group, context_name,ml): +def appendmultiline(group, context_name, ml): if 'f2pymultilines' not in group: group['f2pymultilines'] = {} d = group['f2pymultilines'] @@ -1196,11 +1196,11 @@ def appendmultiline(group, context_name,ml): d[context_name].append(ml) return -def cracktypespec0(typespec,ll): +def cracktypespec0(typespec, ll): selector=None attr=None - if re.match(r'double\s*complex',typespec,re.I): typespec='double complex' - elif re.match(r'double\s*precision',typespec,re.I): typespec='double precision' + if re.match(r'double\s*complex', typespec, re.I): typespec='double complex' + elif re.match(r'double\s*precision', typespec, re.I): typespec='double precision' else: typespec=typespec.strip().lower() m1=selectpattern.match(markouterparen(ll)) if not m1: @@ -1208,25 +1208,25 @@ def cracktypespec0(typespec,ll): return d=m1.groupdict() for k in list(d.keys()): d[k]=unmarkouterparen(d[k]) - if typespec in ['complex','integer','logical','real','character','type']: + if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: selector=d['this'] ll=d['after'] i=ll.find('::') if i>=0: attr=ll[:i].strip() ll=ll[i+2:] - return typespec,selector,attr,ll + return typespec, selector, attr, ll ##### -namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z',re.I) -kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z',re.I) -charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z',re.I) -lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z',re.I) -lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z',re.I) +namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I) +kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I) +charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I) +lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I) 
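A minimal sketch of what the selector patterns just above extract from Fortran kind/length selectors before cracktypespec() post-processes them; the regexes are the ones defined in the patch, the sample selectors are illustrative only:

    import re

    kindselector = re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
    charselector = re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)

    # 'real(kind=8)' puts the value in group 'kind'; the old 'real*8' form lands in 'kind2'.
    kindselector.match('(kind=8)').group('kind')     # '8'
    kindselector.match('*8').group('kind2')          # '8'

    # 'character*10' vs 'character(len=10,kind=1)'; the 'lenkind' group is split
    # further by lenkindpattern once markoutercomma() has marked the top-level comma.
    charselector.match('*10').group('charlen')               # '10'
    charselector.match('(len=10,kind=1)').group('lenkind')   # 'len=10,kind=1'
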
+lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I) def removespaces(expr): expr=expr.strip() if len(expr)<=1: return expr expr2=expr[0] - for i in range(1,len(expr)-1): + for i in range(1, len(expr)-1): if expr[i]==' ' and \ ((expr[i+1] in "()[]{}=+-/* ") or (expr[i-1] in "()[]{}=+-/* ")): continue expr2=expr2+expr[i] @@ -1238,20 +1238,20 @@ def markinnerspaces(line): cc1='"' cb='' for c in line: - if cb=='\\' and c in ['\\','\'','"']: + if cb=='\\' and c in ['\\', '\'', '"']: l=l+c; cb=c continue - if f==0 and c in ['\'','"']: cc=c; cc1={'\'':'"','"':'\''}[c] + if f==0 and c in ['\'', '"']: cc=c; cc1={'\'':'"','"':'\''}[c] if c==cc:f=f+1 elif c==cc:f=f-1 elif c==' ' and f==1: l=l+'@_@'; continue l=l+c;cb=c return l -def updatevars(typespec,selector,attrspec,entitydecl): - global groupcache,groupcounter +def updatevars(typespec, selector, attrspec, entitydecl): + global groupcache, groupcounter last_name = None - kindselect,charselect,typename=cracktypespec(typespec,selector) + kindselect, charselect, typename=cracktypespec(typespec, selector) if attrspec: attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')] l = [] @@ -1268,8 +1268,8 @@ def updatevars(typespec,selector,attrspec,entitydecl): el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')] el1=[] for e in el: - for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)),comma=' ').split('@ @')]: - if e1: el1.append(e1.replace('@_@',' ')) + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: el1.append(e1.replace('@_@', ' ')) for e in el1: m=namepattern.match(e) if not m: @@ -1283,29 +1283,29 @@ def updatevars(typespec,selector,attrspec,entitydecl): if not_has_typespec: edecl['typespec']=typespec elif typespec and (not typespec==edecl['typespec']): - outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typespec'],typespec)) + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typespec'], typespec)) if 'kindselector' not in edecl: edecl['kindselector']=copy.copy(kindselect) elif kindselect: for k in list(kindselect.keys()): if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]): - outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k])) + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['kindselector'][k], kindselect[k])) else: edecl['kindselector'][k]=copy.copy(kindselect[k]) if 'charselector' not in edecl and charselect: if not_has_typespec: edecl['charselector']=charselect else: errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \ - %(ename,charselect)) + %(ename, charselect)) elif charselect: for k in list(charselect.keys()): if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]): - outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k])) + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". 
Ignoring.\n' % (k, ename, edecl['charselector'][k], charselect[k])) else: edecl['charselector'][k]=copy.copy(charselect[k]) if 'typename' not in edecl: edecl['typename']=typename elif typename and (not edecl['typename']==typename): - outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typename'],typename)) + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typename'], typename)) if 'attrspec' not in edecl: edecl['attrspec']=copy.copy(attrspec) elif attrspec: @@ -1322,7 +1322,7 @@ def updatevars(typespec,selector,attrspec,entitydecl): m1=lenarraypattern.match(markouterparen(m.group('after'))) if m1: d1=m1.groupdict() - for lk in ['len','array','init']: + for lk in ['len', 'array', 'init']: if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2'] for k in list(d1.keys()): if d1[k] is not None: d1[k]=unmarkouterparen(d1[k]) @@ -1334,7 +1334,7 @@ def updatevars(typespec,selector,attrspec,entitydecl): else: d1['array']=d1['array']+','+d1['len'] del d1['len'] - errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec,e,typespec,ename,d1['array'])) + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec, e, typespec, ename, d1['array'])) if 'array' in d1: dm = 'dimension(%s)'%d1['array'] if 'attrspec' not in edecl or (not edecl['attrspec']): @@ -1345,11 +1345,11 @@ def updatevars(typespec,selector,attrspec,entitydecl): if dm1[:9]=='dimension' and dm1!=dm: del edecl['attrspec'][-1] errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \ - % (ename,dm1,dm)) + % (ename, dm1, dm)) break if 'len' in d1: - if typespec in ['complex','integer','logical','real']: + if typespec in ['complex', 'integer', 'logical', 'real']: if ('kindselector' not in edecl) or (not edecl['kindselector']): edecl['kindselector']={} edecl['kindselector']['*']=d1['len'] @@ -1361,7 +1361,7 @@ def updatevars(typespec,selector,attrspec,entitydecl): edecl['charselector']['*']=d1['len'] if 'init' in d1: if '=' in edecl and (not edecl['=']==d1['init']): - outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['='],d1['init'])) + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". 
Ignoring.\n' % (ename, edecl['='], d1['init'])) else: edecl['=']=d1['init'] else: @@ -1375,12 +1375,12 @@ def updatevars(typespec,selector,attrspec,entitydecl): last_name = ename return last_name -def cracktypespec(typespec,selector): +def cracktypespec(typespec, selector): kindselect=None charselect=None typename=None if selector: - if typespec in ['complex','integer','logical','real']: + if typespec in ['complex', 'integer', 'logical', 'real']: kindselect=kindselector.match(selector) if not kindselect: outmess('cracktypespec: no kindselector pattern found for %s\n'%(repr(selector))) @@ -1390,7 +1390,7 @@ def cracktypespec(typespec,selector): del kindselect['kind2'] for k in list(kindselect.keys()): if not kindselect[k]: del kindselect[k] - for k,i in list(kindselect.items()): + for k, i in list(kindselect.items()): kindselect[k] = rmbadname1(i) elif typespec=='character': charselect=charselector.match(selector) @@ -1403,7 +1403,7 @@ def cracktypespec(typespec,selector): if charselect['lenkind']: lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind'])) lenkind=lenkind.groupdict() - for lk in ['len','kind']: + for lk in ['len', 'kind']: if lenkind[lk+'2']: lenkind[lk]=lenkind[lk+'2'] charselect[lk]=lenkind[lk] @@ -1411,15 +1411,15 @@ def cracktypespec(typespec,selector): del charselect['lenkind'] for k in list(charselect.keys()): if not charselect[k]: del charselect[k] - for k,i in list(charselect.items()): + for k, i in list(charselect.items()): charselect[k] = rmbadname1(i) elif typespec=='type': - typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)',selector,re.I) + typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I) if typename: typename=typename.group('name') else: outmess('cracktypespec: no typename found in %s\n'%(repr(typespec+selector))) else: outmess('cracktypespec: no selector used for %s\n'%(repr(selector))) - return kindselect,charselect,typename + return kindselect, charselect, typename ###### def setattrspec(decl,attr,force=0): if not decl: @@ -1479,7 +1479,7 @@ def getblockname(block,unknown='unknown'): def setmesstext(block): global filepositiontext try: - filepositiontext='In: %s:%s\n'%(block['from'],block['name']) + filepositiontext='In: %s:%s\n'%(block['from'], block['name']) except: pass @@ -1498,7 +1498,7 @@ def get_useparameters(block, param_map=None): usedict = get_usedict(block) if not usedict: return param_map - for usename,mapping in list(usedict.items()): + for usename, mapping in list(usedict.items()): usename = usename.lower() if usename not in f90modulevars: outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name'))) @@ -1510,10 +1510,10 @@ def get_useparameters(block, param_map=None): # XXX: apply mapping if mapping: errmess('get_useparameters: mapping for %s not impl.' 
% (mapping)) - for k,v in list(params.items()): + for k, v in list(params.items()): if k in param_map: outmess('get_useparameters: overriding parameter %s with'\ - ' value from module %s' % (repr(k),repr(usename))) + ' value from module %s' % (repr(k), repr(usename))) param_map[k] = v return param_map @@ -1525,11 +1525,11 @@ def postcrack2(block,tab='',param_map=None): if isinstance(block, list): ret = [] for g in block: - g = postcrack2(g,tab=tab+'\t',param_map=param_map) + g = postcrack2(g, tab=tab+'\t', param_map=param_map) ret.append(g) return ret setmesstext(block) - outmess('%sBlock: %s\n'%(tab,block['name']),0) + outmess('%sBlock: %s\n'%(tab, block['name']), 0) if param_map is None: param_map = get_useparameters(block) @@ -1546,7 +1546,7 @@ def postcrack2(block,tab='',param_map=None): kind['kind'] = param_map[val] new_body = [] for b in block['body']: - b = postcrack2(b,tab=tab+'\t',param_map=param_map) + b = postcrack2(b, tab=tab+'\t', param_map=param_map) new_body.append(b) block['body'] = new_body @@ -1558,13 +1558,13 @@ def postcrack(block,args=None,tab=''): function return values determine expression types if in argument list """ - global usermodules,onlyfunctions + global usermodules, onlyfunctions if isinstance(block, list): gret=[] uret=[] for g in block: setmesstext(g) - g=postcrack(g,tab=tab+'\t') + g=postcrack(g, tab=tab+'\t') if 'name' in g and '__user__' in g['name']: # sort user routines to appear first uret.append(g) else: @@ -1575,7 +1575,7 @@ def postcrack(block,args=None,tab=''): raise Exception('postcrack: Expected block dictionary instead of ' + \ str(block)) if 'name' in block and not block['name']=='unknown_interface': - outmess('%sBlock: %s\n'%(tab,block['name']),0) + outmess('%sBlock: %s\n'%(tab, block['name']), 0) blocktype=block['block'] block=analyzeargs(block) block=analyzecommon(block) @@ -1583,7 +1583,7 @@ def postcrack(block,args=None,tab=''): block['sortvars']=sortvarnames(block['vars']) if 'args' in block and block['args']: args=block['args'] - block['body']=analyzebody(block,args,tab=tab) + block['body']=analyzebody(block, args, tab=tab) userisdefined=[] ## fromuser = [] @@ -1610,8 +1610,8 @@ def postcrack(block,args=None,tab=''): mname='unknown__user__routines' if mname in userisdefined: i=1 - while '%s_%i'%(mname,i) in userisdefined: i=i+1 - mname='%s_%i'%(mname,i) + while '%s_%i'%(mname, i) in userisdefined: i=i+1 + mname='%s_%i'%(mname, i) interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'} for e in block['externals']: ## if e in fromuser: @@ -1687,7 +1687,7 @@ def analyzecommon(block): for k in list(block['common'].keys()): comvars=[] for e in block['common'][k]: - m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z',e,re.I) + m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I) if m: dims=[] if m.group('dims'): @@ -1705,7 +1705,7 @@ def analyzecommon(block): if n not in commonvars: commonvars.append(n) else: n=e - errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(e,k)) + errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(e, k)) comvars.append(n) block['common'][k]=comvars if 'commonvars' not in block: @@ -1715,12 +1715,12 @@ def analyzecommon(block): return block def analyzebody(block,args,tab=''): - global usermodules,skipfuncs,onlyfuncs,f90modulevars + global usermodules, skipfuncs, onlyfuncs, f90modulevars setmesstext(block) body=[] for b in block['body']: b['parent_block'] = block - if b['block'] 
in ['function','subroutine']: + if b['block'] in ['function', 'subroutine']: if args is not None and b['name'] not in args: continue else: @@ -1732,11 +1732,11 @@ def analyzebody(block,args,tab=''): b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True) else: as_=args - b=postcrack(b,as_,tab=tab+'\t') + b=postcrack(b, as_, tab=tab+'\t') if b['block']=='interface' and not b['body']: if 'f2pyenhancements' not in b: continue - if b['block'].replace(' ','')=='pythonmodule': + if b['block'].replace(' ', '')=='pythonmodule': usermodules.append(b) else: if b['block']=='module': @@ -1755,113 +1755,113 @@ def buildimplicitrules(block): outmess('buildimplicitrules: no implicit rules for routine %s.\n'%repr(block['name'])) else: for k in list(block['implicit'].keys()): - if block['implicit'][k].get('typespec') not in ['static','automatic']: + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: implicitrules[k]=block['implicit'][k] else: attrrules[k]=block['implicit'][k]['typespec'] - return implicitrules,attrrules + return implicitrules, attrrules def myeval(e,g=None,l=None): - r = eval(e,g,l) - if type(r) in [type(0),type(0.0)]: + r = eval(e, g, l) + if type(r) in [type(0), type(0.0)]: return r raise ValueError('r=%r' % (r)) -getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I) -def getlincoef(e,xset): # e = a*x+b ; x in xset +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) +def getlincoef(e, xset): # e = a*x+b ; x in xset try: - c = int(myeval(e,{},{})) - return 0,c,None + c = int(myeval(e, {}, {})) + return 0, c, None except: pass if getlincoef_re_1.match(e): - return 1,0,e + return 1, 0, e len_e = len(e) for x in xset: if len(x)>len_e: continue if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e): # skip function calls having x as an argument, e.g max(1, x) continue - re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)',re.I) + re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)', re.I) m = re_1.match(e) if m: try: m1 = re_1.match(e) while m1: - ee = '%s(%s)%s'%(m1.group('before'),0,m1.group('after')) + ee = '%s(%s)%s'%(m1.group('before'), 0, m1.group('after')) m1 = re_1.match(ee) - b = myeval(ee,{},{}) + b = myeval(ee, {}, {}) m1 = re_1.match(e) while m1: - ee = '%s(%s)%s'%(m1.group('before'),1,m1.group('after')) + ee = '%s(%s)%s'%(m1.group('before'), 1, m1.group('after')) m1 = re_1.match(ee) - a = myeval(ee,{},{}) - b + a = myeval(ee, {}, {}) - b m1 = re_1.match(e) while m1: - ee = '%s(%s)%s'%(m1.group('before'),0.5,m1.group('after')) + ee = '%s(%s)%s'%(m1.group('before'), 0.5, m1.group('after')) m1 = re_1.match(ee) - c = myeval(ee,{},{}) + c = myeval(ee, {}, {}) # computing another point to be sure that expression is linear m1 = re_1.match(e) while m1: - ee = '%s(%s)%s'%(m1.group('before'),1.5,m1.group('after')) + ee = '%s(%s)%s'%(m1.group('before'), 1.5, m1.group('after')) m1 = re_1.match(ee) - c2 = myeval(ee,{},{}) + c2 = myeval(ee, {}, {}) if (a*0.5+b==c and a*1.5+b==c2): - return a,b,x + return a, b, x except: pass break - return None,None,None + return None, None, None _varname_match = re.compile(r'\A[a-z]\w*\Z').match def getarrlen(dl,args,star='*'): edl = [] - try: edl.append(myeval(dl[0],{},{})) + try: edl.append(myeval(dl[0], {}, {})) except: edl.append(dl[0]) - try: edl.append(myeval(dl[1],{},{})) + try: edl.append(myeval(dl[1], {}, {})) except: edl.append(dl[1]) if isinstance(edl[0], int): p1 = 1-edl[0] if p1==0: d = str(dl[1]) - elif p1<0: d = '%s-%s'%(dl[1],-p1) - else: d = '%s+%s'%(dl[1],p1) + elif p1<0: d = '%s-%s'%(dl[1], -p1) + else: 
d = '%s+%s'%(dl[1], p1) elif isinstance(edl[1], int): p1 = 1+edl[1] if p1==0: d='-(%s)' % (dl[0]) - else: d='%s-(%s)' % (p1,dl[0]) - else: d = '%s-(%s)+1'%(dl[1],dl[0]) - try: return repr(myeval(d,{},{})),None,None + else: d='%s-(%s)' % (p1, dl[0]) + else: d = '%s-(%s)+1'%(dl[1], dl[0]) + try: return repr(myeval(d, {}, {})), None, None except: pass - d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args) - if None not in [d1[0],d2[0]]: - if (d1[0],d2[0])==(0,0): - return repr(d2[1]-d1[1]+1),None,None + d1, d2=getlincoef(dl[0], args), getlincoef(dl[1], args) + if None not in [d1[0], d2[0]]: + if (d1[0], d2[0])==(0, 0): + return repr(d2[1]-d1[1]+1), None, None b = d2[1] - d1[1] + 1 - d1 = (d1[0],0,d1[2]) - d2 = (d2[0],b,d2[2]) + d1 = (d1[0], 0, d1[2]) + d2 = (d2[0], b, d2[2]) if d1[0]==0 and d2[2] in args: - if b<0: return '%s * %s - %s'%(d2[0],d2[2],-b),d2[2],'+%s)/(%s)'%(-b,d2[0]) - elif b: return '%s * %s + %s'%(d2[0],d2[2],b),d2[2],'-%s)/(%s)'%(b,d2[0]) - else: return '%s * %s'%(d2[0],d2[2]),d2[2],')/(%s)'%(d2[0]) + if b<0: return '%s * %s - %s'%(d2[0], d2[2], -b), d2[2], '+%s)/(%s)'%(-b, d2[0]) + elif b: return '%s * %s + %s'%(d2[0], d2[2], b), d2[2], '-%s)/(%s)'%(b, d2[0]) + else: return '%s * %s'%(d2[0], d2[2]), d2[2], ')/(%s)'%(d2[0]) if d2[0]==0 and d1[2] in args: - if b<0: return '%s * %s - %s'%(-d1[0],d1[2],-b),d1[2],'+%s)/(%s)'%(-b,-d1[0]) - elif b: return '%s * %s + %s'%(-d1[0],d1[2],b),d1[2],'-%s)/(%s)'%(b,-d1[0]) - else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0]) + if b<0: return '%s * %s - %s'%(-d1[0], d1[2], -b), d1[2], '+%s)/(%s)'%(-b, -d1[0]) + elif b: return '%s * %s + %s'%(-d1[0], d1[2], b), d1[2], '-%s)/(%s)'%(b, -d1[0]) + else: return '%s * %s'%(-d1[0], d1[2]), d1[2], ')/(%s)'%(-d1[0]) if d1[2]==d2[2] and d1[2] in args: a = d2[0] - d1[0] - if not a: return repr(b),None,None - if b<0: return '%s * %s - %s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a) - elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a) - else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a) + if not a: return repr(b), None, None + if b<0: return '%s * %s - %s'%(a, d1[2], -b), d2[2], '+%s)/(%s)'%(-b, a) + elif b: return '%s * %s + %s'%(a, d1[2], b), d2[2], '-%s)/(%s)'%(b, a) + else: return '%s * %s'%(a, d1[2]), d2[2], ')/(%s)'%(a) if d1[0]==d2[0]==1: c = str(d1[2]) if c not in args: if _varname_match(c): outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) c = '(%s)'%c - if b==0: d='%s-%s' % (d2[2],c) - elif b<0: d='%s-%s-%s' % (d2[2],c,-b) - else: d='%s-%s+%s' % (d2[2],c,b) + if b==0: d='%s-%s' % (d2[2], c) + elif b<0: d='%s-%s-%s' % (d2[2], c, -b) + else: d='%s-%s+%s' % (d2[2], c, b) elif d1[0]==0: c2 = str(d2[2]) if c2 not in args: @@ -1870,11 +1870,11 @@ def getarrlen(dl,args,star='*'): c2 = '(%s)'%c2 if d2[0]==1: pass elif d2[0]==-1: c2='-%s' %c2 - else: c2='%s*%s'%(d2[0],c2) + else: c2='%s*%s'%(d2[0], c2) if b==0: d=c2 - elif b<0: d='%s-%s' % (c2,-b) - else: d='%s+%s' % (c2,b) + elif b<0: d='%s-%s' % (c2, -b) + else: d='%s+%s' % (c2, b) elif d2[0]==0: c1 = str(d1[2]) if c1 not in args: @@ -1883,12 +1883,12 @@ def getarrlen(dl,args,star='*'): c1 = '(%s)'%c1 if d1[0]==1: c1='-%s'%c1 elif d1[0]==-1: c1='+%s'%c1 - elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1) - else: c1 = '-%s*%s' % (d1[0],c1) + elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1) + else: c1 = '-%s*%s' % (d1[0], c1) if b==0: d=c1 - elif b<0: d='%s-%s' % (c1,-b) - else: d='%s+%s' % (c1,b) + elif b<0: d='%s-%s' % (c1, -b) + else: d='%s+%s' % (c1, b) else: c1 = str(d1[2]) if c1 not in args: @@ -1897,8 +1897,8 @@ def 
getarrlen(dl,args,star='*'): c1 = '(%s)'%c1 if d1[0]==1: c1='-%s'%c1 elif d1[0]==-1: c1='+%s'%c1 - elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1) - else: c1 = '-%s*%s' % (d1[0],c1) + elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1) + else: c1 = '-%s*%s' % (d1[0], c1) c2 = str(d2[2]) if c2 not in args: @@ -1907,25 +1907,25 @@ def getarrlen(dl,args,star='*'): c2 = '(%s)'%c2 if d2[0]==1: pass elif d2[0]==-1: c2='-%s' %c2 - else: c2='%s*%s'%(d2[0],c2) + else: c2='%s*%s'%(d2[0], c2) - if b==0: d='%s%s' % (c2,c1) - elif b<0: d='%s%s-%s' % (c2,c1,-b) - else: d='%s%s+%s' % (c2,c1,b) - return d,None,None + if b==0: d='%s%s' % (c2, c1) + elif b<0: d='%s%s-%s' % (c2, c1, -b) + else: d='%s%s+%s' % (c2, c1, b) + return d, None, None -word_pattern = re.compile(r'\b[a-z][\w$]*\b',re.I) +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) def _get_depend_dict(name, vars, deps): if name in vars: - words = vars[name].get('depend',[]) + words = vars[name].get('depend', []) if '=' in vars[name] and not isstring(vars[name]): for word in word_pattern.findall(vars[name]['=']): if word not in words and word in vars: words.append(word) for word in words[:]: - for w in deps.get(word,[]) \ + for w in deps.get(word, []) \ or _get_depend_dict(word, vars, deps): if w not in words: words.append(w) @@ -1999,9 +1999,9 @@ def _selected_real_kind_func(p, r=0, radix=0): def get_parameters(vars, global_params={}): params = copy.copy(global_params) g_params = copy.copy(global_params) - for name,func in [('kind',_kind_func), - ('selected_int_kind',_selected_int_kind_func), - ('selected_real_kind',_selected_real_kind_func), + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), ]: if name not in g_params: g_params[name] = func @@ -2009,22 +2009,22 @@ def get_parameters(vars, global_params={}): for n in get_sorted_names(vars): if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: param_names.append(n) - kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)',re.I) - selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)',re.I) - selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)',re.I) + kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) + selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) for n in param_names: if '=' in vars[n]: v = vars[n]['='] if islogical(vars[n]): v = v.lower() for repl in [ - ('.false.','False'), - ('.true.','True'), + ('.false.', 'False'), + ('.true.', 'True'), #TODO: test .eq., .neq., etc replacements. 
]: v = v.replace(*repl) - v = kind_re.sub(r'kind("\1")',v) - v = selected_int_kind_re.sub(r'selected_int_kind(\1)',v) + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) if isinteger(vars[n]) and not selected_kind_re.match(v): v = v.split('_')[0] if isdouble(vars[n]): @@ -2037,11 +2037,11 @@ def get_parameters(vars, global_params={}): if v[0]=='(' and v[-1]==')': l = markoutercomma(v[1:-1]).split('@,@') try: - params[n] = eval(v,g_params,params) + params[n] = eval(v, g_params, params) except Exception as msg: params[n] = v #print params - outmess('get_parameters: got "%s" on %s\n' % (msg,repr(v))) + outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) if isstring(vars[n]) and isinstance(params[n], int): params[n] = chr(params[n]) nl = n.lower() @@ -2052,30 +2052,30 @@ def get_parameters(vars, global_params={}): outmess('get_parameters:parameter %s does not have value?!\n'%(repr(n))) return params -def _eval_length(length,params): - if length in ['(:)','(*)','*']: +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: return '(*)' - return _eval_scalar(length,params) + return _eval_scalar(length, params) _is_kind_number = re.compile(r'\d+_').match -def _eval_scalar(value,params): +def _eval_scalar(value, params): if _is_kind_number(value): value = value.split('_')[0] try: - value = str(eval(value,{},params)) + value = str(eval(value, {}, params)) except (NameError, SyntaxError): return value except Exception as msg: errmess('"%s" in evaluating %r '\ '(available names: %s)\n' \ - % (msg,value,list(params.keys()))) + % (msg, value, list(params.keys()))) return value def analyzevars(block): global f90modulevars setmesstext(block) - implicitrules,attrrules=buildimplicitrules(block) + implicitrules, attrrules=buildimplicitrules(block) vars=copy.copy(block['vars']) if block['block']=='function' and block['name'] not in vars: vars[block['name']]={} @@ -2084,9 +2084,9 @@ def analyzevars(block): if 'attrspec' in block['vars']['']: gen=block['vars']['']['attrspec'] for n in list(vars.keys()): - for k in ['public','private']: + for k in ['public', 'private']: if k in gen: - vars[n]=setattrspec(vars[n],k) + vars[n]=setattrspec(vars[n], k) svars=[] args = block['args'] for a in args: @@ -2109,10 +2109,10 @@ def analyzevars(block): try: dep_matches[n] except KeyError: - dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match + dep_matches[n] = re.compile(r'.*\b%s\b'%(v), re.I).match for n in svars: if n[0] in list(attrrules.keys()): - vars[n]=setattrspec(vars[n],attrrules[n[0]]) + vars[n]=setattrspec(vars[n], attrrules[n[0]]) if 'typespec' not in vars[n]: if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): if implicitrules: @@ -2124,15 +2124,15 @@ def analyzevars(block): vars[n][k]=implicitrules[ln0][k] elif k=='attrspec': for l in implicitrules[ln0][k]: - vars[n]=setattrspec(vars[n],l) + vars[n]=setattrspec(vars[n], l) elif n in block['args']: - outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n),block['name'])) + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n), block['name'])) if 'charselector' in vars[n]: if 'len' in vars[n]['charselector']: l = vars[n]['charselector']['len'] try: - l = str(eval(l,{},params)) + l = str(eval(l, {}, params)) except: pass vars[n]['charselector']['len'] = l @@ -2141,7 +2141,7 @@ def analyzevars(block): if 'kind' in vars[n]['kindselector']: l = vars[n]['kindselector']['kind'] try: - l = str(eval(l,{},params)) + l = 
str(eval(l, {}, params)) except: pass vars[n]['kindselector']['kind'] = l @@ -2151,14 +2151,14 @@ def analyzevars(block): attr=vars[n]['attrspec'] attr.reverse() vars[n]['attrspec']=[] - dim,intent,depend,check,note=None,None,None,None,None + dim, intent, depend, check, note=None, None, None, None, None for a in attr: if a[:9]=='dimension': dim=(a[9:].strip())[1:-1] elif a[:6]=='intent': intent=(a[6:].strip())[1:-1] elif a[:6]=='depend': depend=(a[6:].strip())[1:-1] elif a[:5]=='check': check=(a[5:].strip())[1:-1] elif a[:4]=='note': note=(a[4:].strip())[1:-1] - else: vars[n]=setattrspec(vars[n],a) + else: vars[n]=setattrspec(vars[n], a) if intent: if 'intent' not in vars[n]: vars[n]['intent']=[] @@ -2167,8 +2167,8 @@ def analyzevars(block): vars[n]['intent'].append(c) intent=None if note: - note=note.replace('\\n\\n','\n\n') - note=note.replace('\\n ','\n') + note=note.replace('\\n\\n', '\n\n') + note=note.replace('\\n ', '\n') if 'note' not in vars[n]: vars[n]['note']=[note] else: @@ -2197,23 +2197,23 @@ def analyzevars(block): if d in params: d = str(params[d]) for p in list(params.keys()): - m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)',d,re.I) + m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)', d, re.I) if m: #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`)) d = m.group('before')+str(params[p])+m.group('after') if d==star: dl = [star] else: - dl=markoutercomma(d,':').split('@:@') + dl=markoutercomma(d, ':').split('@:@') if len(dl)==2 and '*' in dl: # e.g. dimension(5:*) dl = ['*'] d = '*' - if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]] + if len(dl)==1 and not dl[0]==star: dl = ['1', dl[0]] if len(dl)==2: - d,v,di = getarrlen(dl,list(block['vars'].keys())) + d, v, di = getarrlen(dl, list(block['vars'].keys())) if d[:4] == '1 * ': d = d[4:] if di and di[-4:] == '/(1)': di = di[:-4] - if v: savelindims[d] = v,di + if v: savelindims[d] = v, di vars[n]['dimension'].append(d) if 'dimension' in vars[n]: if isintent_c(vars[n]): @@ -2226,9 +2226,9 @@ def analyzevars(block): if '*' in d: d = d['*'] errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\ - %(d,n, + %(d, n, ','.join(vars[n]['dimension']), - n,','.join(vars[n]['dimension']+[d]))) + n, ','.join(vars[n]['dimension']+[d]))) vars[n]['dimension'].append(d) del vars[n]['charselector'] if 'intent' not in vars[n]: @@ -2253,14 +2253,14 @@ def analyzevars(block): #origd = d if d not in vars: if d in savelindims: - pd,ad='(',savelindims[d][1] + pd, ad='(', savelindims[d][1] d = savelindims[d][0] else: for r in block['args']: #for r in block['vars'].iterkeys(): if r not in vars: continue - if re.match(r'.*?\b'+r+r'\b',d,re.I): + if re.match(r'.*?\b'+r+r'\b', d, re.I): ddeps.append(r) if d in vars: if 'attrspec' in vars[d]: @@ -2275,22 +2275,22 @@ def analyzevars(block): and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): vars[d]['depend']=[n] if ni>1: - vars[d]['=']='%s%s(%s,%s)%s'% (pd,shape_macro,n,i,ad) + vars[d]['=']='%s%s(%s,%s)%s'% (pd, shape_macro, n, i, ad) else: - vars[d]['=']='%slen(%s)%s'% (pd,n,ad) + vars[d]['=']='%slen(%s)%s'% (pd, n, ad) # /---< no check if 1 and 'check' not in vars[d]: if ni>1: vars[d]['check']=['%s%s(%s,%i)%s==%s'\ - %(pd,shape_macro,n,i,ad,d)] + %(pd, shape_macro, n, i, ad, d)] else: - vars[d]['check']=['%slen(%s)%s>=%s'%(pd,n,ad,d)] + vars[d]['check']=['%slen(%s)%s>=%s'%(pd, n, ad, d)] if 'attrspec' not in vars[d]: vars[d]['attrspec']=['optional'] if 
('optional' not in vars[d]['attrspec']) and\ ('required' not in vars[d]['attrspec']): vars[d]['attrspec'].append('optional') - elif d not in ['*',':']: + elif d not in ['*', ':']: #/----< no check #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d)) #else: vars[n]['check'].append('len(%s)>=%s'%(n,d)) @@ -2325,11 +2325,11 @@ def analyzevars(block): vars[n]['attrspec'].append('optional') if 'depend' not in vars[n]: vars[n]['depend']=[] - for v,m in list(dep_matches.items()): + for v, m in list(dep_matches.items()): if m(vars[n]['=']): vars[n]['depend'].append(v) if not vars[n]['depend']: del vars[n]['depend'] if isscalar(vars[n]): - vars[n]['='] = _eval_scalar(vars[n]['='],params) + vars[n]['='] = _eval_scalar(vars[n]['='], params) for n in list(vars.keys()): if n==block['name']: # n is block name @@ -2337,38 +2337,38 @@ def analyzevars(block): block['note']=vars[n]['note'] if block['block']=='function': if 'result' in block and block['result'] in vars: - vars[n]=appenddecl(vars[n],vars[block['result']]) + vars[n]=appenddecl(vars[n], vars[block['result']]) if 'prefix' in block: pr=block['prefix']; ispure=0; isrec=1 - pr1=pr.replace('pure','') + pr1=pr.replace('pure', '') ispure=(not pr==pr1) - pr=pr1.replace('recursive','') + pr=pr1.replace('recursive', '') isrec=(not pr==pr1) m=typespattern[0].match(pr) if m: - typespec,selector,attr,edecl=cracktypespec0(m.group('this'),m.group('after')) - kindselect,charselect,typename=cracktypespec(typespec,selector) + typespec, selector, attr, edecl=cracktypespec0(m.group('this'), m.group('after')) + kindselect, charselect, typename=cracktypespec(typespec, selector) vars[n]['typespec']=typespec if kindselect: if 'kind' in kindselect: try: - kindselect['kind'] = eval(kindselect['kind'],{},params) + kindselect['kind'] = eval(kindselect['kind'], {}, params) except: pass vars[n]['kindselector']=kindselect if charselect: vars[n]['charselector']=charselect if typename: vars[n]['typename']=typename - if ispure: vars[n]=setattrspec(vars[n],'pure') - if isrec: vars[n]=setattrspec(vars[n],'recursive') + if ispure: vars[n]=setattrspec(vars[n], 'pure') + if isrec: vars[n]=setattrspec(vars[n], 'recursive') else: outmess('analyzevars: prefix (%s) were not used\n'%repr(block['prefix'])) - if not block['block'] in ['module','pythonmodule','python module','block data']: + if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: if 'commonvars' in block: neededvars=copy.copy(block['args']+block['commonvars']) else: neededvars=copy.copy(block['args']) for n in list(vars.keys()): - if l_or(isintent_callback,isintent_aux)(vars[n]): + if l_or(isintent_callback, isintent_aux)(vars[n]): neededvars.append(n) if 'entry' in block: neededvars.extend(list(block['entry'].keys())) @@ -2381,7 +2381,7 @@ def analyzevars(block): neededvars.append(block['result']) else: neededvars.append(block['name']) - if block['block'] in ['subroutine','function']: + if block['block'] in ['subroutine', 'function']: name = block['name'] if name in vars and 'intent' in vars[name]: block['intent'] = vars[name]['intent'] @@ -2392,13 +2392,13 @@ def analyzevars(block): del vars[n] return vars -analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z',re.I) +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) def expr2name(a, block, args=[]): orig_a = a a_is_expr = not analyzeargs_re_1.match(a) if a_is_expr: # `a` is an expression - implicitrules,attrrules=buildimplicitrules(block) - at=determineexprtype(a,block['vars'],implicitrules) + implicitrules, 
attrrules=buildimplicitrules(block) + at=determineexprtype(a, block['vars'], implicitrules) na='e_' for c in a: c = c.lower() @@ -2423,12 +2423,12 @@ def expr2name(a, block, args=[]): else: block['vars'][a]={} if 'externals' in block and orig_a in block['externals']+block['interfaced']: - block['vars'][a]=setattrspec(block['vars'][a],'external') + block['vars'][a]=setattrspec(block['vars'][a], 'external') return a def analyzeargs(block): setmesstext(block) - implicitrules,attrrules=buildimplicitrules(block) + implicitrules, attrrules=buildimplicitrules(block) if 'args' not in block: block['args']=[] args=[] @@ -2437,7 +2437,7 @@ def analyzeargs(block): args.append(a) block['args']=args if 'entry' in block: - for k,args1 in list(block['entry'].items()): + for k, args1 in list(block['entry'].items()): for a in args1: if a not in block['vars']: block['vars'][a]={} @@ -2452,11 +2452,11 @@ def analyzeargs(block): block['vars'][block['result']]={} return block -determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z',re.I) -determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P<name>[\w]+)|)\Z',re.I) -determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P<name>[\w]+)|)\Z',re.I) -determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I) -determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z',re.I) +determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P<name>[\w]+)|)\Z', re.I) +determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P<name>[\w]+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I) def _ensure_exprdict(r): if isinstance(r, int): return {'typespec':'integer'} @@ -2484,18 +2484,18 @@ def determineexprtype(expr,vars,rules={}): if 'name' in m.groupdict() and m.group('name'): outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr)) return {'typespec':'real'} - for op in ['+','-','*','/']: - for e in [x.strip() for x in markoutercomma(expr,comma=op).split('@'+op+'@')]: + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@'+op+'@')]: if e in vars: return _ensure_exprdict(vars[e]) t={} if determineexprtype_re_4.match(expr): # in parenthesis - t=determineexprtype(expr[1:-1],vars,rules) + t=determineexprtype(expr[1:-1], vars, rules) else: m = determineexprtype_re_5.match(expr) if m: rn=m.group('name') - t=determineexprtype(m.group('name'),vars,rules) + t=determineexprtype(m.group('name'), vars, rules) if t and 'attrspec' in t: del t['attrspec'] if not t: @@ -2514,12 +2514,12 @@ def crack2fortrangen(block,tab='\n', as_interface=False): ret='' if isinstance(block, list): for g in block: - if g and g['block'] in ['function','subroutine']: + if g and g['block'] in ['function', 'subroutine']: if g['name'] in skipfuncs: continue if onlyfuncs and g['name'] not in onlyfuncs: continue - ret=ret+crack2fortrangen(g,tab,as_interface=as_interface) + ret=ret+crack2fortrangen(g, tab, as_interface=as_interface) return ret prefix='' name='' @@ -2540,20 +2540,20 @@ def crack2fortrangen(block,tab='\n', as_interface=False): f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): - f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k]) - intent_lst = block.get('intent',[])[:] + f2pyenhancements = '%s%s%s %s'%(f2pyenhancements, tab+tabchar, k, 
block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] if blocktype=='function' and 'callback' in intent_lst: intent_lst.remove('callback') if intent_lst: f2pyenhancements = '%s%sintent(%s) %s'%\ - (f2pyenhancements,tab+tabchar, - ','.join(intent_lst),name) + (f2pyenhancements, tab+tabchar, + ','.join(intent_lst), name) use='' if 'use' in block: - use=use2fortran(block['use'],tab+tabchar) + use=use2fortran(block['use'], tab+tabchar) common='' if 'common' in block: - common=common2fortran(block['common'],tab+tabchar) + common=common2fortran(block['common'], tab+tabchar) if name=='unknown_interface': name='' result='' if 'result' in block: @@ -2562,35 +2562,35 @@ def crack2fortrangen(block,tab='\n', as_interface=False): argsl.append(block['result']) #if 'prefix' in block: # prefix=block['prefix']+' ' - body=crack2fortrangen(block['body'],tab+tabchar) - vars=vars2fortran(block,block['vars'],argsl,tab+tabchar, as_interface=as_interface) + body=crack2fortrangen(block['body'], tab+tabchar) + vars=vars2fortran(block, block['vars'], argsl, tab+tabchar, as_interface=as_interface) mess='' if 'from' in block and not as_interface: mess='! in %s'%block['from'] if 'entry' in block: entry_stmts = '' - for k,i in list(block['entry'].items()): + for k, i in list(block['entry'].items()): entry_stmts = '%s%sentry %s(%s)' \ - % (entry_stmts,tab+tabchar,k,','.join(i)) + % (entry_stmts, tab+tabchar, k, ','.join(i)) body = body + entry_stmts if blocktype=='block data' and name=='_BLOCK_DATA_': name = '' - ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab,prefix,blocktype,name,args,result,mess,f2pyenhancements,use,vars,common,body,tab,blocktype,name) + ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) return ret def common2fortran(common,tab=''): ret='' for k in list(common.keys()): if k=='_BLNK_': - ret='%s%scommon %s'%(ret,tab,','.join(common[k])) + ret='%s%scommon %s'%(ret, tab, ','.join(common[k])) else: - ret='%s%scommon /%s/ %s'%(ret,tab,k,','.join(common[k])) + ret='%s%scommon /%s/ %s'%(ret, tab, k, ','.join(common[k])) return ret def use2fortran(use,tab=''): ret='' for m in list(use.keys()): - ret='%s%suse %s,'%(ret,tab,m) + ret='%s%suse %s,'%(ret, tab, m) if use[m]=={}: if ret and ret[-1]==',': ret=ret[:-1] continue @@ -2600,9 +2600,9 @@ def use2fortran(use,tab=''): c=' ' for k in list(use[m]['map'].keys()): if k==use[m]['map'][k]: - ret='%s%s%s'%(ret,c,k); c=',' + ret='%s%s%s'%(ret, c, k); c=',' else: - ret='%s%s%s=>%s'%(ret,c,k,use[m]['map'][k]); c=',' + ret='%s%s%s=>%s'%(ret, c, k, use[m]['map'][k]); c=',' if ret and ret[-1]==',': ret=ret[:-1] return ret @@ -2647,13 +2647,13 @@ def vars2fortran(block,vars,args,tab='', as_interface=False): if 'depend' in vars[a]: for d in vars[a]['depend']: if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: - errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a,d)) + errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a, d)) if 'externals' in block and a in block['externals']: if isintent_callback(vars[a]): - ret='%s%sintent(callback) %s'%(ret,tab,a) - ret='%s%sexternal %s'%(ret,tab,a) + ret='%s%sintent(callback) %s'%(ret, tab, a) + ret='%s%sexternal %s'%(ret, tab, a) if isoptional(vars[a]): - ret='%s%soptional %s'%(ret,tab,a) + ret='%s%soptional %s'%(ret, tab, a) if a in vars and 'typespec' not in vars[a]: continue cont=1 @@ -2671,33 +2671,33 @@ def 
vars2fortran(block,vars,args,tab='', as_interface=False): if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: - ret='%s%sexternal %s'%(ret,tab,a) + ret='%s%sexternal %s'%(ret, tab, a) continue show(vars[a]) outmess('vars2fortran: No typespec for argument "%s".\n'%a) continue vardef=vars[a]['typespec'] if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef,vars[a]['typename']) + vardef='%s(%s)'%(vardef, vars[a]['typename']) selector={} if 'kindselector' in vars[a]: selector=vars[a]['kindselector'] elif 'charselector' in vars[a]: selector=vars[a]['charselector'] if '*' in selector: - if selector['*'] in ['*',':']: - vardef='%s*(%s)'%(vardef,selector['*']) + if selector['*'] in ['*', ':']: + vardef='%s*(%s)'%(vardef, selector['*']) else: - vardef='%s*%s'%(vardef,selector['*']) + vardef='%s*%s'%(vardef, selector['*']) else: if 'len' in selector: - vardef='%s(len=%s'%(vardef,selector['len']) + vardef='%s(len=%s'%(vardef, selector['len']) if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef,selector['kind']) + vardef='%s,kind=%s)'%(vardef, selector['kind']) else: vardef='%s)'%(vardef) elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef,selector['kind']) + vardef='%s(kind=%s)'%(vardef, selector['kind']) c=' ' if 'attrspec' in vars[a]: attr=[] @@ -2705,47 +2705,47 @@ def vars2fortran(block,vars,args,tab='', as_interface=False): if l not in ['external']: attr.append(l) if attr: - vardef='%s, %s'%(vardef,','.join(attr)) + vardef='%s, %s'%(vardef, ','.join(attr)) c=',' if 'dimension' in vars[a]: # if not isintent_c(vars[a]): # vars[a]['dimension'].reverse() - vardef='%s%sdimension(%s)'%(vardef,c,','.join(vars[a]['dimension'])) + vardef='%s%sdimension(%s)'%(vardef, c, ','.join(vars[a]['dimension'])) c=',' if 'intent' in vars[a]: lst = true_intent_list(vars[a]) if lst: - vardef='%s%sintent(%s)'%(vardef,c,','.join(lst)) + vardef='%s%sintent(%s)'%(vardef, c, ','.join(lst)) c=',' if 'check' in vars[a]: - vardef='%s%scheck(%s)'%(vardef,c,','.join(vars[a]['check'])) + vardef='%s%scheck(%s)'%(vardef, c, ','.join(vars[a]['check'])) c=',' if 'depend' in vars[a]: - vardef='%s%sdepend(%s)'%(vardef,c,','.join(vars[a]['depend'])) + vardef='%s%sdepend(%s)'%(vardef, c, ','.join(vars[a]['depend'])) c=',' if '=' in vars[a]: v = vars[a]['='] - if vars[a]['typespec'] in ['complex','double complex']: + if vars[a]['typespec'] in ['complex', 'double complex']: try: v = eval(v) - v = '(%s,%s)' % (v.real,v.imag) + v = '(%s,%s)' % (v.real, v.imag) except: pass - vardef='%s :: %s=%s'%(vardef,a,v) + vardef='%s :: %s=%s'%(vardef, a, v) else: - vardef='%s :: %s'%(vardef,a) - ret='%s%s%s'%(ret,tab,vardef) + vardef='%s :: %s'%(vardef, a) + ret='%s%s%s'%(ret, tab, vardef) return ret ###### def crackfortran(files): global usermodules - outmess('Reading fortran codes...\n',0) - readfortrancode(files,crackline) - outmess('Post-processing...\n',0) + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) usermodules=[] postlist=postcrack(grouplist[0]) - outmess('Post-processing (stage 2)...\n',0) + outmess('Post-processing (stage 2)...\n', 0) postlist=postcrack2(postlist) return usermodules+postlist @@ -2778,7 +2778,7 @@ if __name__ == "__main__": quiet=0 elif l=='-fix': if strictf77: - outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n',0) + outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) skipemptyends=1 sourcecodeform='fix' elif l=='-skipemptyends': 
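The crack2fortrangen/vars2fortran hunks above serialize the nested block dictionaries built by crackfortran() back into Fortran signatures. A rough, illustrative example of the shape such a block takes for a small subroutine (the keys are the ones used throughout this file; the concrete names and values are made up):

    # Hypothetical result of cracking:  subroutine foo(n, a)  with  real*8 a(n)
    block = {
        'block': 'subroutine',
        'name': 'foo',
        'args': ['n', 'a'],
        'vars': {
            'n': {'typespec': 'integer', 'intent': ['in'], 'attrspec': []},
            'a': {'typespec': 'real',
                  'kindselector': {'*': '8'},   # from the 'real*8' spelling
                  'dimension': ['n'],
                  'depend': ['n']},
        },
        'body': [],                             # nested blocks would go here
    }
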
@@ -2820,13 +2820,13 @@ if __name__ == "__main__": that should not need one (expect if you are scanning F90 code for non module blocks but then you should use flag -skipemptyends and also be sure that the files do not contain programs without program statement). -""",0) +""", 0) - postlist=crackfortran(files,funcs) + postlist=crackfortran(files, funcs) if pyffilename: - outmess('Writing fortran code to file %s\n'%repr(pyffilename),0) + outmess('Writing fortran code to file %s\n'%repr(pyffilename), 0) pyf=crack2fortran(postlist) - f=open(pyffilename,'w') + f=open(pyffilename, 'w') f.write(pyf) f.close() if showblocklist: diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index f00c3f0ac..68d7e48d2 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -37,7 +37,7 @@ def run(): from numpy.f2py import f2py2e has_f2py2e = 1 except ImportError: - print('Failed to import f2py2e:',sys.exc_info()[1]) + print('Failed to import f2py2e:', sys.exc_info()[1]) has_f2py2e = 0 try: @@ -48,7 +48,7 @@ def run(): import numpy_distutils has_numpy_distutils = 1 except ImportError: - print('Failed to import numpy_distutils:',sys.exc_info()[1]) + print('Failed to import numpy_distutils:', sys.exc_info()[1]) has_numpy_distutils = 0 if has_newnumpy: @@ -62,9 +62,9 @@ def run(): if has_f2py2e: try: print('Found f2py2e version %r in %s' % \ - (f2py2e.__version__.version,f2py2e.__file__)) + (f2py2e.__version__.version, f2py2e.__file__)) except Exception as msg: - print('error:',msg) + print('error:', msg) print('------') if has_numpy_distutils: @@ -79,7 +79,7 @@ def run(): numpy_distutils.__file__)) print('------') except Exception as msg: - print('error:',msg) + print('error:', msg) print('------') try: if has_numpy_distutils == 1: @@ -93,10 +93,10 @@ def run(): compiler_class(verbose=1).is_available() print('------') except Exception as msg: - print('error:',msg) + print('error:', msg) print('------') except Exception as msg: - print('error:',msg,'(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + print('error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') print('------') try: if has_numpy_distutils == 2: @@ -112,10 +112,10 @@ def run(): fcompiler.show_fcompilers() print('------') except Exception as msg: - print('error:',msg) + print('error:', msg) print('------') except Exception as msg: - print('error:',msg) + print('error:', msg) print('------') try: if has_numpy_distutils == 2: @@ -130,7 +130,7 @@ def run(): print('ok') print('------') except Exception as msg: - print('error:',msg,'(ignore it)') + print('error:', msg, '(ignore it)') print('Importing numpy_distutils.cpuinfo ...', end=' ') from numpy_distutils.cpuinfo import cpuinfo print('ok') @@ -138,11 +138,11 @@ def run(): cpu = cpuinfo() print('CPU information:', end=' ') for name in dir(cpuinfo): - if name[0]=='_' and name[1]!='_' and getattr(cpu,name[1:])(): + if name[0]=='_' and name[1]!='_' and getattr(cpu, name[1:])(): print(name[1:], end=' ') print('------') except Exception as msg: - print('error:',msg) + print('error:', msg) print('------') os.chdir(_path) if __name__ == "__main__": diff --git a/numpy/f2py/doc/Release-2.x.txt b/numpy/f2py/doc/Release-2.x.txt index 807eb0ca8..2085cb1be 100644 --- a/numpy/f2py/doc/Release-2.x.txt +++ b/numpy/f2py/doc/Release-2.x.txt @@ -14,13 +14,13 @@ C expertise is required for using this tool. 
Features include: *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], + integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], character[ | *(*) | *1 | *2 | *3 | ... ] real[ | *4 | *8 | *16 ], double precision, complex[ | *8 | *16 | *32 ] *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: + Dimension specifications: <dim> | <start>:<end> | * | : *** Supported attributes: @@ -63,7 +63,7 @@ Features include: *** f2py users list is available for support, feedback, etc. -More information about f2py, see +More information about f2py, see http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/Release-3.x.txt b/numpy/f2py/doc/Release-3.x.txt index 940771015..ddb93b9fd 100644 --- a/numpy/f2py/doc/Release-3.x.txt +++ b/numpy/f2py/doc/Release-3.x.txt @@ -14,13 +14,13 @@ C expertise is required for using this tool. Features include: *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], + integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], character[ | *(*) | *1 | *2 | *3 | ... ] real[ | *4 | *8 | *16 ], double precision, complex[ | *8 | *16 | *32 ] *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: + Dimension specifications: <dim> | <start>:<end> | * | : *** Supported attributes and statements: @@ -33,7 +33,7 @@ Features include: NEW: intent(c), threadsafe, fortranname *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module subroutines are supported. Internal + Fortran 90/95 module subroutines are supported. Internal initialization of optional arguments. *** Accessing COMMON blocks from Python. @@ -55,7 +55,7 @@ NEW: Accessing Fortran 90/95 module data. dependencies, etc. NEW: * Automatically generates setup_<modulename>.py for building - extension modules using tools from distutils and + extension modules using tools from distutils and fortran_support module (SciPy). *** Automatically generates Makefile for compiling Fortran and C @@ -72,7 +72,7 @@ NEW: * Installation with distutils. *** And finally, many bugs are fixed. -More information about f2py, see +More information about f2py, see http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/Release-4.x.txt b/numpy/f2py/doc/Release-4.x.txt index ed071a0cb..d490dcb7a 100644 --- a/numpy/f2py/doc/Release-4.x.txt +++ b/numpy/f2py/doc/Release-4.x.txt @@ -18,13 +18,13 @@ New features: Features include: *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], + integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], character[ | *(*) | *1 | *2 | *3 | ... ] real[ | *4 | *8 | *16 ], double precision, complex[ | *8 | *16 | *32 ] *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: + Dimension specifications: <dim> | <start>:<end> | * | : *** Supported attributes and statements: @@ -37,7 +37,7 @@ Features include: intent(c), threadsafe, fortranname *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module subroutines are supported. Internal + Fortran 90/95 module subroutines are supported. Internal initialization of optional arguments. *** Accessing COMMON blocks from Python. @@ -59,7 +59,7 @@ Features include: dependencies, etc. 
*** Automatically generates setup_<modulename>.py for building - extension modules using tools from distutils and + extension modules using tools from distutils and fortran_support module (SciPy). *** Automatically generates Makefile for compiling Fortran and C @@ -76,7 +76,7 @@ Features include: *** And finally, many bugs are fixed. -More information about f2py, see +More information about f2py, see http://cens.ioc.ee/projects/f2py2e/ diff --git a/numpy/f2py/doc/collectinput.py b/numpy/f2py/doc/collectinput.py index 317fde5b2..2585dae49 100755 --- a/numpy/f2py/doc/collectinput.py +++ b/numpy/f2py/doc/collectinput.py @@ -40,7 +40,7 @@ except: try: fi=sys.argv[1] except: fi=() if not stdoutflag: - sys.stdout=open(fn,'w') + sys.stdout=open(fn, 'w') nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)' input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?') @@ -59,12 +59,12 @@ for l in fileinput.input(fi): if l[-1]=='}': l=l[:-1] i=m.end()-2 sys.stderr.write('>>>>>>') - while i>-1 and (l[i] not in [' ','{']): i=i-1 + while i>-1 and (l[i] not in [' ', '{']): i=i-1 if i>-1: fn=l[i+1:] - try: f=open(fn,'r'); flag=1; f.close() + try: f=open(fn, 'r'); flag=1; f.close() except: - try: f=open(fn+'.tex','r'); flag=1;fn=fn+'.tex'; f.close() + try: f=open(fn+'.tex', 'r'); flag=1;fn=fn+'.tex'; f.close() except: flag=0 if flag==0: sys.stderr.write('Could not open a file: '+fn+'\n') diff --git a/numpy/f2py/doc/f2python9-final/README.txt b/numpy/f2py/doc/f2python9-final/README.txt index b907216b6..2ce8e393a 100644 --- a/numpy/f2py/doc/f2python9-final/README.txt +++ b/numpy/f2py/doc/f2python9-final/README.txt @@ -23,9 +23,9 @@ Note that this file includes the following JPG images PS: The HTML file f2python9.html is generated using TTH (http://hutchinson.belmont.ma.us/tth/) -from the LaTeX source file `python9.tex'. The source can be found in the +from the LaTeX source file `python9.tex'. The source can be found in the src/ -directory. This directory contains also the following EPS files +directory. This directory contains also the following EPS files flow.eps structure.eps aerostructure.eps diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt b/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt index ae1545718..d4188a91b 100644 --- a/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt +++ b/numpy/f2py/doc/f2python9-final/src/examples/exp1mess.txt @@ -13,5 +13,5 @@ Building modules... l,u = exp1([n]) Wrote C/API module "foo" to file "foomodule.c" Documentation is saved to file "foomodule.tex" -Run GNU make to build shared modules: +Run GNU make to build shared modules: gmake -f Makefile-<modulename> [test] diff --git a/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt b/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt index 9bec9198e..5ae75ebd1 100644 --- a/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt +++ b/numpy/f2py/doc/f2python9-final/src/examples/exp1session.txt @@ -17,4 +17,4 @@ Return objects: >>> print l,u [ 517656. 190435.] [ 566827. 208524.] >>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1] -2.71828182845 1.36437527942e-11
\ No newline at end of file +2.71828182845 1.36437527942e-11 diff --git a/numpy/f2py/doc/multiarray/array_from_pyobj.c b/numpy/f2py/doc/multiarray/array_from_pyobj.c index 03d4aacf6..237d16dbc 100644 --- a/numpy/f2py/doc/multiarray/array_from_pyobj.c +++ b/numpy/f2py/doc/multiarray/array_from_pyobj.c @@ -311,9 +311,9 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims) if (arr->dimensions[i]!=dims[i]) { fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n", i,dims[i],arr->dimensions[i]); - return 1; + return 1; } - if (!dims[i]) dims[i] = 1; + if (!dims[i]) dims[i] = 1; } else dims[i] = arr->dimensions[i]; } diff --git a/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt b/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt index c7b945c84..e351e8e89 100644 --- a/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt +++ b/numpy/f2py/doc/multiarray/fortran_array_from_pyobj.txt @@ -118,7 +118,7 @@ Statement of the problem: ========================= Consider a M-by-N matrix A of integers, where M and N are the number A -rows and columns, respectively. +rows and columns, respectively. In Fortran language, the storing array of this matrix can be defined as follows: @@ -192,7 +192,7 @@ if type(obj) is ArrayType: set_transpose_strides(obj) # lazy-transpose Py_INCREF(obj); return obj - set_transpose_strides(obj) + set_transpose_strides(obj) if is_contiguous(obj): set_transpose_strides(obj) Py_INCREF(obj); @@ -281,4 +281,4 @@ else: 14 January, 2002 -Pearu Peterson <pearu@cens.ioc.ee>
\ No newline at end of file +Pearu Peterson <pearu@cens.ioc.ee> diff --git a/numpy/f2py/doc/multiarray/transpose.txt b/numpy/f2py/doc/multiarray/transpose.txt index a8d41e6df..925e7a399 100644 --- a/numpy/f2py/doc/multiarray/transpose.txt +++ b/numpy/f2py/doc/multiarray/transpose.txt @@ -1,7 +1,7 @@ From: Phil Garner (garner@signal.dra.hmg.gb) - Subject: In place matrix transpose + Subject: In place matrix transpose Newsgroups: sci.math.num-analysis - Date: 1993-08-05 06:35:06 PST + Date: 1993-08-05 06:35:06 PST Someone was talking about matrix transposes earlier on. It's a @@ -161,16 +161,16 @@ int trans(scalar *a, unsigned m, unsigned n, int *move, int iwrk) `-0---0-' `-0--0-' `--OO-------------------O-----' `---0---' `-0---0-' From: Murray Dow (mld900@anusf.anu.edu.au) - Subject: Re: In place matrix transpose + Subject: Re: In place matrix transpose Newsgroups: sci.math.num-analysis - Date: 1993-08-09 19:45:57 PST + Date: 1993-08-09 19:45:57 PST In article <23qmp3INN3gl@mentor.dra.hmg.gb>, garner@signal.dra.hmg.gb (Phil Garner) writes: |> Someone was talking about matrix transposes earlier on. It's a |> curious subject. I found that an in-place transpose is about 12 times |> slower than the trivial copying method. -|> +|> Algorithm 380 from CACM is sloweer than ALG 467. Here are my times from a VP2200 vector computer. Note that the CACM algorithms are scalar. @@ -188,7 +188,7 @@ My Alg Matrix copy Conclusions: dont use Alg 380 from Netlib. If you have the available memory, do a matrix copy. If you don't have the memory, I will send you my algorithm when I have published it. --- +-- Murray Dow GPO Box 4 Canberra ACT 2601 Australia Supercomputer Facility Phone: +61 6 2495028 Australian National University Fax: +61 6 2473425 @@ -197,16 +197,16 @@ mld900@anusf.anu.edu.au ============================================================================= From: Mark Smotherman (mark@hubcap.clemson.edu) - Subject: Matrix transpose benchmark [was Re: MIPS R8000 == TFP?] + Subject: Matrix transpose benchmark [was Re: MIPS R8000 == TFP?] Newsgroups: comp.arch, comp.benchmarks, comp.sys.super - Date: 1994-07-01 06:35:51 PST + Date: 1994-07-01 06:35:51 PST mccalpin@perelandra.cms.udel.edu (John D. McCalpin) writes: -> ->Of course, these results are all for the naive algorithm. I would be ->interested to see what an efficient blocked algorithm looks like. +> +>Of course, these results are all for the naive algorithm. I would be +>interested to see what an efficient blocked algorithm looks like. >Anyone care to offer one? There is clearly a lot of performance >to be gained by the effort.... @@ -225,15 +225,15 @@ Enjoy! Matrix Transpose Generator Copyright 1993, Dept. of Computer Science, Clemson University - + Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies. - + Clemson University and its Dept. of Computer Science make no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. - + Original author: Mark Smotherman -------------------------------------------------------------------------*/ @@ -624,14 +624,14 @@ void processstmt() } } } --- +-- Mark Smotherman, Computer Science Dept., Clemson University, Clemson, SC ======================================================================= From: has (h.genceli@bre.com) - Subject: transpose of a nxm matrix stored in a vector !!! 
+ Subject: transpose of a nxm matrix stored in a vector !!! Newsgroups: sci.math.num-analysis - Date: 2000/07/25 + Date: 2000/07/25 If I have a matrix nrows x ncols, I can store it in a vector. @@ -657,9 +657,9 @@ can give me a lead here, thanks. Has From: wei-choon ng (wng@ux8.cso.uiuc.edu) - Subject: Re: transpose of a nxm matrix stored in a vector !!! + Subject: Re: transpose of a nxm matrix stored in a vector !!! Newsgroups: sci.math.num-analysis - Date: 2000/07/25 + Date: 2000/07/25 has <h.genceli@bre.com> wrote: @@ -674,12 +674,12 @@ no need to explicitly store the transpose matrix in another array and doubling the storage! W.C. --- +-- From: Robin Becker (robin@jessikat.fsnet.co.uk) - Subject: Re: transpose of a nxm matrix stored in a vector !!! + Subject: Re: transpose of a nxm matrix stored in a vector !!! Newsgroups: sci.math.num-analysis - Date: 2000/07/25 + Date: 2000/07/25 In article <snr532fo3j1180@corp.supernews.com>, has <h.genceli@bre.com> @@ -737,13 +737,13 @@ void dmx_transpose(unsigned n, unsigned m, double* a, double* b) } } } --- +-- Robin Becker From: E. Robert Tisdale (edwin@netwood.net) - Subject: Re: transpose of a nxm matrix stored in a vector !!! + Subject: Re: transpose of a nxm matrix stored in a vector !!! Newsgroups: sci.math.num-analysis - Date: 2000/07/25 + Date: 2000/07/25 Take a look at @@ -798,14 +798,14 @@ The C++ Scalar, Vector, Matrix and Tensor class library Read the rest of this message... (50 more lines) From: Victor Eijkhout (eijkhout@disco.cs.utk.edu) - Subject: Re: transpose of a nxm matrix stored in a vector !!! + Subject: Re: transpose of a nxm matrix stored in a vector !!! Newsgroups: sci.math.num-analysis - Date: 2000/07/25 + Date: 2000/07/25 "Alan Miller" <amiller @ vic.bigpond.net.au> writes: -> The attached routine does an in situ transpose. +> The attached routine does an in situ transpose. > begin 666 Dtip.f90 > M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM @@ -814,22 +814,22 @@ Hm. F90? You're not silently allocating a temporary I hope? (Why did you have to encode this? Now I have to save, this decode, ... and all for plain ascii?) --- +-- Victor Eijkhout "When I was coming up, [..] we knew exactly who the they were. It was us -versus them, and it was clear who the them was were. Today, we are not +versus them, and it was clear who the them was were. Today, we are not so sure who the they are, but we know they're there." [G.W. Bush] From: Alan Miller (amiller_@_vic.bigpond.net.au) - Subject: Re: transpose of a nxm matrix stored in a vector !!! + Subject: Re: transpose of a nxm matrix stored in a vector !!! Newsgroups: sci.math.num-analysis - Date: 2000/07/25 + Date: 2000/07/25 Victor Eijkhout wrote in message ... >"Alan Miller" <amiller @ vic.bigpond.net.au> writes: > ->> The attached routine does an in situ transpose. +>> The attached routine does an in situ transpose. >> begin 666 Dtip.f90 >> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM > @@ -862,13 +862,13 @@ http://users.bigpond.net.au/amiller/ ================================================================= From: Darran Edmundson (dedmunds@sfu.ca) - Subject: array reordering algorithm? + Subject: array reordering algorithm? Newsgroups: sci.math.num-analysis - Date: 1995/04/30 + Date: 1995/04/30 -A code I've written refers to a complex array as two separate real arrays. -However, I have a canned subroutine which expects a single array where the +A code I've written refers to a complex array as two separate real arrays. 
+However, I have a canned subroutine which expects a single array where the real and imaginary values alternate. Essentially I have a case of mismatched data structures, yet for reasons that I'd rather not go into, I'm stuck with them. @@ -878,7 +878,7 @@ space, what I need is a porting subroutine that remaps the data from one format to the other - using as little space as possible. I think of the problem as follows. Imagine an array of dimension 10 containing -the values 1,3,5,7,9,2,4,6,8,10 in this order. +the values 1,3,5,7,9,2,4,6,8,10 in this order. A(1) / 1 \ C(1) A(2) | 3 | C(2) @@ -892,18 +892,18 @@ the values 1,3,5,7,9,2,4,6,8,10 in this order. B(4) | 8 | C(9) B(5) \ 10 / C(10) -Given that I know this initial pattern, I want to sort the array C in-place *without -making comparisons*. That is, the algorithm can only depend on the initial +Given that I know this initial pattern, I want to sort the array C in-place *without +making comparisons*. That is, the algorithm can only depend on the initial knowledge of the pattern. Do you see what a sort is going to do? It will make the A and B arrays alternate, i.e. C(1)=A(1), C(2)=B(1), C(3)=A(2), -C(4)=B(2), etc. It's not a real sort though because I can't actually refer to the +C(4)=B(2), etc. It's not a real sort though because I can't actually refer to the values above (i.e. no comparisons) because A and B will be holding real data, not this contrived pattern. The pattern above exists though - it's the natural ordering in memory of A and B. -Either pair swapping only or a small amount of workspace can be used. The -in-place is important - imagine scaling this problem up to an -array of 32 or 64 million double precision values and you can easily see how +Either pair swapping only or a small amount of workspace can be used. The +in-place is important - imagine scaling this problem up to an +array of 32 or 64 million double precision values and you can easily see how duplicating the array is not a feasible solution. Any ideas? I've been stumped on this for a day and a half now. @@ -912,9 +912,9 @@ Darran Edmundson dedmunds@sfu.ca From: Roger Critchlow (rec@elf115.elf.org) - Subject: Re: array reordering algorithm? + Subject: Re: array reordering algorithm? Newsgroups: sci.math.num-analysis - Date: 1995/04/30 + Date: 1995/04/30 Any ideas? I've been stumped on this for a day and a half now. @@ -1026,7 +1026,7 @@ int test_matrix_cols(a, m, n) TYPE *a; int m, n; o += a[i*n+j] != j*m+i; return o; } - + /* print a matrix */ void print_matrix(a, m, n) TYPE *a; int m, n; { @@ -1121,7 +1121,6 @@ main(argc, argv) int argc; char *argv[]; return 0; } - + #endif #endif - diff --git a/numpy/f2py/doc/multiarrays.txt b/numpy/f2py/doc/multiarrays.txt index 704208976..75aeaab9a 100644 --- a/numpy/f2py/doc/multiarrays.txt +++ b/numpy/f2py/doc/multiarrays.txt @@ -89,7 +89,7 @@ subroutine foo(l,m,n,a) integer :: n real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), & check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a - optional a + optional a end Note again that the array a must depend on l,m,n. Then the array a will be allocated in the Python C/API module. Not also that @@ -117,4 +117,3 @@ wrapping Fortran multidimensional arrays with f2py2e. 
Regards, Pearu - diff --git a/numpy/f2py/doc/using_F_compiler.txt b/numpy/f2py/doc/using_F_compiler.txt index 3067f0776..63bb0d68c 100644 --- a/numpy/f2py/doc/using_F_compiler.txt +++ b/numpy/f2py/doc/using_F_compiler.txt @@ -46,7 +46,7 @@ Required arguments: >>> from Numeric import array >>> a = array(3) ->>> fun.foo.bar(a) +>>> fun.foo.bar(a) Hello from foo.bar a= 3 a= 8 @@ -54,7 +54,7 @@ Required arguments: 8 >>> -This works nicely with all supported Fortran compilers. +This works nicely with all supported Fortran compilers. However, the F compiler (http://www.fortran.com/F/compilers.html) is an exception. Namely, the F compiler is designed to recognize only @@ -117,7 +117,7 @@ value of `a'. 4) Build the extension module f2py -c foo.pyf foo.o --fcompiler=Gnu /opt/F/lib/quickfit.o \ - /opt/F/lib/libf96.a + /opt/F/lib/libf96.a This will create the extension module foo.so into the current directory. Notice that you must use Gnu compiler (gcc) for linking. @@ -137,7 +137,7 @@ Return objects: >>> foo.bar(3) 8 ->>> +>>> Notice that the F compiled module procedures are called as ordinary external procedures. Also I/O seems to be lacking for F compiled diff --git a/numpy/f2py/docs/FAQ.txt b/numpy/f2py/docs/FAQ.txt index 416560e92..4b50933cf 100644 --- a/numpy/f2py/docs/FAQ.txt +++ b/numpy/f2py/docs/FAQ.txt @@ -238,7 +238,7 @@ constructs in future. But note that the task in non-trivial and may require the next edition of F2PY for which I don't have resources to work with at the moment. -Jeffrey Hagelberg from LLNL has made progress on adding +Jeffrey Hagelberg from LLNL has made progress on adding support for derived types to f2py. He writes: At this point, I have a version of f2py that supports derived types @@ -543,7 +543,7 @@ __ http://www.met.ed.ac.uk/~cory/PyARTS/ + `Python interface to PSPLINE`__, a collection of Spline and Hermite interpolation tools for 1D, 2D, and 3D datasets on - rectilinear grids. + rectilinear grids. __ http://pypspline.sourceforge.net @@ -572,11 +572,11 @@ Here are some comments people have posted to f2py mailing list and c.l.py: + Fernando Perez: Anyway, many many thanks for this amazing tool. - I haven't used pyfort, but I can definitely vouch for the amazing quality of - f2py. And since f2py is actively used by numpy, it won't go unmaintained. + I haven't used pyfort, but I can definitely vouch for the amazing quality of + f2py. And since f2py is actively used by numpy, it won't go unmaintained. It's quite impressive, and very easy to use. -+ Kevin Mueller: First off, thanks to those responsible for F2PY; ++ Kevin Mueller: First off, thanks to those responsible for F2PY; its been an integral tool of my research for years now. + David Linke: Best regards and thanks for the great tool! diff --git a/numpy/f2py/docs/HISTORY.txt b/numpy/f2py/docs/HISTORY.txt index 077189ee2..4326e4852 100644 --- a/numpy/f2py/docs/HISTORY.txt +++ b/numpy/f2py/docs/HISTORY.txt @@ -38,7 +38,7 @@ Release 2.46.243 - Fixed missing need kw error. - Fixed getting callback non-existing extra arguments. - - External callback functions and extra_args can be set via + - External callback functions and extra_args can be set via ext.module namespace. - Avoid crash when external callback function is not set. @@ -46,7 +46,7 @@ Release 2.46.243 - Enabled ``intent(out)`` for ``intent(aux)`` non-complex scalars. - Fixed splitting lines in F90 fixed form mode. 
- - Fixed FORTRANAME typo, relevant when wrapping scalar functions with + - Fixed FORTRANAME typo, relevant when wrapping scalar functions with ``--no-wrap-functions``. - Improved failure handling for callback functions. - Fixed bug in writting F90 wrapper functions when a line length @@ -65,8 +65,8 @@ Release 2.46.243 * crackfortran.py - - Introduced parent_block key. Get ``use`` statements recursively - from parent blocks. + - Introduced parent_block key. Get ``use`` statements recursively + from parent blocks. - Apply parameter values to kindselectors. - Fixed bug evaluating ``selected_int_kind`` function. - Ignore Name and Syntax errors when evaluating scalars. @@ -135,7 +135,7 @@ Release 2.45.241_1926 functional at the moment. - Introduced ``intent(aux)`` attribute. Useful to save a value of a parameter to auxiliary C variable. Note that ``intent(aux)`` - implies ``intent(c)``. + implies ``intent(c)``. - Added ``usercode`` section. When ``usercode`` is used in ``python module`` block twise then the contents of the second multi-line block is inserted after the definition of external routines. @@ -173,7 +173,7 @@ Release 2.43.239_1831 * fortranobject.c - Patch to make PyArray_CanCastSafely safe on 64-bit machines. - Fixes incorrect results when passing ``array('l')`` to + Fixes incorrect results when passing ``array('l')`` to ``real*8 intent(in,out,overwrite)`` arguments. * rules.py @@ -278,14 +278,14 @@ Release 2.43.239_1806 - Added support for multiple statements in one line (separated with semicolon). - Impl. get_useparameters function for using parameter values from other f90 modules. - - Applied Bertholds patch to fix bug in evaluating expressions + - Applied Bertholds patch to fix bug in evaluating expressions like ``1.d0/dvar``. - Fixed bug in reading string parameters. - Evaluating parameters in charselector. Code cleanup. - Using F90 module parameters to resolve kindselectors. - Made the evaluation of module data init-expression more robust. - Support for ``entry`` statement. - - Fixed ``determineexprtype`` that in the case of parameters + - Fixed ``determineexprtype`` that in the case of parameters returned non-dictionary objects. - Use ``-*- fix -*-`` to specify that a file is in fixed format. @@ -316,7 +316,7 @@ Release 2.39.235_1693 - Handle ``XDY`` parameter constants. - - Introduced formatpattern to workaround a corner case where reserved + - Introduced formatpattern to workaround a corner case where reserved keywords are used in format statement. Other than that, format pattern has no use. @@ -371,7 +371,7 @@ Release 2.39.235_1644 - Fixed bug in Fortran 90 comments of fixed format. - Warn when .pyf signatures contain undefined symbols. - Better detection of source code formats. Using ``-*- fortran -*-`` - or ``-*- f90 -*-`` in the first line of a Fortran source file is + or ``-*- f90 -*-`` in the first line of a Fortran source file is recommended to help f2py detect the format, fixed or free, respectively, correctly. @@ -409,7 +409,7 @@ Release 2.39.235_1644 * setup.py - - Installing f2py.py instead of f2py.bat under NT. + - Installing f2py.py instead of f2py.bat under NT. - Introduced ``--with-numpy_distutils`` that is useful when making f2py tar-ball with numpy_distutils included. @@ -451,7 +451,7 @@ Release 2.37.233-1545 - Fixed segfaults (that were introduced with recent memory leak fixes) when using allocatable arrays. - - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. If defined + - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. 
If defined then a message is printed to stderr whenever a copy of an array is made and arrays size is larger than F2PY_REPORT_ON_ARRAY_COPY. @@ -547,7 +547,7 @@ Public Release 2.32.225-1419 * docs/usersguide/ - Complete revision of F2PY Users Guide + Complete revision of F2PY Users Guide * tests/run_all.py @@ -598,8 +598,8 @@ Public Release 2.32.225-1419 - Introduced multiline block. Currently usable only for ``callstatement`` statement. - Improved array length calculation in getarrlen(..). - - "From sky" program group is created only if ``groupcounter<1``. - See TODO.txt. + - "From sky" program group is created only if ``groupcounter<1``. + See TODO.txt. - Added support for ``dimension(n:*)``, ``dimension(*:n)``. They are treated as ``dimesnion(*)`` by f2py. - Fixed parameter substitution (this fixes TODO item by Patrick @@ -640,7 +640,7 @@ Public Release 2.32.225-1419 * src/fortranobject.c - - Multi-dimensional common block members and allocatable arrays + - Multi-dimensional common block members and allocatable arrays are returned as Fortran-contiguous arrays. - Fixed NULL return to Python without exception. - Fixed memory leak in getattr(<fortranobj>,'__doc__'). @@ -667,7 +667,7 @@ Public Release 2.32.225-1419 - Introduced dummy routine feature. - F77 and F90 wrapper subroutines (if any) as saved to different files, <modulename>-f2pywrappers.f and <modulename>-f2pywrappers2.f90, - respectively. Therefore, wrapping F90 requires numpy_distutils >= + respectively. Therefore, wrapping F90 requires numpy_distutils >= 0.2.0_alpha_2.229. - Fixed compiler warnings about meaningless ``const void (*f2py_func)(..)``. - Improved error messages for ``*_from_pyobj``. @@ -690,9 +690,9 @@ Public Release 2.32.225-1419 - (float|long_double)_from_pyobj now use double_from_pyobj. - complex_(float|long_double)_from_pyobj now use complex_double_from_pyobj. - Rewrote ``*_from_pyobj`` to be more robust. This fixes segfaults if - getting * from a string. Note that int_from_pyobj differs - from PyNumber_Int in that it accepts also complex arguments - (takes the real part) and sequences (takes the 1st element). + getting * from a string. Note that int_from_pyobj differs + from PyNumber_Int in that it accepts also complex arguments + (takes the real part) and sequences (takes the 1st element). - Removed unnecessary void* casts in NUMFROMARROBJ. - Fixed casts in ``*_from_pyobj`` functions. - Replaced CNUMFROMARROBJ with NUMFROMARROBJ. @@ -704,7 +704,7 @@ Public Release 2.32.225-1419 - Introduced isdummyroutine(). - Fixed islong_* functions. - Fixed isintent_in for intent(c) arguments (bug report: Pierre Schnizer). - - Introduced F2PYError and throw_error. Using throw_error, f2py + - Introduced F2PYError and throw_error. Using throw_error, f2py rejects illegal .pyf file constructs that otherwise would cause compilation failures or python crashes. - Fixed islong_long(logical*8)->True. @@ -715,7 +715,7 @@ Public Release 2.32.225-1419 * Speed up for ``*_from_pyobj`` functions if obj is a sequence. -* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF`` +* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF`` in ``GETSCALARFROMPYTUPLE``. Older Releases @@ -737,7 +737,7 @@ Older Releases *** Fixed the order of build_flib options when using --fcompiler=... *** Recognize .f95 and .F95 files as Fortran sources with free format. *** Cleaned up the output of 'f2py -h': removed obsolete items, - added build_flib options section. + added build_flib options section. 
*** Added --help-compiler option: it lists available Fortran compilers as detected by numpy_distutils/command/build_flib.py. This option is available only with -c option. @@ -748,7 +748,7 @@ Older Releases :: - *** Fixed copying of non-contigious 1-dimensional arrays bug. + *** Fixed copying of non-contigious 1-dimensional arrays bug. (Thanks to Travis O.). @@ -760,7 +760,7 @@ Older Releases *** Fixed ignoring type declarations. *** Turned F2PY_REPORT_ATEXIT off by default. *** Made MAX,MIN macros available by default so that they can be - always used in signature files. + always used in signature files. *** Disabled F2PY_REPORT_ATEXIT for FreeBSD. @@ -791,7 +791,7 @@ Older Releases *** Updated f2py for the latest numpy_distutils. *** A nasty bug with multi-dimensional Fortran arrays is fixed - (intent(out) arrays had wrong shapes). (Thanks to Eric for + (intent(out) arrays had wrong shapes). (Thanks to Eric for pointing out this bug). *** F2PY_REPORT_ATEXIT is disabled by default for __WIN32__. @@ -809,9 +809,9 @@ Older Releases *** Introduced 'callprotoargument' statement so that proper prototypes can be declared. This is crucial when wrapping C functions as it will fix segmentation faults when these wrappers use non-pointer - arguments (thanks to R. Clint Whaley for explaining this to me). + arguments (thanks to R. Clint Whaley for explaining this to me). Note that in f2py generated wrapper, the prototypes have - the following forms: + the following forms: extern #rtype# #fortranname#(#callprotoargument#); or extern #rtype# F_FUNC(#fortranname#,#FORTRANNAME#)(#callprotoargument#); @@ -831,7 +831,7 @@ Older Releases intent(overwrite) will make default overwrite_<name>=1. *** Introduced intent(in|inout,out,out=<name>) attribute that renames arguments name when returned. This renaming has effect only in - documentation strings. + documentation strings. *** Introduced 'callstatement' statement to pyf file syntax. With this one can specify explicitly how wrapped function should be called from the f2py generated module. WARNING: this is a dangerous feature @@ -841,7 +841,7 @@ Older Releases be used only inside a subroutine or function block (it should be enough though) and must be only in one continuous line. The syntax of the statement is: callstatement <C-expression>; - + :Release: 2.11.174 :Date: 18 January 2002 @@ -852,23 +852,23 @@ Older Releases *** Introduced extra keyword argument copy_<varname> for intent(copy) variables. It defaults to 1 and forces to make a copy for intent(in) variables when passing on to wrapped functions (in case - they undesirably change the variable in-situ). + they undesirably change the variable in-situ). *** Introduced has_column_major_storage member function for all f2py generated extension modules. It is equivalent to Python call 'transpose(obj).iscontiguous()' but very efficient. *** Introduced -DF2PY_REPORT_ATEXIT. If this is used when compiling, a report is printed to stderr as python exits. The report includes - the following timings: - 1) time spent in all wrapped function calls; + the following timings: + 1) time spent in all wrapped function calls; 2) time spent in f2py generated interface around the wrapped functions. This gives a hint whether one should worry about storing data in proper order (C or Fortran). 3) time spent in Python functions called by wrapped functions through call-back interface. 4) time spent in f2py generated call-back interface. - For now, -DF2PY_REPORT_ATEXIT is enabled by default. 
Use + For now, -DF2PY_REPORT_ATEXIT is enabled by default. Use -DF2PY_REPORT_ATEXIT_DISABLE to disable it (I am not sure if - Windows has needed tools, let me know). + Windows has needed tools, let me know). Also, I appreciate if you could send me the output of 'F2PY performance report' (with CPU and platform information) so that I could optimize f2py generated interfaces for future releases. @@ -885,15 +885,15 @@ Older Releases may be useful for cases where the wrapped function changes the argument in situ and this may not be desired side effect. Otherwise, it is safe to not use intent(copy) for the sake - of a better performance. - intent(cache,hide|optional) - just creates a junk of memory. + of a better performance. + intent(cache,hide|optional) - just creates a junk of memory. It does not care about proper storage order. Can be also intent(in) but then the corresponding argument must be a contiguous array with a proper elsize. *** intent(c) can be used also for subroutine names so that -DNO_APPEND_FORTRAN can be avoided for C functions. - *** IMPORTANT BREAKING GOOD ... NEWS!!!: + *** IMPORTANT BREAKING GOOD ... NEWS!!!: From now on you don't have to worry about the proper storage order in multi-dimensional arrays that was earlier a real headache when @@ -929,14 +929,14 @@ Older Releases changed. *** Introduced test-site/test_f2py2e.py script that runs all tests. - *** Fixed global variables initialization problem in crackfortran + *** Fixed global variables initialization problem in crackfortran when run_main is called several times. *** Added 'import Numeric' to C/API init<module> function. *** Fixed f2py.bat in setup.py. *** Switched over to numpy_distutils and dropped fortran_support. *** On Windows create f2py.bat file. *** Introduced -c option: read fortran or pyf files, construct extension - modules, build, and save them to current directory. + modules, build, and save them to current directory. In one word: do-it-all-in-one-call. *** Introduced pyf_extensions(sources,f2py_opts) function. It simplifies the extension building process considerably. Only for internal use. @@ -983,7 +983,7 @@ Older Releases and caused core dump with a non-gcc compiler (Thanks to Pierre Schnizer for reporting this bug). *** Fixed "warning: variable `..' might be clobbered by `longjmp' or `vfork'": - - Reorganized the structure of wrapper functions to get rid of + - Reorganized the structure of wrapper functions to get rid of `goto capi_fail' statements that caused the above warning. @@ -1038,7 +1038,6 @@ Older Releases feature. setup_<modulename>.py uses fortran_support module (from SciPy), but for your convenience it is included also with f2py as an additional package. Note that it has not as many compilers supported as with - using Makefile-<modulename>, but new compilers should be added to + using Makefile-<modulename>, but new compilers should be added to fortran_support module, not to f2py2e package. *** Fixed some compiler warnings about else statements. - diff --git a/numpy/f2py/docs/OLDNEWS.txt b/numpy/f2py/docs/OLDNEWS.txt index 401d2dcee..23d332e35 100644 --- a/numpy/f2py/docs/OLDNEWS.txt +++ b/numpy/f2py/docs/OLDNEWS.txt @@ -60,4 +60,4 @@ .. _HISTORY.txt: HISTORY.html .. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _TESTING.txt: TESTING.html
\ No newline at end of file +.. _TESTING.txt: TESTING.html diff --git a/numpy/f2py/docs/README.txt b/numpy/f2py/docs/README.txt index cec8a6ec0..f232f1d5d 100644 --- a/numpy/f2py/docs/README.txt +++ b/numpy/f2py/docs/README.txt @@ -21,10 +21,10 @@ __ FAQ.html .. topic:: NEWS!!! January 5, 2006 - + WARNING -- these notes are out of date! The package structure for NumPy and SciPy has changed considerably. Much of this information is now incorrect. - + January 30, 2005 Latest F2PY release (version 2.45.241_1926). @@ -47,7 +47,7 @@ __ FAQ.html __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.137&r2=1.131&f=h September 25, 2004 - Latest F2PY release (version 2.43.239_1806). + Latest F2PY release (version 2.43.239_1806). Support for ``ENTRY`` statement. New attributes: ``intent(inplace)``, ``intent(callback)``. Supports Numarray 1.1. Introduced ``-*- fix -*-`` header content. Improved ``PARAMETER`` support. @@ -150,7 +150,7 @@ Here follows a more detailed list of F2PY features: <dim> | <start>:<end> | * | : + Attributes and statements:: - + intent([ in | inout | out | hide | in,out | inout,out | c | copy | cache | callback | inplace | aux ]) dimension(<dimspec>) @@ -249,7 +249,7 @@ Unpack the source file, change to directrory ``F2PY-?-???/`` and run The F2PY installation installs a Python package ``f2py2e`` to your Python ``site-packages`` directory and a script ``f2py`` to your -Python executable path. +Python executable path. See also Installation__ section in `F2PY FAQ`_. @@ -287,7 +287,7 @@ __ hello.f 2) Run :: - + f2py -c -m hello hello.f This will build an extension module ``hello.so`` (or ``hello.sl``, @@ -299,12 +299,12 @@ __ hello.f >>> import hello >>> print hello.__doc__ >>> print hello.foo.__doc__ - >>> hello.foo(4) + >>> hello.foo(4) Hello from Fortran! a= 4 - >>> + >>> -If the above works, then you can try out more thorough +If the above works, then you can try out more thorough `F2PY unit tests`__ and read the `F2PY Users Guide and Reference Manual`_. __ FAQ.html#q-how-to-test-if-f2py-is-working-correctly @@ -354,7 +354,7 @@ __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&con A mailing list f2py-users@cens.ioc.ee is open for F2PY releated discussion/questions/etc. -* `Subscribe..`__ +* `Subscribe..`__ * `Archives..`__ __ http://cens.ioc.ee/mailman/listinfo/f2py-users @@ -412,9 +412,9 @@ __ http://jrfonseca.dyndns.org/debian/ * `American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978`__ * `J3`_ -- The US Fortran standards committee. * SWIG_ -- A software development tool that connects programs written - in C and C++ with a variety of high-level programming languages. + in C and C++ with a variety of high-level programming languages. * `Mathtools.net`_ -- A technical computing portal for all scientific - and engineering needs. + and engineering needs. .. __: http://www.fortran.com/fortran/F77_std/rjcnf.html @@ -445,7 +445,7 @@ __ http://jrfonseca.dyndns.org/debian/ .. _distutils: http://www.python.org/sigs/distutils-sig/ .. _Numerical Python: http://www.numpy.org/ .. _Pyfort: http://pyfortran.sourceforge.net/ -.. _Scientific Python: +.. _Scientific Python: http://starship.python.net/crew/hinsen/scientific.html .. _The Fortran Company: http://www.fortran.com/fortran/ .. 
_J3: http://www.j3-fortran.org/ diff --git a/numpy/f2py/docs/TESTING.txt b/numpy/f2py/docs/TESTING.txt index d90521175..a6df92c48 100644 --- a/numpy/f2py/docs/TESTING.txt +++ b/numpy/f2py/docs/TESTING.txt @@ -35,7 +35,7 @@ Tests about the failure. * Test intent(in), intent(out) scalar arguments, - scalars returned by F77 functions + scalars returned by F77 functions and F90 module functions:: tests/f77/return_integer.py diff --git a/numpy/f2py/docs/THANKS.txt b/numpy/f2py/docs/THANKS.txt index 0a3f0b9d6..636540687 100644 --- a/numpy/f2py/docs/THANKS.txt +++ b/numpy/f2py/docs/THANKS.txt @@ -4,7 +4,7 @@ ================= F2PY__ is an open source Python package and command line tool developed and -maintained by Pearu Peterson (me__). +maintained by Pearu Peterson (me__). .. __: http://cens.ioc.ee/projects/f2py2e/ .. __: http://cens.ioc.ee/~pearu/ diff --git a/numpy/f2py/docs/pytest.py b/numpy/f2py/docs/pytest.py index b61a607fe..bf4ef917f 100644 --- a/numpy/f2py/docs/pytest.py +++ b/numpy/f2py/docs/pytest.py @@ -4,9 +4,9 @@ from __future__ import division, absolute_import, print_function import Numeric def foo(a): a = Numeric.array(a) - m,n = a.shape + m, n = a.shape for i in range(m): for j in range(n): - a[i,j] = a[i,j] + 10*(i+1) + (j+1) + a[i, j] = a[i, j] + 10*(i+1) + (j+1) return a #eof diff --git a/numpy/f2py/docs/usersguide/index.txt b/numpy/f2py/docs/usersguide/index.txt index 5a8d12c68..7b26cac54 100644 --- a/numpy/f2py/docs/usersguide/index.txt +++ b/numpy/f2py/docs/usersguide/index.txt @@ -4,7 +4,7 @@ F2PY Users Guide and Reference Manual ////////////////////////////////////////////////////////////////////// -:Author: Pearu Peterson +:Author: Pearu Peterson :Contact: pearu@cens.ioc.ee :Web site: http://cens.ioc.ee/projects/f2py2e/ :Date: $Date: 2005/04/02 10:03:26 $ @@ -97,7 +97,7 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: a : input rank-1 array('d') with bounds (n) Optional arguments: n := len(a) input int - + >>> a=Numeric.zeros(8,'d') >>> fib1.fib(a) >>> print a @@ -125,7 +125,7 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``:: Traceback (most recent call last): File "<stdin>", line 1, in ? fib.error: (len(a)>=n) failed for 1st keyword n - >>> + >>> This demonstrates one of the useful features in F2PY, that it, F2PY implements basic compatibility checks between related @@ -236,8 +236,8 @@ In Python:: n : input int Return objects: a : rank-1 array('d') with bounds (n) - - >>> print fib2.fib(8) + + >>> print fib2.fib(8) [ 0. 1. 1. 2. 3. 5. 8. 13.] .. topic:: Comments @@ -292,8 +292,8 @@ previous case:: n : input int Return objects: a : rank-1 array('d') with bounds (n) - - >>> print fib3.fib(8) + + >>> print fib3.fib(8) [ 0. 1. 1. 2. 3. 5. 8. 13.] 
@@ -405,17 +405,17 @@ Type declarations :: - <typespec> := byte | character [<charselector>] + <typespec> := byte | character [<charselector>] | complex [<kindselector>] | real [<kindselector>] - | double complex | double precision + | double complex | double precision | integer [<kindselector>] | logical [<kindselector>] - <charselector> := * <charlen> + <charselector> := * <charlen> | ( [len=] <len> [ , [kind=] <kind>] ) | ( kind= <kind> [ , len= <len> ] ) <kindselector> := * <intlen> | ( [kind=] <kind> ) - <entitydecl> := <name> [ [ * <charlen> ] [ ( <arrayspec> ) ] + <entitydecl> := <name> [ [ * <charlen> ] [ ( <arrayspec> ) ] | [ ( <arrayspec> ) ] * <charlen> ] | [ / <init_expr> / | = <init_expr> ] \ [ , <entitydecl> ] @@ -425,7 +425,7 @@ Type declarations + ``<attrspec>`` is a comma separated list of attributes_; + ``<arrayspec>`` is a comma separated list of dimension bounds; - + + ``<init_expr>`` is a `C expression`__. + ``<intlen>`` may be negative integer for ``integer`` type @@ -463,7 +463,7 @@ Use statements: <rename_list> := <local_name> => <use_name> [ , <rename_list> ] Currently F2PY uses ``use`` statement only for linking call-back - modules and ``external`` arguments (call-back functions), see + modules and ``external`` arguments (call-back functions), see `Call-back arguments`_. Common block statements: @@ -570,10 +570,10 @@ __ external_ corresponding prototype:: extern <return type> FUNC_F(<routine name>,<ROUTINE NAME>)(<callprotoargument>); - + + ``fortranname [<acctual Fortran/C routine name>]`` You can use arbitrary ``<routine name>`` for a given Fortran/C - function. Then you have to specify + function. Then you have to specify ``<acctual Fortran/C routine name>`` with this statement. If ``fortranname`` statement is used without @@ -605,7 +605,7 @@ __ external_ module methods ``PyMethodDef``-array. It must be a comma-separated list of C arrays (see `Extending and Embedding`__ Python documentation for details). - ``pymethoddef`` statement can be used only inside + ``pymethoddef`` statement can be used only inside ``python module`` block. __ http://www.python.org/doc/current/ext/ext.html @@ -626,7 +626,7 @@ The following attributes are used by F2PY: For an optional array argument, all its dimensions must be bounded. -``required`` +``required`` The corresponding argument is considered as a required one. This is default. You need to specify ``required`` only if there is a need to disable automatic ``optional`` setting when ``<init_expr>`` is used. @@ -691,7 +691,7 @@ The following attributes are used by F2PY: argument like in the following example:: integer intent(hide),depend(a) :: n = len(a) - real intent(in),dimension(n) :: a + real intent(in),dimension(n) :: a + ``c`` The argument is treated as a C scalar or C array argument. In @@ -809,7 +809,7 @@ The following attributes are used by F2PY: ``external`` The corresponding argument is a function provided by user. The - signature of this so-called call-back function can be defined + signature of this so-called call-back function can be defined - in ``__user__`` module block, - or by demonstrative (or real, if the signature file is a real Fortran @@ -964,12 +964,12 @@ All wrappers (to Fortran/C routines or to common blocks or to Fortran 90 module data) generated by F2PY are exposed to Python as ``fortran`` type objects. Routine wrappers are callable ``fortran`` type objects while wrappers to Fortran data have attributes referring to data -objects. +objects. 
All ``fortran`` type object have attribute ``_cpointer`` that contains CObject referring to the C pointer of the corresponding Fortran/C function or variable in C level. Such CObjects can be used as an -callback argument of F2PY generated functions to bypass Python C/API +callback argument of F2PY generated functions to bypass Python C/API layer of calling Python functions from Fortran or C when the computational part of such functions is implemented in C or Fortran and wrapped with F2PY (or any other tool capable of providing CObject @@ -1047,7 +1047,7 @@ expects an array version of a string in order to *in situ* changes to be effective. .. topic:: Example - + Consider the following `Fortran 77 code`__: .. include:: string.f @@ -1439,7 +1439,7 @@ distinguished by the usage of ``-c`` and ``-h`` switches: "filename.ext" <filename.ext> - + The include statement is inserted just before the wrapper functions. This feature enables using arbitrary C functions (defined in ``<includefile>``) in F2PY generated wrappers. @@ -1457,7 +1457,7 @@ distinguished by the usage of ``-c`` and ``-h`` switches: ``--include-paths <path1>:<path2>:..`` Search include files from given directories. - ``--help-link [<list of resources names>]`` + ``--help-link [<list of resources names>]`` List system resources found by ``numpy_distutils/system_info.py``. For example, try ``f2py --help-link lapack_opt``. @@ -1490,9 +1490,9 @@ distinguished by the usage of ``-c`` and ``-h`` switches: ``--fcompiler=<Vendor>`` Specify Fortran compiler type by vendor. ``--f77exec=<path>`` - Specify the path to F77 compiler + Specify the path to F77 compiler ``--fcompiler-exec=<path>`` [depreciated] - Specify the path to F77 compiler + Specify the path to F77 compiler ``--f90exec=<path>`` Specify the path to F90 compiler ``--f90compiler-exec=<path>`` [depreciated] @@ -1553,7 +1553,7 @@ distinguished by the usage of ``-c`` and ``-h`` switches: Other options: ``-m <modulename>`` - Name of an extension module. Default is ``untitled``. Don't use this option + Name of an extension module. Default is ``untitled``. Don't use this option if a signature file (*.pyf) is used. ``--[no-]lower`` Do [not] lower the cases in ``<fortran files>``. By default, @@ -1602,7 +1602,7 @@ The following functions are provided by the ``f2py2e`` module: ``compile(source, modulename='untitled', extra_args='', verbose=1, source_fn=None)`` - Build extension module from Fortran 77 source string ``source``. + Build extension module from Fortran 77 source string ``source``. Return 0 if successful. Note that this function actually calls ``f2py -c ..`` from shell to ensure safety of the current Python process. @@ -1618,7 +1618,7 @@ Using ``numpy_distutils`` ``numpy_distutils`` is part of the SciPy_ project and aims to extend standard Python ``distutils`` to deal with Fortran sources and F2PY signature files, e.g. compile Fortran sources, call F2PY to construct -extension modules, etc. +extension modules, etc. .. topic:: Example @@ -1664,7 +1664,7 @@ extension modules, etc. ``config_fc`` to change Fortran compiler options - as well as ``build_ext`` and ``build_clib`` commands are enhanced + as well as ``build_ext`` and ``build_clib`` commands are enhanced to support Fortran sources. Run @@ -1679,7 +1679,7 @@ extension modules, etc. can choose different Fortran compilers by using ``build_ext`` command option ``--fcompiler=<Vendor>``. 
Here ``<Vendor>`` can be one of the following names:: - + absoft sun mips intel intelv intele intelev nag compaq compaqv gnu vast pg hpux See ``numpy_distutils/fcompiler.py`` for up-to-date list of @@ -1712,7 +1712,7 @@ extension modules, etc. command option ``--fcompiler=<Vendor>`` or by defining environment variable ``FC_VENDOR=<Vendor>``. Here ``<Vendor>`` can be one of the following names:: - + Absoft Sun SGI Intel Itanium NAG Compaq Digital Gnu VAST PG See ``numpy_distutils/command/build_flib.py`` for up-to-date list of @@ -1740,7 +1740,7 @@ wraps the C library function ``system()``:: In Python: .. include:: spam_session.dat - :literal: + :literal: Modifying the dictionary of a F2PY generated module =================================================== diff --git a/numpy/f2py/docs/usersguide/setup_example.py b/numpy/f2py/docs/usersguide/setup_example.py index 8dfaa7a52..ab451084b 100644 --- a/numpy/f2py/docs/usersguide/setup_example.py +++ b/numpy/f2py/docs/usersguide/setup_example.py @@ -8,7 +8,7 @@ from numpy_distutils.core import Extension ext1 = Extension(name = 'scalar', sources = ['scalar.f']) ext2 = Extension(name = 'fib2', - sources = ['fib2.pyf','fib1.f']) + sources = ['fib2.pyf', 'fib1.f']) if __name__ == "__main__": from numpy_distutils.core import setup @@ -16,6 +16,6 @@ if __name__ == "__main__": description = "F2PY Users Guide examples", author = "Pearu Peterson", author_email = "pearu@cens.ioc.ee", - ext_modules = [ext1,ext2] + ext_modules = [ext1, ext2] ) # End of setup_example.py diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 64c13fff0..011b430d5 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -173,8 +173,8 @@ Copyright 1999 - 2011 Pearu Peterson all rights reserved. http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version) def scaninputline(inputline): - files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[] - f,f2,f3,f4,f5,f6,f7,f8,f9=1,0,0,0,0,0,0,0,0 + files, funcs, skipfuncs, onlyfuncs, debug=[], [], [], [], [] + f, f2, f3, f4, f5, f6, f7, f8, f9=1, 0, 0, 0, 0, 0, 0, 0, 0 verbose = 1 dolc=-1 dolatexdoc = 0 @@ -182,7 +182,7 @@ def scaninputline(inputline): wrapfuncs = 1 buildpath = '.' include_paths = [] - signsfile,modulename=None,None + signsfile, modulename=None, None options = {'buildpath':buildpath, 'coutput': None, 'f2py_wrapper_output': None} @@ -236,7 +236,7 @@ def scaninputline(inputline): open(l).close() files.append(l) except IOError as detail: - errmess('IOError: %s. Skipping file "%s".\n'%(str(detail),l)) + errmess('IOError: %s. Skipping file "%s".\n'%(str(detail), l)) elif f==-1: skipfuncs.append(l) elif f==0: onlyfuncs.append(l) if not f5 and not files and not modulename: @@ -247,7 +247,7 @@ def scaninputline(inputline): outmess('Creating build directory %s'%(buildpath)) os.mkdir(buildpath) if signsfile: - signsfile = os.path.join(buildpath,signsfile) + signsfile = os.path.join(buildpath, signsfile) if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: errmess('Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n'%(signsfile)) sys.exit() @@ -265,9 +265,9 @@ def scaninputline(inputline): options['wrapfuncs'] = wrapfuncs options['buildpath']=buildpath options['include_paths']=include_paths - return files,options + return files, options -def callcrackfortran(files,options): +def callcrackfortran(files, options): rules.options=options funcs=[] crackfortran.debug=options['debug'] @@ -287,7 +287,7 @@ def callcrackfortran(files,options): if options['signsfile'][-6:]=='stdout': sys.stdout.write(pyf) else: - f=open(options['signsfile'],'w') + f=open(options['signsfile'], 'w') f.write(pyf) f.close() if options["coutput"] is None: @@ -307,7 +307,7 @@ def callcrackfortran(files,options): def buildmodules(lst): cfuncs.buildcfuncs() outmess('Building modules...\n') - modules,mnames,isusedby=[],[],{} + modules, mnames, isusedby=[], [], {} for i in range(len(lst)): if '__user__' in lst[i]['name']: cb_rules.buildcallbacks(lst[i]) @@ -322,7 +322,7 @@ def buildmodules(lst): ret = {} for i in range(len(mnames)): if mnames[i] in isusedby: - outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(['"%s"'%s for s in isusedby[mnames[i]]]))) + outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i], ','.join(['"%s"'%s for s in isusedby[mnames[i]]]))) else: um=[] if 'use' in modules[i]: @@ -330,13 +330,13 @@ def buildmodules(lst): if u in isusedby and u in mnames: um.append(modules[mnames.index(u)]) else: - outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i],u)) + outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i], u)) ret[mnames[i]] = {} - dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um)) + dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um)) return ret -def dict_append(d_out,d_in): - for (k,v) in d_in.items(): +def dict_append(d_out, d_in): + for (k, v) in d_in.items(): if k not in d_out: d_out[k] = [] if isinstance(v, list): @@ -354,11 +354,11 @@ def run_main(comline_list): else: reload(crackfortran) f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__)) - fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h') - fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c') - files,options=scaninputline(comline_list) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + files, options=scaninputline(comline_list) auxfuncs.options=options - postlist=callcrackfortran(files,options) + postlist=callcrackfortran(files, options) isusedby={} for i in range(len(postlist)): if 'use' in postlist[i]: @@ -370,11 +370,11 @@ def run_main(comline_list): if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']: if postlist[i]['name'] in isusedby: #if not quiet: - outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(['"%s"'%s for s in isusedby[postlist[i]['name']]]))) + outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'], ','.join(['"%s"'%s for s in isusedby[postlist[i]['name']]]))) if 'signsfile' in options: if options['verbose']>1: outmess('Stopping. 
Edit the signature file and then run f2py on the signature file: ') - outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile'])) + outmess('%s %s\n'%(os.path.basename(sys.argv[0]), options['signsfile'])) return for i in range(len(postlist)): if postlist[i]['block']!='python module': @@ -388,14 +388,14 @@ def run_main(comline_list): ret=buildmodules(postlist) for mn in ret.keys(): - dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc}) + dict_append(ret[mn], {'csrc':fobjcsrc,'h':fobjhsrc}) return ret def filter_files(prefix,suffix,files,remove_prefix=None): """ Filter files by prefix and suffix. """ - filtered,rest = [],[] + filtered, rest = [], [] match = re.compile(prefix+r'.*'+suffix+r'\Z').match if remove_prefix: ind = len(prefix) @@ -404,7 +404,7 @@ def filter_files(prefix,suffix,files,remove_prefix=None): for file in [x.strip() for x in files]: if match(file): filtered.append(file[ind:]) else: rest.append(file) - return filtered,rest + return filtered, rest def get_prefix(module): p = os.path.dirname(os.path.dirname(module.__file__)) @@ -442,7 +442,7 @@ def run_compile(): f2py_flags2 = [] fl = 0 for a in sys.argv[1:]: - if a in ['only:','skip:']: + if a in ['only:', 'skip:']: fl = 1 elif a==':': fl = 0 @@ -483,7 +483,7 @@ def run_compile(): for s in del_list: i = flib_flags.index(s) del flib_flags[i] - assert len(flib_flags)<=2,repr(flib_flags) + assert len(flib_flags)<=2, repr(flib_flags) _reg5 = re.compile(r'[-][-](verbose)') setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] @@ -499,39 +499,39 @@ def run_compile(): if optname in sys.argv: i = sys.argv.index (optname) f2py_flags.extend (sys.argv[i:i+2]) - del sys.argv[i+1],sys.argv[i] + del sys.argv[i+1], sys.argv[i] sources = sys.argv[1:] if '-m' in sys.argv: i = sys.argv.index('-m') modulename = sys.argv[i+1] - del sys.argv[i+1],sys.argv[i] + del sys.argv[i+1], sys.argv[i] sources = sys.argv[1:] else: from numpy.distutils.command.build_src import get_f2py_modulename - pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources) + pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources) sources = pyf_files + sources for f in pyf_files: modulename = get_f2py_modulename(f) if modulename: break - extra_objects, sources = filter_files('','[.](o|a|so)',sources) - include_dirs, sources = filter_files('-I','',sources,remove_prefix=1) - library_dirs, sources = filter_files('-L','',sources,remove_prefix=1) - libraries, sources = filter_files('-l','',sources,remove_prefix=1) - undef_macros, sources = filter_files('-U','',sources,remove_prefix=1) - define_macros, sources = filter_files('-D','',sources,remove_prefix=1) + extra_objects, sources = filter_files('', '[.](o|a|so)', sources) + include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) using_numarray = 0 using_numeric = 0 for i in range(len(define_macros)): - name_value = define_macros[i].split('=',1) + name_value = define_macros[i].split('=', 1) if len(name_value)==1: name_value.append(None) if len(name_value)==2: define_macros[i] = tuple(name_value) else: - print('Invalid use of -D:',name_value) + print('Invalid use of -D:', name_value) from numpy.distutils.system_info import get_info @@ -544,10 +544,10 @@ def run_compile(): #num_info = 
{'include_dirs': get_numpy_include_dirs()} if num_info: - include_dirs.extend(num_info.get('include_dirs',[])) + include_dirs.extend(num_info.get('include_dirs', [])) - from numpy.distutils.core import setup,Extension - ext_args = {'name':modulename,'sources':sources, + from numpy.distutils.core import setup, Extension + ext_args = {'name': modulename, 'sources': sources, 'include_dirs': include_dirs, 'library_dirs': library_dirs, 'libraries': libraries, @@ -569,9 +569,9 @@ def run_compile(): ext = Extension(**ext_args) sys.argv = [sys.argv[0]] + setup_flags sys.argv.extend(['build', - '--build-temp',build_dir, - '--build-base',build_dir, - '--build-platlib','.']) + '--build-temp', build_dir, + '--build-base', build_dir, + '--build-platlib', '.']) if fc_flags: sys.argv.extend(['config_fc']+fc_flags) if flib_flags: diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py index e5ba8a9d5..4cec4baad 100644 --- a/numpy/f2py/f2py_testing.py +++ b/numpy/f2py/f2py_testing.py @@ -15,10 +15,10 @@ def cmdline(): else: args.append(a) f2py_opts = ' '.join(args) - return repeat,f2py_opts + return repeat, f2py_opts def run(runtest,test_functions,repeat=1): - l = [(t,repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] + l = [(t, repr(t.__doc__.split('\n')[1].strip())) for t in test_functions] #l = [(t,'') for t in test_functions] start_memusage = memusage() diff_memusage = None @@ -26,7 +26,7 @@ def run(runtest,test_functions,repeat=1): i = 0 while i<repeat: i += 1 - for t,fname in l: + for t, fname in l: runtest(t) if start_memusage is None: continue if diff_memusage is None: @@ -39,8 +39,8 @@ def run(runtest,test_functions,repeat=1): fname) diff_memusage = diff_memusage2 current_memusage = memusage() - print('run',repeat*len(test_functions),'tests',\ + print('run', repeat*len(test_functions), 'tests',\ 'in %.2f seconds' % ((jiffies()-start_jiffies)/100.0)) if start_memusage: - print('initial virtual memory size:',start_memusage,'bytes') - print('current virtual memory size:',current_memusage,'bytes') + print('initial virtual memory size:', start_memusage, 'bytes') + print('current virtual memory size:', current_memusage, 'bytes') diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 7e25a4930..758cad58a 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -83,19 +83,19 @@ fgetdims2_sa="""\ def buildhooks(pymod): - global fgetdims1,fgetdims2 + global fgetdims1, fgetdims2 from . 
import rules ret = {'f90modhooks':[],'initf90modhooks':[],'body':[], - 'need':['F_FUNC','arrayobject.h'], + 'need':['F_FUNC', 'arrayobject.h'], 'separatorsfor':{'includes0':'\n','includes':'\n'}, 'docs':['"Fortran 90/95 modules:\\n"'], 'latexdoc':[]} fhooks=[''] - def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0],line) + def fadd(line,s=fhooks): s[0] = '%s\n %s'%(s[0], line) doc = [''] - def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0],line) + def dadd(line,s=doc): s[0] = '%s\n%s'%(s[0], line) for m in findf90modules(pymod): - sargs,fargs,efargs,modobjs,notvars,onlyvars=[],[],[],[],[m['name']],[] + sargs, fargs, efargs, modobjs, notvars, onlyvars=[], [], [], [], [m['name']], [] sargsp = [] ifargs = [] mfargs = [] @@ -103,16 +103,16 @@ def buildhooks(pymod): for b in m['body']: notvars.append(b['name']) for n in m['vars'].keys(): var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide,isprivate)(var)): + if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) outmess('\t\tConstructing F90 module support for "%s"...\n'%(m['name'])) if onlyvars: outmess('\t\t Variables: %s\n'%(' '.join(onlyvars))) chooks=[''] - def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0],line) + def cadd(line,s=chooks): s[0] = '%s\n%s'%(s[0], line) ihooks=[''] - def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line) + def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0], line) vrd=capi_maps.modsign2map(m) cadd('static FortranDataDef f2py_%s_def[] = {'%(m['name'])) @@ -128,33 +128,33 @@ def buildhooks(pymod): modobjs.append(n) ct = capi_maps.getctype(var) at = capi_maps.c2capi_map[ct] - dm = capi_maps.getarrdims(n,var) - dms = dm['dims'].replace('*','-1').strip() - dms = dms.replace(':','-1').strip() + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() if not dms: dms='-1' use_fgetdims2 = fgetdims2 if isstringarray(var): if 'charselector' in var and 'len' in var['charselector']: cadd('\t{"%s",%s,{{%s,%s}},%s},'\ - %(undo_rmbadname1(n),dm['rank'],dms,var['charselector']['len'],at)) + %(undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at)) use_fgetdims2 = fgetdims2_sa else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at)) + cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n), dm['rank'], dms, at)) else: - cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n),dm['rank'],dms,at)) - dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n,var))) + cadd('\t{"%s",%s,{{%s}},%s},'%(undo_rmbadname1(n), dm['rank'], dms, at)) + dadd('\\item[]{{}\\verb@%s@{}}'%(capi_maps.getarrdocsign(n, var))) if hasnote(var): note = var['note'] if isinstance(note, list): note='\n'.join(note) dadd('--- %s'%(note)) if isallocatable(var): - fargs.append('f2py_%s_getdims_%s'%(m['name'],n)) + fargs.append('f2py_%s_getdims_%s'%(m['name'], n)) efargs.append(fargs[-1]) sargs.append('void (*%s)(int*,int*,void(*)(char*,int*),int*)'%(n)) sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)') - iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'],n)) + iadd('\tf2py_%s_def[i_f2py++].func = %s;'%(m['name'], n)) fadd('subroutine %s(r,s,f2pysetdata,flag)'%(fargs[-1])) - fadd('use %s, only: d => %s\n'%(m['name'],undo_rmbadname1(n))) + fadd('use %s, only: d => %s\n'%(m['name'], undo_rmbadname1(n))) fadd('integer flag\n') fhooks[0]=fhooks[0]+fgetdims1 dms = eval('range(1,%s+1)'%(dm['rank'])) @@ -165,27 +165,27 @@ def buildhooks(pymod): fargs.append(n) sargs.append('char *%s'%(n)) sargsp.append('char*') - 
iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],n)) + iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'], n)) if onlyvars: dadd('\\end{description}') if hasbody(m): for b in m['body']: if not isroutine(b): - print('Skipping',b['block'],b['name']) + print('Skipping', b['block'], b['name']) continue modobjs.append('%s()'%(b['name'])) b['modulename'] = m['name'] - api,wrap=rules.buildapi(b) + api, wrap=rules.buildapi(b) if isfunction(b): fhooks[0]=fhooks[0]+wrap - fargs.append('f2pywrap_%s_%s'%(m['name'],b['name'])) + fargs.append('f2pywrap_%s_%s'%(m['name'], b['name'])) #efargs.append(fargs[-1]) - ifargs.append(func2subr.createfuncwrapper(b,signature=1)) + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) else: if wrap: fhooks[0]=fhooks[0]+wrap - fargs.append('f2pywrap_%s_%s'%(m['name'],b['name'])) - ifargs.append(func2subr.createsubrwrapper(b,signature=1)) + fargs.append('f2pywrap_%s_%s'%(m['name'], b['name'])) + ifargs.append(func2subr.createsubrwrapper(b, signature=1)) else: fargs.append(b['name']) mfargs.append(fargs[-1]) @@ -193,35 +193,35 @@ def buildhooks(pymod): # outmess('\t\t\tapplying --external-modroutines for %s\n'%(b['name'])) # efargs.append(fargs[-1]) api['externroutines']=[] - ar=applyrules(api,vrd) + ar=applyrules(api, vrd) ar['docs']=[] ar['docshort']=[] - ret=dictappend(ret,ar) - cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'],m['name'],b['name'],m['name'],b['name'])) + ret=dictappend(ret, ar) + cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},'%(b['name'], m['name'], b['name'], m['name'], b['name'])) sargs.append('char *%s'%(b['name'])) sargsp.append('char *') - iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'],b['name'])) + iadd('\tf2py_%s_def[i_f2py++].data = %s;'%(m['name'], b['name'])) cadd('\t{NULL}\n};\n') iadd('}') - ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'],','.join(sargs),ihooks[0]) + ihooks[0]='static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s'%(m['name'], ','.join(sargs), ihooks[0]) if '_' in m['name']: F_FUNC='F_FUNC_US' else: F_FUNC='F_FUNC' iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'\ - %(F_FUNC,m['name'],m['name'].upper(),','.join(sargsp))) + %(F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) iadd('static void f2py_init_%s(void) {'%(m['name'])) iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\ - %(F_FUNC,m['name'],m['name'].upper(),m['name'])) + %(F_FUNC, m['name'], m['name'].upper(), m['name'])) iadd('}\n') ret['f90modhooks']=ret['f90modhooks']+chooks+ihooks - ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'],m['name'],m['name'])]+ret['initf90modhooks'] + ret['initf90modhooks']=['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));'%(m['name'], m['name'], m['name'])]+ret['initf90modhooks'] fadd('') fadd('subroutine f2pyinit%s(f2pysetupfunc)'%(m['name'])) #fadd('use %s'%(m['name'])) if mfargs: for a in undo_rmbadname(mfargs): - fadd('use %s, only : %s'%(m['name'],a)) + fadd('use %s, only : %s'%(m['name'], a)) if ifargs: fadd(' '.join(['interface']+ifargs)) fadd('end interface') @@ -232,7 +232,7 @@ def buildhooks(pymod): fadd('call f2pysetupfunc(%s)'%(','.join(undo_rmbadname(fargs)))) fadd('end subroutine f2pyinit%s\n'%(m['name'])) - dadd('\n'.join(ret['latexdoc']).replace(r'\subsection{',r'\subsubsection{')) + dadd('\n'.join(ret['latexdoc']).replace(r'\subsection{', 
r'\subsubsection{')) ret['latexdoc']=[] ret['docs'].append('"\t%s --- %s"'%(m['name'], @@ -243,4 +243,4 @@ def buildhooks(pymod): ret['docshort']=[] ret['latexdoc']=doc[0] if len(ret['docs'])<=1: ret['docs']='' - return ret,fhooks[0] + return ret, fhooks[0] diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index f69750dd0..22f60851d 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -40,7 +40,7 @@ def var2fixfortran(vars,a,fa=None,f90mode=None): return '' vardef=vars[a]['typespec'] if vardef=='type' and 'typename' in vars[a]: - vardef='%s(%s)'%(vardef,vars[a]['typename']) + vardef='%s(%s)'%(vardef, vars[a]['typename']) selector={} lk = '' if 'kindselector' in vars[a]: @@ -51,28 +51,28 @@ def var2fixfortran(vars,a,fa=None,f90mode=None): lk = 'len' if '*' in selector: if f90mode: - if selector['*'] in ['*',':','(*)']: + if selector['*'] in ['*', ':', '(*)']: vardef='%s(len=*)'%(vardef) else: - vardef='%s(%s=%s)'%(vardef,lk,selector['*']) + vardef='%s(%s=%s)'%(vardef, lk, selector['*']) else: - if selector['*'] in ['*',':']: - vardef='%s*(%s)'%(vardef,selector['*']) + if selector['*'] in ['*', ':']: + vardef='%s*(%s)'%(vardef, selector['*']) else: - vardef='%s*%s'%(vardef,selector['*']) + vardef='%s*%s'%(vardef, selector['*']) else: if 'len' in selector: - vardef='%s(len=%s'%(vardef,selector['len']) + vardef='%s(len=%s'%(vardef, selector['len']) if 'kind' in selector: - vardef='%s,kind=%s)'%(vardef,selector['kind']) + vardef='%s,kind=%s)'%(vardef, selector['kind']) else: vardef='%s)'%(vardef) elif 'kind' in selector: - vardef='%s(kind=%s)'%(vardef,selector['kind']) + vardef='%s(kind=%s)'%(vardef, selector['kind']) - vardef='%s %s'%(vardef,fa) + vardef='%s %s'%(vardef, fa) if 'dimension' in vars[a]: - vardef='%s(%s)'%(vardef,','.join(vars[a]['dimension'])) + vardef='%s(%s)'%(vardef, ','.join(vars[a]['dimension'])) return vardef def createfuncwrapper(rout,signature=0): @@ -82,7 +82,7 @@ def createfuncwrapper(rout,signature=0): vars = rout['vars'] for a in rout['args']: v = rout['vars'][a] - for i,d in enumerate(v.get('dimension',[])): + for i, d in enumerate(v.get('dimension', [])): if d==':': dn = 'f2py_%s_d%s' % (a, i) dv = dict(typespec='integer', intent=['hide']) @@ -92,10 +92,10 @@ def createfuncwrapper(rout,signature=0): v['dimension'][i] = dn rout['args'].extend(extra_args) need_interface = bool(extra_args) - + ret = [''] def add(line,ret=ret): - ret[0] = '%s\n %s'%(ret[0],line) + ret[0] = '%s\n %s'%(ret[0], line) name = rout['name'] fortranname = getfortranname(rout) f90mode = ismoduleroutine(rout) @@ -107,22 +107,22 @@ def createfuncwrapper(rout,signature=0): else: args = [newname]+rout['args'] - l = var2fixfortran(vars,name,newname,f90mode) + l = var2fixfortran(vars, name, newname, f90mode) return_char_star = 0 if l[:13]=='character*(*)': return_char_star = 1 if f90mode: l = 'character(len=10)'+l[13:] else: l = 'character*10'+l[13:] charselect = vars[name]['charselector'] - if charselect.get('*','')=='(*)': + if charselect.get('*', '')=='(*)': charselect['*'] = '10' sargs = ', '.join(args) if f90mode: - add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs)) + add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs)) if not signature: - add('use %s, only : %s'%(rout['modulename'],fortranname)) + add('use %s, only : %s'%(rout['modulename'], fortranname)) else: - add('subroutine f2pywrap%s (%s)'%(name,sargs)) + add('subroutine f2pywrap%s (%s)'%(name, sargs)) if not need_interface: add('external %s'%(fortranname)) l = l + ', 
'+fortranname @@ -140,17 +140,17 @@ def createfuncwrapper(rout,signature=0): for a in args: if a in dumped_args: continue if isscalar(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) + add(var2fixfortran(vars, a, f90mode=f90mode)) dumped_args.append(a) for a in args: if a in dumped_args: continue if isintent_in(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) + add(var2fixfortran(vars, a, f90mode=f90mode)) dumped_args.append(a) for a in args: if a in dumped_args: continue - add(var2fixfortran(vars,a,f90mode=f90mode)) - + add(var2fixfortran(vars, a, f90mode=f90mode)) + add(l) if need_interface: @@ -166,11 +166,11 @@ def createfuncwrapper(rout,signature=0): if not signature: if islogicalfunction(rout): - add('%s = .not.(.not.%s(%s))'%(newname,fortranname,sargs)) + add('%s = .not.(.not.%s(%s))'%(newname, fortranname, sargs)) else: - add('%s = %s(%s)'%(newname,fortranname,sargs)) + add('%s = %s(%s)'%(newname, fortranname, sargs)) if f90mode: - add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name)) + add('end subroutine f2pywrap_%s_%s'%(rout['modulename'], name)) else: add('end') #print '**'*10 @@ -185,7 +185,7 @@ def createsubrwrapper(rout,signature=0): vars = rout['vars'] for a in rout['args']: v = rout['vars'][a] - for i,d in enumerate(v.get('dimension',[])): + for i, d in enumerate(v.get('dimension', [])): if d==':': dn = 'f2py_%s_d%s' % (a, i) dv = dict(typespec='integer', intent=['hide']) @@ -198,7 +198,7 @@ def createsubrwrapper(rout,signature=0): ret = [''] def add(line,ret=ret): - ret[0] = '%s\n %s'%(ret[0],line) + ret[0] = '%s\n %s'%(ret[0], line) name = rout['name'] fortranname = getfortranname(rout) f90mode = ismoduleroutine(rout) @@ -207,11 +207,11 @@ def createsubrwrapper(rout,signature=0): sargs = ', '.join(args) if f90mode: - add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'],name,sargs)) + add('subroutine f2pywrap_%s_%s (%s)'%(rout['modulename'], name, sargs)) if not signature: - add('use %s, only : %s'%(rout['modulename'],fortranname)) + add('use %s, only : %s'%(rout['modulename'], fortranname)) else: - add('subroutine f2pywrap%s (%s)'%(name,sargs)) + add('subroutine f2pywrap%s (%s)'%(name, sargs)) if not need_interface: add('external %s'%(fortranname)) @@ -228,11 +228,11 @@ def createsubrwrapper(rout,signature=0): for a in args: if a in dumped_args: continue if isscalar(vars[a]): - add(var2fixfortran(vars,a,f90mode=f90mode)) + add(var2fixfortran(vars, a, f90mode=f90mode)) dumped_args.append(a) for a in args: if a in dumped_args: continue - add(var2fixfortran(vars,a,f90mode=f90mode)) + add(var2fixfortran(vars, a, f90mode=f90mode)) if need_interface: if f90mode: @@ -246,9 +246,9 @@ def createsubrwrapper(rout,signature=0): sargs = ', '.join([a for a in args if a not in extra_args]) if not signature: - add('call %s(%s)'%(fortranname,sargs)) + add('call %s(%s)'%(fortranname, sargs)) if f90mode: - add('end subroutine f2pywrap_%s_%s'%(rout['modulename'],name)) + add('end subroutine f2pywrap_%s_%s'%(rout['modulename'], name)) else: add('end') #print '**'*10 @@ -261,7 +261,7 @@ def assubr(rout): if isfunction_wrap(rout): fortranname = getfortranname(rout) name = rout['name'] - outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name,fortranname)) + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n'%(name, fortranname)) rout = copy.copy(rout) fname = name rname = fname @@ -281,12 +281,11 @@ def assubr(rout): if flag: fvar['intent'].append('out=%s' % (rname)) rout['args'][:] = [fname] + rout['args'] - return 
rout,createfuncwrapper(rout) + return rout, createfuncwrapper(rout) if issubroutine_wrap(rout): fortranname = getfortranname(rout) name = rout['name'] - outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n'%(name,fortranname)) + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n'%(name, fortranname)) rout = copy.copy(rout) - return rout,createsubrwrapper(rout) - return rout,'' - + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index f7f82fc99..4c186712c 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -81,18 +81,18 @@ sepdict={} for k in ['decl', 'frompyobj', 'cleanupfrompyobj', - 'topyarr','method', - 'pyobjfrom','closepyobjfrom', + 'topyarr', 'method', + 'pyobjfrom', 'closepyobjfrom', 'freemem', 'userincludes', - 'includes0','includes','typedefs','typedefs_generated', - 'cppmacros','cfuncs','callbacks', + 'includes0', 'includes', 'typedefs', 'typedefs_generated', + 'cppmacros', 'cfuncs', 'callbacks', 'latexdoc', 'restdoc', - 'routine_defs','externroutines', + 'routine_defs', 'externroutines', 'initf2pywraphooks', - 'commonhooks','initcommonhooks', - 'f90modhooks','initf90modhooks']: + 'commonhooks', 'initcommonhooks', + 'f90modhooks', 'initf90modhooks']: sepdict[k]='\n' #################### Rules for C/API module ################# @@ -243,16 +243,16 @@ PyMODINIT_FUNC init#modulename#(void) { } defmod_rules=[ - {'body':'/*eof body*/', - 'method':'/*eof method*/', - 'externroutines':'/*eof externroutines*/', - 'routine_defs':'/*eof routine_defs*/', - 'initf90modhooks':'/*eof initf90modhooks*/', - 'initf2pywraphooks':'/*eof initf2pywraphooks*/', - 'initcommonhooks':'/*eof initcommonhooks*/', - 'latexdoc':'', - 'restdoc':'', - 'modnote':{hasnote:'#note#',l_not(hasnote):''}, + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote:'#note#',l_not(hasnote):''}, } ] @@ -320,7 +320,7 @@ f2py_stop_clock(); 'doc':'#docreturn##name#(#docsignature#)', 'docshort':'#docreturn##name#(#docsignatureshort#)', 'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n', - 'need':['arrayobject.h','CFUNCSMESS','MINMAX'], + 'need':['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], 'cppmacros':{debugcapi:'#define DEBUGCFUNCS'}, 'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n', """ @@ -338,65 +338,65 @@ f2py_stop_clock(); rout_rules=[ { # Init - 'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n', - 'routdebugleave':'\n','routdebugfailure':'\n', - 'setjmpbuf':' || ', - 'docstrreq':'\n','docstropt':'\n','docstrout':'\n', - 'docstrcbs':'\n','docstrsigns':'\\n"\n"', - 'latexdocstrsigns':'\n', - 'latexdocstrreq':'\n','latexdocstropt':'\n', - 'latexdocstrout':'\n','latexdocstrcbs':'\n', + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', }, - 'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'', - 'docsign':'','docsignopt':'','decl':'/*decl*/', 
- 'freemem':'/*freemem*/', - 'docsignshort':'','docsignoptshort':'', - 'docstrsigns':'','latexdocstrsigns':'', - 'docstrreq':'\\nParameters\\n----------', - 'docstropt':'\\nOther Parameters\\n----------------', - 'docstrout':'\\nReturns\\n-------', - 'docstrcbs':'\\nNotes\\n-----\\nCall-back functions::\\n', - 'latexdocstrreq':'\\noindent Required arguments:', - 'latexdocstropt':'\\noindent Optional arguments:', - 'latexdocstrout':'\\noindent Return objects:', - 'latexdocstrcbs':'\\noindent Call-back functions:', - 'args_capi':'','keys_capi':'','functype':'', - 'frompyobj':'/*frompyobj*/', - 'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed - 'pyobjfrom':'/*pyobjfrom*/', - 'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed - 'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/', - 'routdebugenter':'/*routdebugenter*/', - 'routdebugfailure':'/*routdebugfailure*/', - 'callfortranroutine':'/*callfortranroutine*/', - 'argformat':'','keyformat':'','need_cfuncs':'', - 'docreturn':'','return':'','returnformat':'','rformat':'', - 'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'', - 'initf2pywraphook':'', - 'routnote':{hasnote:'--- #note#',l_not(hasnote):''}, - },{ + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], #this list will be reversed + 'pyobjfrom': '/*pyobjfrom*/', + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], #this list will be reversed + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote:'--- #note#',l_not(hasnote):''}, + }, { 'apiname':'f2py_rout_#modulename#_#name#', 'pyname':'#modulename#.#name#', 'decl':'', '_check':l_not(ismoduleroutine) - },{ + }, { 'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#', 'pyname':'#modulename#.#f90modulename#.#name#', 'decl':'', '_check':ismoduleroutine - },{ # Subroutine - 'functype':'void', - 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);', + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);', ismoduleroutine:'', isdummyroutine:'' }, - 'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, - 'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'}, - 'callfortranroutine':[ + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'F_FUNC'}, + 'callfortranroutine': [ {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, {hasexternals:"""\ \t\tif (#setjmpbuf#) { @@ -405,21 +405,21 @@ rout_rules=[ {isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'}, {hascallstatement:'''\t\t\t\t#callstatement#; \t\t\t\t/*(*f2py_func)(#callfortran#);*/'''}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'}, {isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'}, {hasexternals:"""\t\t}"""} ], - '_check':l_and(issubroutine,l_not(issubroutine_wrap)), - },{ # Wrapped function - 'functype':'void', - 'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine:'', + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', }, - 'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, - 'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):''' + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):''' { extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); @@ -431,30 +431,30 @@ rout_rules=[ #endif } '''}, - 'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']}, - 'callfortranroutine':[ + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ 
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, {hasexternals:"""\ \tif (#setjmpbuf#) { \t\tf2py_success = 0; \t} else {"""}, {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, {hasexternals:'\t}'} ], - '_check':isfunction_wrap, - },{ # Wrapped subroutine - 'functype':'void', - 'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', - isdummyroutine:'', + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', }, - 'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, - 'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):''' + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):''' { extern void #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); @@ -466,46 +466,46 @@ rout_rules=[ #endif } '''}, - 'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']}, - 'callfortranroutine':[ + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ {debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, {hasexternals:"""\ \tif (#setjmpbuf#) { \t\tf2py_success = 0; \t} else {"""}, {isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'}, {hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'}, {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, {hasexternals:'\t}'} ], - '_check':issubroutine_wrap, - },{ # Function + '_check': issubroutine_wrap, + }, { # Function 'functype':'#ctype#', 'docreturn':{l_not(isintent_hide):'#rname#,'}, 'docstrout':'#pydocsignout#', 'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}', {hasresultnote:'--- #resultnote#'}], - 'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\ + 'callfortranroutine':[{l_and(debugcapi, isstringfunction):"""\ #ifdef USESCOMPAQFORTRAN \tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); #else \tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); #endif """}, - {l_and(debugcapi,l_not(isstringfunction)):"""\ + {l_and(debugcapi, l_not(isstringfunction)):"""\ \tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); """} ], - '_check':l_and(isfunction,l_not(isfunction_wrap)) - },{ # Scalar 
function - 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);', + '_check':l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);', isdummyroutine:'' }, - 'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', - isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', + 'routine_def':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},', }, 'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};', l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'}, @@ -520,11 +520,11 @@ rout_rules=[ {hascallstatement:'''\t#callstatement#; /*\t#name#_return_value = (*f2py_func)(#callfortran#);*/ '''}, - {l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'}, + {l_not(l_or(hascallstatement, isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'}, {isthreadsafe:'\tPy_END_ALLOW_THREADS'}, {hasexternals:'\t}'}, - {l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, - {l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + {l_and(debugcapi, iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], 'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, 'need':[{l_not(isdummyroutine):'F_FUNC'}, {iscomplexfunction:'pyobj_from_#ctype#1'}, @@ -532,14 +532,14 @@ rout_rules=[ {islong_doublefunction:'long_double'}], 'returnformat':{l_not(isintent_hide):'#rformat#'}, 'return':{iscomplexfunction:',#name#_return_value_capi', - l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'}, - '_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap)) - },{ # String function # in use for --no-wrap + l_not(l_or(iscomplexfunction, isintent_hide)):',#name#_return_value'}, + '_check':l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap 'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', - 
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)): + 'routine_def':{l_not(l_or(ismoduleroutine, isintent_c)): # '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},', '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', - l_and(l_not(ismoduleroutine),isintent_c): + l_and(l_not(ismoduleroutine), isintent_c): # '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},' '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' }, @@ -574,8 +574,8 @@ rout_rules=[ 'returnformat':'#rformat#', 'return':',#name#_return_value', 'freemem':'\tSTRINGFREE(#name#_return_value);', - 'need':['F_FUNC','#ctype#','STRINGFREE'], - '_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete + 'need':['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete }, { # Debugging 'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', @@ -587,17 +587,17 @@ rout_rules=[ ################ Rules for arguments ################## -typedef_need_dict = {islong_long:'long_long', - islong_double:'long_double', - islong_complex:'complex_long_double', - isunsigned_char:'unsigned_char', - isunsigned_short:'unsigned_short', - isunsigned:'unsigned', - isunsigned_long_long:'unsigned_long_long', - isunsigned_chararray:'unsigned_char', - isunsigned_shortarray:'unsigned_short', - isunsigned_long_longarray:'unsigned_long_long', - issigned_long_longarray:'long_long', +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', } aux_rules=[ @@ -605,24 +605,24 @@ aux_rules=[ 'separatorsfor':sepdict }, { # Common - 'frompyobj':['\t/* Processing auxiliary variable #varname# */', + 'frompyobj': ['\t/* Processing auxiliary variable #varname# */', {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], - 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */', - 'need':typedef_need_dict, + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + 'need': typedef_need_dict, }, # Scalars (not complex) { # Common - 'decl':'\t#ctype# #varname# = 0;', - 'need':{hasinitvalue:'math.h'}, - 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, - '_check':l_and(isscalar,l_not(iscomplex)), + 'decl': '\t#ctype# #varname# = 0;', + 'need': {hasinitvalue:'math.h'}, + 'frompyobj': {hasinitvalue:'\t#varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), }, { - 'return':',#varname#', - 'docstrout':'#pydocsignout#', - 'docreturn':'#outvarname#,', - 'returnformat':'#varrformat#', - '_check':l_and(isscalar,l_not(iscomplex),isintent_out), + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), }, # Complex scalars { # Common @@ -644,14 +644,14 @@ aux_rules=[ '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', '\tconst int #varname#_Rank = #rank#;', ], - 'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}], + 
'need':['len..', {hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}], '_check':isarray }, # Scalararray { # Common - '_check':l_and(isarray,l_not(iscomplexarray)) - },{ # Not hidden - '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide) + '_check':l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide) }, # Integer*1 array {'need':'#ctype#', @@ -691,37 +691,37 @@ arg_rules=[ 'separatorsfor':sepdict }, { # Common - 'frompyobj':['\t/* Processing variable #varname# */', + 'frompyobj': ['\t/* Processing variable #varname# */', {debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},], - 'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */', - '_depend':'', - 'need':typedef_need_dict, + 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, }, # Doc signatures { - 'docstropt':{l_and(isoptional,isintent_nothide):'#pydocsign#'}, - 'docstrreq':{l_and(isrequired,isintent_nothide):'#pydocsign#'}, + 'docstropt':{l_and(isoptional, isintent_nothide):'#pydocsign#'}, + 'docstrreq':{l_and(isrequired, isintent_nothide):'#pydocsign#'}, 'docstrout':{isintent_out:'#pydocsignout#'}, - 'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + 'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, - 'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', + 'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}', {hasnote:'--- #note#'}]}, 'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}', - {l_and(hasnote,isintent_hide):'--- #note#', - l_and(hasnote,isintent_nothide):'--- See above.'}]}, + {l_and(hasnote, isintent_hide):'--- #note#', + l_and(hasnote, isintent_nothide):'--- See above.'}]}, 'depend':'' }, # Required/Optional arguments { 'kwlist':'"#varname#",', 'docsign':'#varname#,', - '_check':l_and(isintent_nothide,l_not(isoptional)) + '_check':l_and(isintent_nothide, l_not(isoptional)) }, { 'kwlistopt':'"#varname#",', 'docsignopt':'#varname#=#showinit#,', 'docsignoptshort':'#varname#,', - '_check':l_and(isintent_nothide,isoptional) + '_check':l_and(isintent_nothide, isoptional) }, # Docstring/BuildValue { @@ -752,7 +752,7 @@ arg_rules=[ 'keys_xa':',&PyTuple_Type,&#varname#_xa_capi', 'setjmpbuf':'(setjmp(#cbname#_jmpbuf))', 'callfortran':{l_not(isintent_callback):'#varname#_cptr,'}, - 'need':['#cbname#','setjmp.h'], + 'need':['#cbname#', 'setjmp.h'], '_check':isexternal }, { @@ -762,7 +762,7 @@ if(F2PyCapsule_Check(#varname#_capi)) { } else { #varname#_cptr = #cbname#; } -"""},{isintent_callback:"""\ +"""}, {isintent_callback:"""\ if (#varname#_capi==Py_None) { #varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); if (#varname#_capi) { @@ -814,7 +814,7 @@ if (#varname#_capi==Py_None) { \t\t#cbname#_nofargs = #varname#_nofargs_capi; \t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf)); \t}""", - 'need':['SWAP','create_cb_arglist'], + 'need':['SWAP', 'create_cb_arglist'], '_check':isexternal, '_depend':'' }, @@ -824,12 +824,12 @@ if (#varname#_capi==Py_None) { 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, 'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'}, 'return':{isintent_out:',#varname#'}, - '_check':l_and(isscalar,l_not(iscomplex)) - },{ - 'need':{hasinitvalue:'math.h'}, - 
'_check':l_and(isscalar,l_not(iscomplex)), + '_check':l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue:'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), #'_depend':'' - },{ # Not hidden + }, { # Not hidden 'decl':'\tPyObject *#varname#_capi = Py_None;', 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, @@ -840,8 +840,8 @@ if (#varname#_capi==Py_None) { \tif (f2py_success) {"""}, 'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide) - },{ + '_check':l_and(isscalar, l_not(iscomplex), isintent_nothide) + }, { 'frompyobj':[ # hasinitvalue... # if pyobj is None: @@ -860,7 +860,7 @@ if (#varname#_capi==Py_None) { # {hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else', '_depend':''}, - {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)', + {l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)', '_depend':''}, {l_not(islogical):'''\ \t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); @@ -872,18 +872,18 @@ if (#varname#_capi==Py_None) { ], 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/', 'need':{l_not(islogical):'#ctype#_from_pyobj'}, - '_check':l_and(isscalar,l_not(iscomplex),isintent_nothide), + '_check':l_and(isscalar, l_not(iscomplex), isintent_nothide), '_depend':'' # },{ # Hidden # '_check':l_and(isscalar,l_not(iscomplex),isintent_hide) - },{ # Hidden + }, { # Hidden 'frompyobj':{hasinitvalue:'\t#varname# = #init#;'}, 'need':typedef_need_dict, - '_check':l_and(isscalar,l_not(iscomplex),isintent_hide), + '_check':l_and(isscalar, l_not(iscomplex), isintent_hide), '_depend':'' - },{ # Common + }, { # Common 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, - '_check':l_and(isscalar,l_not(iscomplex)), + '_check':l_and(isscalar, l_not(iscomplex)), '_depend':'' }, # Complex scalars @@ -893,7 +893,7 @@ if (#varname#_capi==Py_None) { 'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, 'return':{isintent_out:',#varname#_capi'}, '_check':iscomplex - },{ # Not hidden + }, { # Not hidden 'decl':'\tPyObject *#varname#_capi = Py_None;', 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, @@ -904,29 +904,29 @@ if (#varname#_capi==Py_None) { \t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); \t\tif (f2py_success) {"""}, 'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"}, - '_check':l_and(iscomplex,isintent_nothide) - },{ + '_check':l_and(iscomplex, isintent_nothide) + }, { 'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, - {l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'}, + {l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'}, # '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");' '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' '\n\tif (f2py_success) {'], 'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/', 'need':['#ctype#_from_pyobj'], - '_check':l_and(iscomplex,isintent_nothide), + '_check':l_and(iscomplex, isintent_nothide), 
'_depend':'' - },{ # Hidden + }, { # Hidden 'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'}, - '_check':l_and(iscomplex,isintent_hide) - },{ + '_check':l_and(iscomplex, isintent_hide) + }, { 'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'}, - '_check':l_and(iscomplex,isintent_hide), + '_check':l_and(iscomplex, isintent_hide), '_depend':'' - },{ # Common + }, { # Common 'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'}, 'need':['pyobj_from_#ctype#1'], '_check':iscomplex - },{ + }, { 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, '_check':iscomplex, '_depend':'' @@ -943,7 +943,7 @@ if (#varname#_capi==Py_None) { 'return':{isintent_out:',#varname#'}, 'need':['len..'],#'STRINGFREE'], '_check':isstring - },{ # Common + }, { # Common 'frompyobj':"""\ \tslen(#varname#) = #length#; \tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\"); @@ -951,10 +951,10 @@ if (#varname#_capi==Py_None) { 'cleanupfrompyobj':"""\ \t\tSTRINGFREE(#varname#); \t} /*if (f2py_success) of #varname#*/""", - 'need':['#ctype#_from_pyobj','len..','STRINGFREE'], + 'need':['#ctype#_from_pyobj', 'len..', 'STRINGFREE'], '_check':isstring, '_depend':'' - },{ # Not hidden + }, { # Not hidden 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, 'args_capi':{isrequired:',&#varname#_capi'}, @@ -964,10 +964,10 @@ if (#varname#_capi==Py_None) { \tif (f2py_success) {'''}, 'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'}, 'need':{isintent_inout:'try_pyarr_from_#ctype#'}, - '_check':l_and(isstring,isintent_nothide) - },{ # Hidden - '_check':l_and(isstring,isintent_hide) - },{ + '_check':l_and(isstring, isintent_nothide) + }, { # Hidden + '_check':l_and(isstring, isintent_hide) + }, { 'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, '_check':isstring, '_depend':'' @@ -984,38 +984,38 @@ if (#varname#_capi==Py_None) { 'return':{isintent_out:',capi_#varname#_tmp'}, 'need':'len..', '_check':isarray - },{ # intent(overwrite) array - 'decl':'\tint capi_overwrite_#varname# = 1;', - 'kwlistxa':'"overwrite_#varname#",', - 'xaformat':'i', - 'keys_xa':',&capi_overwrite_#varname#', - 'docsignxa':'overwrite_#varname#=1,', - 'docsignxashort':'overwrite_#varname#,', - 'docstropt':'overwrite_#varname# : input int, optional\\n Default: 1', - '_check':l_and(isarray,isintent_overwrite), - },{ - 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check':l_and(isarray,isintent_overwrite), - '_depend':'', + }, { # intent(overwrite) array + 'decl': '\tint capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', }, { # intent(copy) array - 'decl':'\tint capi_overwrite_#varname# = 0;', - 'kwlistxa':'"overwrite_#varname#",', - 'xaformat':'i', - 'keys_xa':',&capi_overwrite_#varname#', - 'docsignxa':'overwrite_#varname#=0,', - 
'docsignxashort':'overwrite_#varname#,', - 'docstropt':'overwrite_#varname# : input int, optional\\n Default: 0', - '_check':l_and(isarray,isintent_copy), - },{ - 'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', - '_check':l_and(isarray,isintent_copy), - '_depend':'', - },{ - 'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}], + 'decl': '\tint capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need':[{hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}], '_check':isarray, '_depend':'' - },{ # Not hidden + }, { # Not hidden 'decl':'\tPyObject *#varname#_capi = Py_None;', 'argformat':{isrequired:'O'}, 'keyformat':{isoptional:'O'}, @@ -1036,8 +1036,8 @@ if (#varname#_capi==Py_None) { # */ # """}, # 'need':{isintent_inout:'copy_ND_array'}, - '_check':l_and(isarray,isintent_nothide) - },{ + '_check':l_and(isarray, isintent_nothide) + }, { 'frompyobj':['\t#setdims#;', '\tcapi_#varname#_intent |= #intent#;', {isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'}, @@ -1069,10 +1069,10 @@ if (#varname#_capi==Py_None) { ], 'cleanupfrompyobj':[ # note that this list will be reversed '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/', - {l_not(l_or(isintent_out,isintent_hide)):"""\ + {l_not(l_or(isintent_out, isintent_hide)):"""\ \tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) { \t\tPy_XDECREF(capi_#varname#_tmp); }"""}, - {l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""}, + {l_and(isintent_hide, l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""}, {hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'}, ], '_check':isarray, @@ -1084,9 +1084,9 @@ if (#varname#_capi==Py_None) { # }, # Scalararray { # Common - '_check':l_and(isarray,l_not(iscomplexarray)) - },{ # Not hidden - '_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide) + '_check':l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide) }, # Integer*1 array {'need':'#ctype#', @@ -1127,28 +1127,28 @@ check_rules=[ { 'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, 'need':'len..' 
- },{ + }, { 'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', 'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/', 'need':'CHECKSCALAR', - '_check':l_and(isscalar,l_not(iscomplex)), + '_check':l_and(isscalar, l_not(iscomplex)), '_break':'' - },{ + }, { 'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', 'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/', 'need':'CHECKSTRING', '_check':isstring, '_break':'' - },{ + }, { 'need':'CHECKARRAY', 'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', 'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/', '_check':isarray, '_break':'' - },{ - 'need':'CHECKGENERIC', - 'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', - 'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/', + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/', } ] @@ -1156,16 +1156,16 @@ check_rules=[ #################### Build C/API module ####################### -def buildmodule(m,um): +def buildmodule(m, um): """ Return """ - global f2py_version,options + global f2py_version, options outmess('\tBuilding module "%s"...\n'%(m['name'])) ret = {} mod_rules=defmod_rules[:] vrd=modsign2map(m) - rd=dictappend({'f2py_version':f2py_version},vrd) + rd=dictappend({'f2py_version':f2py_version}, vrd) funcwrappers = [] funcwrappers2 = [] # F90 codes for n in m['interfaced']: @@ -1182,39 +1182,39 @@ def buildmodule(m,um): continue nb_list = [nb] if 'entry' in nb: - for k,a in nb['entry'].items(): + for k, a in nb['entry'].items(): nb1 = copy.deepcopy(nb) del nb1['entry'] nb1['name'] = k nb1['args'] = a nb_list.append(nb1) for nb in nb_list: - api,wrap=buildapi(nb) + api, wrap=buildapi(nb) if wrap: if ismoduleroutine(nb): funcwrappers2.append(wrap) else: funcwrappers.append(wrap) - ar=applyrules(api,vrd) - rd=dictappend(rd,ar) + ar=applyrules(api, vrd) + rd=dictappend(rd, ar) # Construct COMMON block support - cr,wrap = common_rules.buildhooks(m) + cr, wrap = common_rules.buildhooks(m) if wrap: funcwrappers.append(wrap) - ar=applyrules(cr,vrd) - rd=dictappend(rd,ar) + ar=applyrules(cr, vrd) + rd=dictappend(rd, ar) # Construct F90 module support - mr,wrap = f90mod_rules.buildhooks(m) + mr, wrap = f90mod_rules.buildhooks(m) if wrap: funcwrappers2.append(wrap) - ar=applyrules(mr,vrd) - rd=dictappend(rd,ar) + ar=applyrules(mr, vrd) + rd=dictappend(rd, ar) for u in um: - ar=use_rules.buildusevars(u,m['use'][u['name']]) - rd=dictappend(rd,ar) + ar=use_rules.buildusevars(u, m['use'][u['name']]) + rd=dictappend(rd, ar) needs=cfuncs.get_needs() code={} @@ -1248,28 +1248,28 @@ def buildmodule(m,um): mod_rules.append(code) for r in mod_rules: if ('_check' in r and r['_check'](m)) or ('_check' not in r): - ar=applyrules(r,vrd,m) - rd=dictappend(rd,ar) - ar=applyrules(module_rules,rd) + ar=applyrules(r, vrd, m) + rd=dictappend(rd, ar) + ar=applyrules(module_rules, rd) - fn = os.path.join(options['buildpath'],vrd['coutput']) + fn = os.path.join(options['buildpath'], vrd['coutput']) ret['csrc'] = fn - f=open(fn,'w') - f.write(ar['modulebody'].replace('\t',2*' ')) + f=open(fn, 'w') + f.write(ar['modulebody'].replace('\t', 2*' ')) f.close() - outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'],fn)) + outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'], fn)) if options['dorestdoc']: - fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest') - 
f=open(fn,'w') + fn = os.path.join(options['buildpath'], vrd['modulename']+'module.rest') + f=open(fn, 'w') f.write('.. -*- rest -*-\n') f.write('\n'.join(ar['restdoc'])) f.close() - outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename'])) + outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'], vrd['modulename'])) if options['dolatexdoc']: - fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex') + fn = os.path.join(options['buildpath'], vrd['modulename']+'module.tex') ret['ltx'] = fn - f=open(fn,'w') + f=open(fn, 'w') f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version)) if 'shortlatex' not in options: f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') @@ -1277,11 +1277,11 @@ def buildmodule(m,um): if 'shortlatex' not in options: f.write('\\end{document}') f.close() - outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename'])) + outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'], vrd['modulename'])) if funcwrappers: - wn = os.path.join(options['buildpath'],vrd['f2py_wrapper_output']) + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) ret['fsrc'] = wn - f=open(wn,'w') + f=open(wn, 'w') f.write('C -*- fortran -*-\n') f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) f.write('C It contains Fortran 77 wrappers to fortran functions.\n') @@ -1293,14 +1293,14 @@ def buildmodule(m,um): l = l[66:] lines.append(l+'\n') else: lines.append(l+'\n') - lines = ''.join(lines).replace('\n &\n','\n') + lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) f.close() outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn)) if funcwrappers2: - wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename'])) + wn = os.path.join(options['buildpath'], '%s-f2pywrappers2.f90'%(vrd['modulename'])) ret['fsrc'] = wn - f=open(wn,'w') + f=open(wn, 'w') f.write('! -*- f90 -*-\n') f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version)) f.write('! 
It contains Fortran 90 wrappers to fortran functions.\n') @@ -1314,7 +1314,7 @@ def buildmodule(m,um): l = l[66:] lines.append(l+'\n') else: lines.append(l+'\n') - lines = ''.join(lines).replace('\n &\n','\n') + lines = ''.join(lines).replace('\n &\n', '\n') f.write(lines) f.close() outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn)) @@ -1325,29 +1325,29 @@ def buildmodule(m,um): stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'} def buildapi(rout): - rout,wrap = func2subr.assubr(rout) - args,depargs=getargs2(rout) + rout, wrap = func2subr.assubr(rout) + args, depargs=getargs2(rout) capi_maps.depargs=depargs var=rout['vars'] auxvars = [a for a in var.keys() if isintent_aux(var[a])] if ismoduleroutine(rout): - outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name'])) + outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'], rout['name'])) else: outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name'])) # Routine vrd=routsign2map(rout) - rd=dictappend({},vrd) + rd=dictappend({}, vrd) for r in rout_rules: if ('_check' in r and r['_check'](rout)) or ('_check' not in r): - ar=applyrules(r,vrd,rout) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, rout) + rd=dictappend(rd, ar) # Args - nth,nthk=0,0 + nth, nthk=0, 0 savevrd={} for a in args: - vrd=sign2map(a,var[a]) + vrd=sign2map(a, var[a]) if isintent_aux(var[a]): _rules = aux_rules else: @@ -1365,8 +1365,8 @@ def buildapi(rout): if '_depend' in r: continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) if '_break' in r: break for a in depargs: @@ -1379,15 +1379,15 @@ def buildapi(rout): if '_depend' not in r: continue if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): - ar=applyrules(r,vrd,var[a]) - rd=dictappend(rd,ar) + ar=applyrules(r, vrd, var[a]) + rd=dictappend(rd, ar) if '_break' in r: break if 'check' in var[a]: for c in var[a]['check']: vrd['check']=c - ar=applyrules(check_rules,vrd,var[a]) - rd=dictappend(rd,ar) + ar=applyrules(check_rules, vrd, var[a]) + rd=dictappend(rd, ar) if isinstance(rd['cleanupfrompyobj'], list): rd['cleanupfrompyobj'].reverse() if isinstance(rd['closepyobjfrom'], list): @@ -1401,25 +1401,25 @@ def buildapi(rout): 'docsignopt':rd['docsignoptshort']} )) if optargs=='': - rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']})) + rd['docsignatureshort']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']})) else: rd['docsignatureshort']=replace('#docsign#[#docsignopt#]', - {'docsign':rd['docsign'], - 'docsignopt':optargs, + {'docsign': rd['docsign'], + 'docsignopt': optargs, }) - rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_') - rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ') - cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) + rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',', ', ') + cfs=stripcomma(replace('#callfortran##callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) if len(rd['callfortranappend'])>1: - rd['callcompaqfortran']=stripcomma(replace('#callfortran# 
0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) + rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']})) else: rd['callcompaqfortran']=cfs rd['callfortran']=cfs if isinstance(rd['docreturn'], list): - rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = ' + rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']}))+' = ' rd['docstrsigns']=[] rd['latexdocstrsigns']=[] - for k in ['docstrreq','docstropt','docstrout','docstrcbs']: + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: if k in rd and isinstance(rd[k], list): rd['docstrsigns']=rd['docstrsigns']+rd[k] k='latex'+k @@ -1434,15 +1434,15 @@ def buildapi(rout): if isinstance(argformat, list): argformat.append('|') else: - assert isinstance(argformat, str),repr((argformat, type(argformat))) + assert isinstance(argformat, str), repr((argformat, type(argformat))) rd['argformat'] += '|' - ar=applyrules(routine_rules,rd) + ar=applyrules(routine_rules, rd) if ismoduleroutine(rout): outmess('\t\t\t %s\n'%(ar['docshort'])) else: outmess('\t\t %s\n'%(ar['docshort'])) - return ar,wrap + return ar, wrap #################### EOF rules.py ####################### diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py index 96a047ec7..2f1fd6a01 100644 --- a/numpy/f2py/setup.py +++ b/numpy/f2py/setup.py @@ -48,10 +48,10 @@ def configuration(parent_package='',top_path=None): f2py_exe = f2py_exe[:-4] + '.py' if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py': f2py_exe = f2py_exe + '.py' - target = os.path.join(build_dir,f2py_exe) - if newer(__file__,target): + target = os.path.join(build_dir, f2py_exe) + if newer(__file__, target): log.info('Creating %s', target) - f = open(target,'w') + f = open(target, 'w') f.write('''\ #!%s # See http://cens.ioc.ee/projects/f2py2e/ @@ -91,7 +91,7 @@ if __name__ == "__main__": config = configuration(top_path='') version = config.get_version() - print('F2PY Version',version) + print('F2PY Version', version) config = config.todict() if sys.version[:3]>='2.3': @@ -125,5 +125,5 @@ wrapping Fortran 77/90/95 subroutines, accessing common blocks from Python, and calling Python functions from Fortran (call-backs). 
Interfacing subroutines/data from Fortran 90/95 modules is supported.""", url = "http://cens.ioc.ee/projects/f2py2e/", - keywords = ['Fortran','f2py'], + keywords = ['Fortran', 'f2py'], **config) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 6707e8d8b..09d613293 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -8,7 +8,7 @@ import copy import nose from numpy.testing import * -from numpy import array, alltrue, ndarray, asarray, can_cast,zeros, dtype +from numpy import array, alltrue, ndarray, asarray, can_cast, zeros, dtype from numpy.core.multiarray import typeinfo import util @@ -44,12 +44,12 @@ def flags_info(arr): def flags2names(flags): info = [] - for flagname in ['CONTIGUOUS','FORTRAN','OWNDATA','ENSURECOPY', - 'ENSUREARRAY','ALIGNED','NOTSWAPPED','WRITEABLE', - 'UPDATEIFCOPY','BEHAVED','BEHAVED_RO', - 'CARRAY','FARRAY' + for flagname in ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY', + 'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE', + 'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO', + 'CARRAY', 'FARRAY' ]: - if abs(flags) & getattr(wrap,flagname, 0): + if abs(flags) & getattr(wrap, flagname, 0): info.append(flagname) return info @@ -61,9 +61,9 @@ class Intent(object): if i=='optional': flags |= wrap.F2PY_OPTIONAL else: - flags |= getattr(wrap,'F2PY_INTENT_'+i.upper()) + flags |= getattr(wrap, 'F2PY_INTENT_'+i.upper()) self.flags = flags - def __getattr__(self,name): + def __getattr__(self, name): name = name.lower() if name=='in_': name='in' return self.__class__(self.intent_list+[name]) @@ -82,9 +82,9 @@ class Intent(object): intent = Intent() class Type(object): - _type_names = ['BOOL','BYTE','UBYTE','SHORT','USHORT','INT','UINT', - 'LONG','ULONG','LONGLONG','ULONGLONG', - 'FLOAT','DOUBLE','LONGDOUBLE','CFLOAT','CDOUBLE', + _type_names = ['BOOL', 'BYTE', 'UBYTE', 'SHORT', 'USHORT', 'INT', 'UINT', + 'LONG', 'ULONG', 'LONGLONG', 'ULONGLONG', + 'FLOAT', 'DOUBLE', 'LONGDOUBLE', 'CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] _type_cache = {} @@ -93,10 +93,10 @@ class Type(object): _cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE'] _cast_dict['BYTE'] = ['BYTE'] _cast_dict['UBYTE'] = ['UBYTE'] - _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE','SHORT'] - _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE','USHORT'] - _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT','INT'] - _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT','UINT'] + _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE', 'SHORT'] + _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE', 'USHORT'] + _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT', 'INT'] + _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT', 'UINT'] _cast_dict['LONG'] = _cast_dict['INT'] + ['LONG'] _cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG'] @@ -104,24 +104,24 @@ class Type(object): _cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG'] _cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG'] - _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT','FLOAT'] - _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT','FLOAT','DOUBLE'] - _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + ['ULONG','FLOAT','DOUBLE','LONGDOUBLE'] + _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT', 'FLOAT'] + _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT', 'FLOAT', 'DOUBLE'] + _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + ['ULONG', 'FLOAT', 'DOUBLE', 'LONGDOUBLE'] _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT'] - _cast_dict['CDOUBLE'] = 
_cast_dict['DOUBLE'] + ['CFLOAT','CDOUBLE'] - _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + ['CFLOAT','CDOUBLE','CLONGDOUBLE'] + _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE'] + _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + ['CFLOAT', 'CDOUBLE', 'CLONGDOUBLE'] - def __new__(cls,name): - if isinstance(name,dtype): + def __new__(cls, name): + if isinstance(name, dtype): dtype0 = name name = None - for n,i in typeinfo.items(): - if isinstance(i,tuple) and dtype0.type is i[-1]: + for n, i in typeinfo.items(): + if isinstance(i, tuple) and dtype0.type is i[-1]: name = n break - obj = cls._type_cache.get(name.upper(),None) + obj = cls._type_cache.get(name.upper(), None) if obj is not None: return obj obj = object.__new__(cls) @@ -129,10 +129,10 @@ class Type(object): cls._type_cache[name.upper()] = obj return obj - def _init(self,name): + def _init(self, name): self.NAME = name.upper() - self.type_num = getattr(wrap,'NPY_'+self.NAME) - assert_equal(self.type_num,typeinfo[self.NAME][1]) + self.type_num = getattr(wrap, 'NPY_'+self.NAME) + assert_equal(self.type_num, typeinfo[self.NAME][1]) self.dtype = typeinfo[self.NAME][-1] self.elsize = typeinfo[self.NAME][2] / 8 self.dtypechar = typeinfo[self.NAME][0] @@ -169,7 +169,7 @@ class Type(object): return types class Array(object): - def __init__(self,typ,dims,intent,obj): + def __init__(self, typ, dims, intent, obj): self.type = typ self.dims = dims self.intent = intent @@ -177,16 +177,16 @@ class Array(object): self.obj = obj # arr.dtypechar may be different from typ.dtypechar - self.arr = wrap.call(typ.type_num,dims,intent.flags,obj) + self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) - assert_(isinstance(self.arr, ndarray),repr(type(self.arr))) + assert_(isinstance(self.arr, ndarray), repr(type(self.arr))) self.arr_attr = wrap.array_attrs(self.arr) if len(dims)>1: if self.intent.is_intent('c'): assert_(intent.flags & wrap.F2PY_INTENT_C) - assert_(not self.arr.flags['FORTRAN'],repr((self.arr.flags,getattr(obj,'flags',None)))) + assert_(not self.arr.flags['FORTRAN'], repr((self.arr.flags, getattr(obj, 'flags', None)))) assert_(self.arr.flags['CONTIGUOUS']) assert_(not self.arr_attr[6] & wrap.FORTRAN) else: @@ -201,14 +201,14 @@ class Array(object): return if intent.is_intent('cache'): - assert_(isinstance(obj,ndarray),repr(type(obj))) + assert_(isinstance(obj, ndarray), repr(type(obj))) self.pyarr = array(obj).reshape(*dims).copy() else: self.pyarr = array(array(obj, dtype = typ.dtypechar).reshape(*dims), order=self.intent.is_intent('c') and 'C' or 'F') assert_(self.pyarr.dtype == typ, \ - repr((self.pyarr.dtype,typ))) + repr((self.pyarr.dtype, typ))) assert_(self.pyarr.flags['OWNDATA'], (obj, intent)) self.pyarr_attr = wrap.array_attrs(self.pyarr) @@ -227,26 +227,26 @@ class Array(object): assert_(self.arr_attr[2]==self.pyarr_attr[2]) # dimensions if self.arr_attr[1]<=1: assert_(self.arr_attr[3]==self.pyarr_attr[3],\ - repr((self.arr_attr[3],self.pyarr_attr[3],self.arr.tostring(),self.pyarr.tostring()))) # strides + repr((self.arr_attr[3], self.pyarr_attr[3], self.arr.tostring(), self.pyarr.tostring()))) # strides assert_(self.arr_attr[5][-2:]==self.pyarr_attr[5][-2:],\ - repr((self.arr_attr[5],self.pyarr_attr[5]))) # descr + repr((self.arr_attr[5], self.pyarr_attr[5]))) # descr assert_(self.arr_attr[6]==self.pyarr_attr[6],\ - repr((self.arr_attr[6],self.pyarr_attr[6],flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]),flags2names(self.arr_attr[6]),intent))) # flags + repr((self.arr_attr[6], 
self.pyarr_attr[6], flags2names(0*self.arr_attr[6]-self.pyarr_attr[6]), flags2names(self.arr_attr[6]), intent))) # flags if intent.is_intent('cache'): assert_(self.arr_attr[5][3]>=self.type.elsize,\ - repr((self.arr_attr[5][3],self.type.elsize))) + repr((self.arr_attr[5][3], self.type.elsize))) else: assert_(self.arr_attr[5][3]==self.type.elsize,\ - repr((self.arr_attr[5][3],self.type.elsize))) - assert_(self.arr_equal(self.pyarr,self.arr)) + repr((self.arr_attr[5][3], self.type.elsize))) + assert_(self.arr_equal(self.pyarr, self.arr)) - if isinstance(self.obj,ndarray): + if isinstance(self.obj, ndarray): if typ.elsize==Type(obj.dtype).elsize: if not intent.is_intent('copy') and self.arr_attr[1]<=1: assert_(self.has_shared_memory()) - def arr_equal(self,arr1,arr2): + def arr_equal(self, arr1, arr2): if arr1.shape != arr2.shape: return False s = arr1==arr2 @@ -260,7 +260,7 @@ class Array(object): """ if self.obj is self.arr: return True - if not isinstance(self.obj,ndarray): + if not isinstance(self.obj, ndarray): return False obj_attr = wrap.array_attrs(self.obj) return obj_attr[0]==self.arr_attr[0] @@ -269,36 +269,36 @@ class Array(object): class test_intent(unittest.TestCase): def test_in_out(self): - assert_equal(str(intent.in_.out),'intent(in,out)') + assert_equal(str(intent.in_.out), 'intent(in,out)') assert_(intent.in_.c.is_intent('c')) assert_(not intent.in_.c.is_intent_exact('c')) - assert_(intent.in_.c.is_intent_exact('c','in')) - assert_(intent.in_.c.is_intent_exact('in','c')) + assert_(intent.in_.c.is_intent_exact('c', 'in')) + assert_(intent.in_.c.is_intent_exact('in', 'c')) assert_(not intent.in_.is_intent('c')) class _test_shared_memory: - num2seq = [1,2] - num23seq = [[1,2,3],[4,5,6]] + num2seq = [1, 2] + num23seq = [[1, 2, 3], [4, 5, 6]] def test_in_from_2seq(self): - a = self.array([2],intent.in_,self.num2seq) + a = self.array([2], intent.in_, self.num2seq) assert_(not a.has_shared_memory()) def test_in_from_2casttype(self): for t in self.type.cast_types(): - obj = array(self.num2seq,dtype=t.dtype) - a = self.array([len(self.num2seq)],intent.in_,obj) + obj = array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(),repr((self.type.dtype,t.dtype))) + assert_(a.has_shared_memory(), repr((self.type.dtype, t.dtype))) else: - assert_(not a.has_shared_memory(),repr(t.dtype)) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_inout_2seq(self): - obj = array(self.num2seq,dtype=self.type.dtype) - a = self.array([len(self.num2seq)],intent.inout,obj) + obj = array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) assert_(a.has_shared_memory()) try: - a = self.array([2],intent.in_.inout,self.num2seq) + a = self.array([2], intent.in_.inout, self.num2seq) except TypeError as msg: if not str(msg).startswith('failed to initialize intent(inout|inplace|cache) array'): raise @@ -306,15 +306,15 @@ class _test_shared_memory: raise SystemError('intent(inout) should have failed on sequence') def test_f_inout_23seq(self): - obj = array(self.num23seq,dtype=self.type.dtype,order='F') - shape = (len(self.num23seq),len(self.num23seq[0])) - a = self.array(shape,intent.in_.inout,obj) + obj = array(self.num23seq, dtype=self.type.dtype, order='F') + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) assert_(a.has_shared_memory()) - obj = array(self.num23seq,dtype=self.type.dtype,order='C') - shape = 
(len(self.num23seq),len(self.num23seq[0])) + obj = array(self.num23seq, dtype=self.type.dtype, order='C') + shape = (len(self.num23seq), len(self.num23seq[0])) try: - a = self.array(shape,intent.in_.inout,obj) + a = self.array(shape, intent.in_.inout, obj) except ValueError as msg: if not str(msg).startswith('failed to initialize intent(inout) array'): raise @@ -322,84 +322,84 @@ class _test_shared_memory: raise SystemError('intent(inout) should have failed on improper array') def test_c_inout_23seq(self): - obj = array(self.num23seq,dtype=self.type.dtype) - shape = (len(self.num23seq),len(self.num23seq[0])) - a = self.array(shape,intent.in_.c.inout,obj) + obj = array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) assert_(a.has_shared_memory()) def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): - obj = array(self.num2seq,dtype=t.dtype) - a = self.array([len(self.num2seq)],intent.in_.copy,obj) - assert_(not a.has_shared_memory(),repr(t.dtype)) + obj = array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_in_from_23seq(self): - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_,self.num23seq) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, self.num23seq) assert_(not a.has_shared_memory()) def test_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype) - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_,obj) - assert_(not a.has_shared_memory(),repr(t.dtype)) + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_f_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype,order='F') - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_,obj) + obj = array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_, obj) if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(),repr(t.dtype)) + assert_(a.has_shared_memory(), repr(t.dtype)) else: - assert_(not a.has_shared_memory(),repr(t.dtype)) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype) - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_.c,obj) + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c, obj) if t.elsize==self.type.elsize: - assert_(a.has_shared_memory(),repr(t.dtype)) + assert_(a.has_shared_memory(), repr(t.dtype)) else: - assert_(not a.has_shared_memory(),repr(t.dtype)) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): - obj = array(self.num23seq,dtype=t.dtype,order='F') - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_.copy,obj) - assert_(not a.has_shared_memory(),repr(t.dtype)) + obj = array(self.num23seq, dtype=t.dtype, order='F') + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): - 
obj = array(self.num23seq,dtype=t.dtype) - a = self.array([len(self.num23seq),len(self.num23seq[0])], - intent.in_.c.copy,obj) - assert_(not a.has_shared_memory(),repr(t.dtype)) + obj = array(self.num23seq, dtype=t.dtype) + a = self.array([len(self.num23seq), len(self.num23seq[0])], + intent.in_.c.copy, obj) + assert_(not a.has_shared_memory(), repr(t.dtype)) def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue - obj = array(self.num2seq,dtype=t.dtype) + obj = array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) - a = self.array(shape,intent.in_.c.cache,obj) - assert_(a.has_shared_memory(),repr(t.dtype)) + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) - a = self.array(shape,intent.in_.cache,obj) - assert_(a.has_shared_memory(),repr(t.dtype)) + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) - obj = array(self.num2seq,dtype=t.dtype,order='F') - a = self.array(shape,intent.in_.c.cache,obj) - assert_(a.has_shared_memory(),repr(t.dtype)) + obj = array(self.num2seq, dtype=t.dtype, order='F') + a = self.array(shape, intent.in_.c.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) - a = self.array(shape,intent.in_.cache,obj) - assert_(a.has_shared_memory(),repr(t.dtype)) + a = self.array(shape, intent.in_.cache, obj) + assert_(a.has_shared_memory(), repr(t.dtype)) try: - a = self.array(shape,intent.in_.cache,obj[::-1]) + a = self.array(shape, intent.in_.cache, obj[::-1]) except ValueError as msg: if not str(msg).startswith('failed to initialize intent(cache) array'): raise @@ -409,10 +409,10 @@ class _test_shared_memory: for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue - obj = array(self.num2seq,dtype=t.dtype) + obj = array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq),) try: - a = self.array(shape,intent.in_.cache,obj) + a = self.array(shape, intent.in_.cache, obj) except ValueError as msg: if not str(msg).startswith('failed to initialize intent(cache) array'): raise @@ -421,16 +421,16 @@ class _test_shared_memory: def test_cache_hidden(self): shape = (2,) - a = self.array(shape,intent.cache.hide,None) + a = self.array(shape, intent.cache.hide, None) assert_(a.arr.shape==shape) - shape = (2,3) - a = self.array(shape,intent.cache.hide,None) + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) assert_(a.arr.shape==shape) - shape = (-1,3) + shape = (-1, 3) try: - a = self.array(shape,intent.cache.hide,None) + a = self.array(shape, intent.cache.hide, None) except ValueError as msg: if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): raise @@ -439,25 +439,25 @@ class _test_shared_memory: def test_hidden(self): shape = (2,) - a = self.array(shape,intent.hide,None) + a = self.array(shape, intent.hide, None) assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - shape = (2,3) - a = self.array(shape,intent.hide,None) + shape = (2, 3) + a = self.array(shape, intent.hide, None) assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - shape = (2,3) - a = self.array(shape,intent.c.hide,None) + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) assert_(a.arr.shape==shape) - 
assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) - shape = (-1,3) + shape = (-1, 3) try: - a = self.array(shape,intent.hide,None) + a = self.array(shape, intent.hide, None) except ValueError as msg: if not str(msg).startswith('failed to create intent(cache|hide)|optional array'): raise @@ -466,48 +466,48 @@ class _test_shared_memory: def test_optional_none(self): shape = (2,) - a = self.array(shape,intent.optional,None) + a = self.array(shape, intent.optional, None) assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) - shape = (2,3) - a = self.array(shape,intent.optional,None) + shape = (2, 3) + a = self.array(shape, intent.optional, None) assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(a.arr.flags['FORTRAN'] and not a.arr.flags['CONTIGUOUS']) - shape = (2,3) - a = self.array(shape,intent.c.optional,None) + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) assert_(a.arr.shape==shape) - assert_(a.arr_equal(a.arr,zeros(shape,dtype=self.type.dtype))) + assert_(a.arr_equal(a.arr, zeros(shape, dtype=self.type.dtype))) assert_(not a.arr.flags['FORTRAN'] and a.arr.flags['CONTIGUOUS']) def test_optional_from_2seq(self): obj = self.num2seq shape = (len(obj),) - a = self.array(shape,intent.optional,obj) + a = self.array(shape, intent.optional, obj) assert_(a.arr.shape==shape) assert_(not a.has_shared_memory()) def test_optional_from_23seq(self): obj = self.num23seq - shape = (len(obj),len(obj[0])) - a = self.array(shape,intent.optional,obj) + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) assert_(a.arr.shape==shape) assert_(not a.has_shared_memory()) - a = self.array(shape,intent.optional.c,obj) + a = self.array(shape, intent.optional.c, obj) assert_(a.arr.shape==shape) assert_(not a.has_shared_memory()) def test_inplace(self): - obj = array(self.num23seq,dtype=self.type.dtype) + obj = array(self.num23seq, dtype=self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape - a = self.array(shape,intent.inplace,obj) - assert_(obj[1][2]==a.arr[1][2],repr((obj,a.arr))) + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2]==a.arr[1][2], repr((obj, a.arr))) a.arr[1][2]=54 - assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),repr((obj,a.arr))) + assert_(obj[1][2]==a.arr[1][2]==array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! 
assert_(not obj.flags['CONTIGUOUS']) @@ -516,15 +516,15 @@ class _test_shared_memory: for t in self.type.cast_types(): if t is self.type: continue - obj = array(self.num23seq,dtype=t.dtype) + obj = array(self.num23seq, dtype=t.dtype) assert_(obj.dtype.type==t.dtype) assert_(obj.dtype.type is not self.type.dtype) assert_(not obj.flags['FORTRAN'] and obj.flags['CONTIGUOUS']) shape = obj.shape - a = self.array(shape,intent.inplace,obj) - assert_(obj[1][2]==a.arr[1][2],repr((obj,a.arr))) + a = self.array(shape, intent.inplace, obj) + assert_(obj[1][2]==a.arr[1][2], repr((obj, a.arr))) a.arr[1][2]=54 - assert_(obj[1][2]==a.arr[1][2]==array(54,dtype=self.type.dtype),repr((obj,a.arr))) + assert_(obj[1][2]==a.arr[1][2]==array(54, dtype=self.type.dtype), repr((obj, a.arr))) assert_(a.arr is obj) assert_(obj.flags['FORTRAN']) # obj attributes are changed inplace! assert_(not obj.flags['CONTIGUOUS']) @@ -539,7 +539,7 @@ class test_%s_gen(unittest.TestCase, def setUp(self): self.type = Type(%r) array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj) -''' % (t,t,t)) +''' % (t, t, t)) if __name__ == "__main__": setup() diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py index 6c0ea9ebd..d6beaee63 100644 --- a/numpy/f2py/tests/test_assumed_shape.py +++ b/numpy/f2py/tests/test_assumed_shape.py @@ -20,17 +20,17 @@ class TestAssumedShapeSumExample(util.F2PyTest): @dec.slow def test_all(self): - r = self.module.fsum([1,2]) - assert_(r==3,repr(r)) - r = self.module.sum([1,2]) - assert_(r==3,repr(r)) - r = self.module.sum_with_use([1,2]) - assert_(r==3,repr(r)) - - r = self.module.mod.sum([1,2]) - assert_(r==3,repr(r)) - r = self.module.mod.fsum([1,2]) - assert_(r==3,repr(r)) + r = self.module.fsum([1, 2]) + assert_(r==3, repr(r)) + r = self.module.sum([1, 2]) + assert_(r==3, repr(r)) + r = self.module.sum_with_use([1, 2]) + assert_(r==3, repr(r)) + + r = self.module.mod.sum([1, 2]) + assert_(r==3, repr(r)) + r = self.module.mod.fsum([1, 2]) + assert_(r==3, repr(r)) if __name__ == "__main__": import nose diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 7a7e8bc11..98a90a28c 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -74,24 +74,24 @@ cf2py intent(out) a def check_function(self, name): t = getattr(self.module, name) r = t(lambda : 4) - assert_( r==4,repr(r)) - r = t(lambda a:5,fun_extra_args=(6,)) - assert_( r==5,repr(r)) - r = t(lambda a:a,fun_extra_args=(6,)) - assert_( r==6,repr(r)) - r = t(lambda a:5+a,fun_extra_args=(7,)) - assert_( r==12,repr(r)) - r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,)) - assert_( r==180,repr(r)) - r = t(math.degrees,fun_extra_args=(math.pi,)) - assert_( r==180,repr(r)) + assert_( r==4, repr(r)) + r = t(lambda a:5, fun_extra_args=(6,)) + assert_( r==5, repr(r)) + r = t(lambda a:a, fun_extra_args=(6,)) + assert_( r==6, repr(r)) + r = t(lambda a:5+a, fun_extra_args=(7,)) + assert_( r==12, repr(r)) + r = t(lambda a:math.degrees(a), fun_extra_args=(math.pi,)) + assert_( r==180, repr(r)) + r = t(math.degrees, fun_extra_args=(math.pi,)) + assert_( r==180, repr(r)) r = t(self.module.func, fun_extra_args=(6,)) - assert_( r==17,repr(r)) + assert_( r==17, repr(r)) r = t(self.module.func0) - assert_( r==11,repr(r)) + assert_( r==11, repr(r)) r = t(self.module.func0._cpointer) - assert_( r==11,repr(r)) + assert_( r==11, repr(r)) class A(object): def __call__(self): return 7 @@ -99,9 +99,9 @@ cf2py intent(out) a return 9 a = A() r = t(a) - 
assert_( r==7,repr(r)) + assert_( r==7, repr(r)) r = t(a.mth) - assert_( r==9,repr(r)) + assert_( r==9, repr(r)) if __name__ == "__main__": import nose diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index 493c06942..f96fbffdb 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -24,11 +24,11 @@ class TestKind(util.F2PyTest): selectedintkind = self.module.selectedintkind for i in range(40): - assert_(selectedintkind(i) in [selected_int_kind(i),-1],\ + assert_(selectedintkind(i) in [selected_int_kind(i), -1],\ 'selectedintkind(%s): expected %r but got %r' % (i, selected_int_kind(i), selectedintkind(i))) for i in range(20): - assert_(selectedrealkind(i) in [selected_real_kind(i),-1],\ + assert_(selectedrealkind(i) in [selected_real_kind(i), -1],\ 'selectedrealkind(%s): expected %r but got %r' % (i, selected_real_kind(i), selectedrealkind(i))) if __name__ == "__main__": diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py index 213730008..0865d54b3 100644 --- a/numpy/f2py/tests/test_return_character.py +++ b/numpy/f2py/tests/test_return_character.py @@ -8,19 +8,19 @@ import util class TestReturnCharacter(util.F2PyTest): def check_function(self, t): tname = t.__doc__.split()[0] - if tname in ['t0','t1','s0','s1']: + if tname in ['t0', 't1', 's0', 's1']: assert_( t(23)==asbytes('2')) - r = t('ab');assert_( r==asbytes('a'),repr(r)) - r = t(array('ab'));assert_( r==asbytes('a'),repr(r)) - r = t(array(77,'u1'));assert_( r==asbytes('M'),repr(r)) + r = t('ab');assert_( r==asbytes('a'), repr(r)) + r = t(array('ab'));assert_( r==asbytes('a'), repr(r)) + r = t(array(77, 'u1'));assert_( r==asbytes('M'), repr(r)) #assert_(_raises(ValueError, t, array([77,87]))) #assert_(_raises(ValueError, t, array(77))) - elif tname in ['ts','ss']: - assert_( t(23)==asbytes('23 '),repr(t(23))) + elif tname in ['ts', 'ss']: + assert_( t(23)==asbytes('23 '), repr(t(23))) assert_( t('123456789abcdef')==asbytes('123456789a')) - elif tname in ['t5','s5']: - assert_( t(23)==asbytes('23 '),repr(t(23))) - assert_( t('ab')==asbytes('ab '),repr(t('ab'))) + elif tname in ['t5', 's5']: + assert_( t(23)==asbytes('23 '), repr(t(23))) + assert_( t('ab')==asbytes('ab '), repr(t('ab'))) assert_( t('123456789abcdef')==asbytes('12345')) else: raise NotImplementedError diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py index f03416648..d144cecf1 100644 --- a/numpy/f2py/tests/test_return_complex.py +++ b/numpy/f2py/tests/test_return_complex.py @@ -8,7 +8,7 @@ import util class TestReturnComplex(util.F2PyTest): def check_function(self, t): tname = t.__doc__.split()[0] - if tname in ['t0','t8','s0','s8']: + if tname in ['t0', 't8', 's0', 's8']: err = 1e-5 else: err = 0.0 @@ -22,18 +22,18 @@ class TestReturnComplex(util.F2PyTest): assert_( abs(t([234])-234.)<=err) assert_( abs(t((234,))-234.)<=err) assert_( abs(t(array(234))-234.)<=err) - assert_( abs(t(array(23+4j,'F'))-(23+4j))<=err) + assert_( abs(t(array(23+4j, 'F'))-(23+4j))<=err) assert_( abs(t(array([234]))-234.)<=err) assert_( abs(t(array([[234]]))-234.)<=err) - assert_( abs(t(array([234],'b'))+22.)<=err) - assert_( abs(t(array([234],'h'))-234.)<=err) - assert_( abs(t(array([234],'i'))-234.)<=err) - assert_( abs(t(array([234],'l'))-234.)<=err) - assert_( abs(t(array([234],'q'))-234.)<=err) - assert_( abs(t(array([234],'f'))-234.)<=err) - assert_( abs(t(array([234],'d'))-234.)<=err) - assert_( abs(t(array([234+3j],'F'))-(234+3j))<=err) - 
assert_( abs(t(array([234],'D'))-234.)<=err) + assert_( abs(t(array([234], 'b'))+22.)<=err) + assert_( abs(t(array([234], 'h'))-234.)<=err) + assert_( abs(t(array([234], 'i'))-234.)<=err) + assert_( abs(t(array([234], 'l'))-234.)<=err) + assert_( abs(t(array([234], 'q'))-234.)<=err) + assert_( abs(t(array([234], 'f'))-234.)<=err) + assert_( abs(t(array([234], 'd'))-234.)<=err) + assert_( abs(t(array([234+3j], 'F'))-(234+3j))<=err) + assert_( abs(t(array([234], 'D'))-234.)<=err) #assert_raises(TypeError, t, array([234], 'a1')) assert_raises(TypeError, t, 'abc') @@ -46,7 +46,7 @@ class TestReturnComplex(util.F2PyTest): try: r = t(10**400) - assert_( repr(r) in ['(inf+0j)','(Infinity+0j)'],repr(r)) + assert_( repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r)) except OverflowError: pass diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index d19653f4d..056466208 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -7,7 +7,7 @@ import util class TestReturnInteger(util.F2PyTest): def check_function(self, t): - assert_( t(123)==123,repr(t(123))) + assert_( t(123)==123, repr(t(123))) assert_( t(123.6)==123) assert_( t(long(123))==123) assert_( t('123')==123) @@ -17,13 +17,13 @@ class TestReturnInteger(util.F2PyTest): assert_( t(array(123))==123) assert_( t(array([123]))==123) assert_( t(array([[123]]))==123) - assert_( t(array([123],'b'))==123) - assert_( t(array([123],'h'))==123) - assert_( t(array([123],'i'))==123) - assert_( t(array([123],'l'))==123) - assert_( t(array([123],'B'))==123) - assert_( t(array([123],'f'))==123) - assert_( t(array([123],'d'))==123) + assert_( t(array([123], 'b'))==123) + assert_( t(array([123], 'h'))==123) + assert_( t(array([123], 'i'))==123) + assert_( t(array([123], 'l'))==123) + assert_( t(array([123], 'B'))==123) + assert_( t(array([123], 'f'))==123) + assert_( t(array([123], 'd'))==123) #assert_raises(ValueError, t, array([123],'S3')) assert_raises(ValueError, t, 'abc') @@ -34,7 +34,7 @@ class TestReturnInteger(util.F2PyTest): assert_raises(Exception, t, t) assert_raises(Exception, t, {}) - if t.__doc__.split()[0] in ['t8','s8']: + if t.__doc__.split()[0] in ['t8', 's8']: assert_raises(OverflowError, t, 100000000000000000000000) assert_raises(OverflowError, t, 10000000011111111111111.23) diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py index 3823e5642..82f86b67f 100644 --- a/numpy/f2py/tests/test_return_logical.py +++ b/numpy/f2py/tests/test_return_logical.py @@ -7,8 +7,8 @@ import util class TestReturnLogical(util.F2PyTest): def check_function(self, t): - assert_( t(True)==1,repr(t(True))) - assert_( t(False)==0,repr(t(False))) + assert_( t(True)==1, repr(t(True))) + assert_( t(False)==0, repr(t(False))) assert_( t(0)==0) assert_( t(None)==0) assert_( t(0.0)==0) @@ -32,20 +32,20 @@ class TestReturnLogical(util.F2PyTest): assert_( t(array(234))==1) assert_( t(array([234]))==1) assert_( t(array([[234]]))==1) - assert_( t(array([234],'b'))==1) - assert_( t(array([234],'h'))==1) - assert_( t(array([234],'i'))==1) - assert_( t(array([234],'l'))==1) - assert_( t(array([234],'f'))==1) - assert_( t(array([234],'d'))==1) - assert_( t(array([234+3j],'F'))==1) - assert_( t(array([234],'D'))==1) + assert_( t(array([234], 'b'))==1) + assert_( t(array([234], 'h'))==1) + assert_( t(array([234], 'i'))==1) + assert_( t(array([234], 'l'))==1) + assert_( t(array([234], 'f'))==1) + assert_( t(array([234], 'd'))==1) + assert_( 
t(array([234+3j], 'F'))==1) + assert_( t(array([234], 'D'))==1) assert_( t(array(0))==0) assert_( t(array([0]))==0) assert_( t(array([[0]]))==0) assert_( t(array([0j]))==0) assert_( t(array([1]))==1) - assert_raises(ValueError, t, array([0,0])) + assert_raises(ValueError, t, array([0, 0])) class TestF77ReturnLogical(TestReturnLogical): diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index 3286e11f2..f9a09f620 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -8,7 +8,7 @@ import util class TestReturnReal(util.F2PyTest): def check_function(self, t): - if t.__doc__.split()[0] in ['t0','t4','s0','s4']: + if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: err = 1e-5 else: err = 0.0 @@ -23,14 +23,14 @@ class TestReturnReal(util.F2PyTest): assert_( abs(t(array(234))-234.)<=err) assert_( abs(t(array([234]))-234.)<=err) assert_( abs(t(array([[234]]))-234.)<=err) - assert_( abs(t(array([234],'b'))+22)<=err) - assert_( abs(t(array([234],'h'))-234.)<=err) - assert_( abs(t(array([234],'i'))-234.)<=err) - assert_( abs(t(array([234],'l'))-234.)<=err) - assert_( abs(t(array([234],'B'))-234.)<=err) - assert_( abs(t(array([234],'f'))-234.)<=err) - assert_( abs(t(array([234],'d'))-234.)<=err) - if t.__doc__.split()[0] in ['t0','t4','s0','s4']: + assert_( abs(t(array([234], 'b'))+22)<=err) + assert_( abs(t(array([234], 'h'))-234.)<=err) + assert_( abs(t(array([234], 'i'))-234.)<=err) + assert_( abs(t(array([234], 'l'))-234.)<=err) + assert_( abs(t(array([234], 'B'))-234.)<=err) + assert_( abs(t(array([234], 'f'))-234.)<=err) + assert_( abs(t(array([234], 'd'))-234.)<=err) + if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']: assert_( t(1e200)==t(1e300)) # inf #assert_raises(ValueError, t, array([234], 'S1')) @@ -44,7 +44,7 @@ class TestReturnReal(util.F2PyTest): try: r = t(10**400) - assert_( repr(r) in ['inf','Infinity'],repr(r)) + assert_( repr(r) in ['inf', 'Infinity'], repr(r)) except OverflowError: pass diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py index 6cc508a19..e4f21b519 100644 --- a/numpy/f2py/tests/test_size.py +++ b/numpy/f2py/tests/test_size.py @@ -17,30 +17,30 @@ class TestSizeSumExample(util.F2PyTest): @dec.slow def test_all(self): - r = self.module.foo([[1,2]]) - assert_equal(r, [3],repr(r)) + r = self.module.foo([[1, 2]]) + assert_equal(r, [3], repr(r)) - r = self.module.foo([[1,2],[3,4]]) - assert_equal(r, [3,7],repr(r)) + r = self.module.foo([[1, 2], [3, 4]]) + assert_equal(r, [3, 7], repr(r)) - r = self.module.foo([[1,2],[3,4],[5,6]]) - assert_equal(r, [3,7,11],repr(r)) + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert_equal(r, [3, 7, 11], repr(r)) @dec.slow def test_transpose(self): - r = self.module.trans([[1,2]]) - assert_equal(r, [[1],[2]],repr(r)) + r = self.module.trans([[1, 2]]) + assert_equal(r, [[1], [2]], repr(r)) - r = self.module.trans([[1,2,3],[4,5,6]]) - assert_equal(r, [[1,4],[2,5],[3,6]],repr(r)) + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [[1, 4], [2, 5], [3, 6]], repr(r)) @dec.slow def test_flatten(self): - r = self.module.flatten([[1,2]]) - assert_equal(r, [1,2],repr(r)) + r = self.module.flatten([[1, 2]]) + assert_equal(r, [1, 2], repr(r)) - r = self.module.flatten([[1,2,3],[4,5,6]]) - assert_equal(r, [1,2,3,4,5,6],repr(r)) + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert_equal(r, [1, 2, 3, 4, 5, 6], repr(r)) if __name__ == "__main__": import nose diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py index 
9a8054617..6fd72bd77 100644 --- a/numpy/f2py/use_rules.py +++ b/numpy/f2py/use_rules.py @@ -53,7 +53,7 @@ capi_fail: ################ -def buildusevars(m,r): +def buildusevars(m, r): ret={} outmess('\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n'%(m['name'])) varsmap={} @@ -61,7 +61,7 @@ def buildusevars(m,r): if 'map' in r: for k in r['map'].keys(): if r['map'][k] in revmap: - outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n'%(r['map'][k],k,revmap[r['map'][k]])) + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n'%(r['map'][k], k, revmap[r['map'][k]])) else: revmap[r['map'][k]]=k if 'only' in r and r['only']: @@ -71,9 +71,9 @@ def buildusevars(m,r): if revmap[r['map'][v]]==v: varsmap[v]=r['map'][v] else: - outmess('\t\t\tIgnoring map "%s=>%s". See above.\n'%(v,r['map'][v])) + outmess('\t\t\tIgnoring map "%s=>%s". See above.\n'%(v, r['map'][v])) else: - outmess('\t\t\tNo definition for variable "%s=>%s". Skipping.\n'%(v,r['map'][v])) + outmess('\t\t\tNo definition for variable "%s=>%s". Skipping.\n'%(v, r['map'][v])) else: for v in m['vars'].keys(): if v in revmap: @@ -81,29 +81,29 @@ def buildusevars(m,r): else: varsmap[v]=v for v in varsmap.keys(): - ret=dictappend(ret,buildusevar(v,varsmap[v],m['vars'],m['name'])) + ret=dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) return ret -def buildusevar(name,realname,vars,usemodulename): - outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n'%(name,realname)) +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n'%(name, realname)) ret={} vrd={'name':name, 'realname':realname, 'REALNAME':realname.upper(), 'usemodulename':usemodulename, 'USEMODULENAME':usemodulename.upper(), - 'texname':name.replace('_','\\_'), - 'begintitle':gentitle('%s=>%s'%(name,realname)), - 'endtitle':gentitle('end of %s=>%s'%(name,realname)), - 'apiname':'#modulename#_use_%s_from_%s'%(realname,usemodulename) + 'texname':name.replace('_', '\\_'), + 'begintitle':gentitle('%s=>%s'%(name, realname)), + 'endtitle':gentitle('end of %s=>%s'%(name, realname)), + 'apiname':'#modulename#_use_%s_from_%s'%(realname, usemodulename) } nummap={0:'Ro',1:'Ri',2:'Rii',3:'Riii',4:'Riv',5:'Rv',6:'Rvi',7:'Rvii',8:'Rviii',9:'Rix'} vrd['texnamename']=name for i in nummap.keys(): - vrd['texnamename']=vrd['texnamename'].replace(repr(i),nummap[i]) + vrd['texnamename']=vrd['texnamename'].replace(repr(i), nummap[i]) if hasnote(vars[realname]): vrd['note']=vars[realname]['note'] - rd=dictappend({},vrd) + rd=dictappend({}, vrd) var=vars[realname] - print(name,realname,vars[realname]) - ret=applyrules(usemodule_rules,rd) + print(name, realname, vars[realname]) + ret=applyrules(usemodule_rules, rd) return ret diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py index 4961b2989..1fb7bafc4 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/fftpack.py @@ -32,7 +32,7 @@ version of the FFTPACK routines. 
""" from __future__ import division, absolute_import, print_function -__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \ @@ -62,11 +62,11 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, s = list(a.shape) if s[axis] > n: index = [slice(None)]*len(s) - index[axis] = slice(0,n) + index[axis] = slice(0, n) a = a[index] else: index = [slice(None)]*len(s) - index[axis] = slice(0,s[axis]) + index[axis] = slice(0, s[axis]) s[axis] = n z = zeros(s, a.dtype.char) z[index] = a @@ -273,7 +273,7 @@ def rfft(a, n=None, axis=-1): out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. - If `n` is even, the length of the transformed axis is ``(n/2)+1``. + If `n` is even, the length of the transformed axis is ``(n/2)+1``. If `n` is odd, the length is ``(n+1)/2``. Raises @@ -298,13 +298,13 @@ def rfft(a, n=None, axis=-1): compute the negative frequency terms, and the length of the transformed axis of the output is therefore ``n//2+1``. - When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains the zero-frequency term 0*fs, which is real due to Hermitian symmetry. - If `n` is even, ``A[-1]`` contains the term representing both positive - and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely - real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains - the largest positive frequency (fs/2*(n-1)/n), and is complex in the + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the general case. If the input `a` contains an imaginary part, it is silently discarded. @@ -619,7 +619,7 @@ def fftn(a, s=None, axes=None): """ - return _raw_fftnd(a,s,axes,fft) + return _raw_fftnd(a, s, axes, fft) def ifftn(a, s=None, axes=None): """ @@ -714,7 +714,7 @@ def ifftn(a, s=None, axes=None): return _raw_fftnd(a, s, axes, ifft) -def fft2(a, s=None, axes=(-2,-1)): +def fft2(a, s=None, axes=(-2, -1)): """ Compute the 2-dimensional discrete Fourier Transform @@ -792,10 +792,10 @@ def fft2(a, s=None, axes=(-2,-1)): """ - return _raw_fftnd(a,s,axes,fft) + return _raw_fftnd(a, s, axes, fft) -def ifft2(a, s=None, axes=(-2,-1)): +def ifft2(a, s=None, axes=(-2, -1)): """ Compute the 2-dimensional inverse discrete Fourier Transform. @@ -965,7 +965,7 @@ def rfftn(a, s=None, axes=None): a = fft(a, s[ii], axes[ii]) return a -def rfft2(a, s=None, axes=(-2,-1)): +def rfft2(a, s=None, axes=(-2, -1)): """ Compute the 2-dimensional FFT of a real array. @@ -1086,7 +1086,7 @@ def irfftn(a, s=None, axes=None): a = irfft(a, s[-1], axes[-1]) return a -def irfft2(a, s=None, axes=(-2,-1)): +def irfft2(a, s=None, axes=(-2, -1)): """ Compute the 2-dimensional inverse FFT of a real array. 
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py index 0a475153f..2e35f6db6 100644 --- a/numpy/fft/helper.py +++ b/numpy/fft/helper.py @@ -68,8 +68,8 @@ def fftshift(x, axes=None): for k in axes: n = tmp.shape[k] p2 = (n+1)//2 - mylist = concatenate((arange(p2,n),arange(p2))) - y = take(y,mylist,k) + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) return y @@ -116,8 +116,8 @@ def ifftshift(x, axes=None): for k in axes: n = tmp.shape[k] p2 = n-(n+1)//2 - mylist = concatenate((arange(p2,n),arange(p2))) - y = take(y,mylist,k) + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) return y @@ -125,8 +125,8 @@ def fftfreq(n, d=1.0): """ Return the Discrete Fourier Transform sample frequencies. - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second. Given a window length `n` and a sample spacing `d`:: @@ -140,7 +140,7 @@ def fftfreq(n, d=1.0): Window length. d : scalar, optional Sample spacing (inverse of the sampling rate). Defaults to 1. - + Returns ------- f : ndarray @@ -157,7 +157,7 @@ def fftfreq(n, d=1.0): array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25]) """ - if not (isinstance(n,int) or isinstance(n, integer)): + if not (isinstance(n, int) or isinstance(n, integer)): raise ValueError("n should be an integer") val = 1.0 / (n * d) results = empty(n, int) @@ -172,11 +172,11 @@ def fftfreq(n, d=1.0): def rfftfreq(n, d=1.0): """ - Return the Discrete Fourier Transform sample frequencies + Return the Discrete Fourier Transform sample frequencies (for usage with rfft, irfft). - The returned float array `f` contains the frequency bin centers in cycles - per unit of the sample spacing (with zero at the start). For instance, if + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second. 
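For orientation (an aside, not part of the change set): the sample output quoted in the fftfreq docstring above corresponds to a window of length 8 with sample spacing 0.1, and fftshift reorders those bin centres so the zero-frequency term sits in the middle; printed spacing below is indicative of NumPy's default formatting:

    >>> import numpy as np
    >>> freq = np.fft.fftfreq(8, d=0.1)       # bin centres in cycles per unit of the spacing
    >>> freq
    array([ 0.  ,  1.25,  2.5 ,  3.75, -5.  , -3.75, -2.5 , -1.25])
    >>> np.fft.fftshift(freq)                 # zero-frequency term moved to the centre
    array([-5.  , -3.75, -2.5 , -1.25,  0.  ,  1.25,  2.5 ,  3.75])
    >>> np.allclose(np.fft.ifftshift(np.fft.fftshift(freq)), freq)   # shifts are inverses
    True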
Given a window length `n` and a sample spacing `d`:: @@ -213,7 +213,7 @@ def rfftfreq(n, d=1.0): array([ 0., 10., 20., 30., 40., 50.]) """ - if not (isinstance(n,int) or isinstance(n, integer)): + if not (isinstance(n, int) or isinstance(n, integer)): raise ValueError("n should be an integer") val = 1.0/(n*d) N = n//2 + 1 diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py index 23a945fce..79f681e55 100644 --- a/numpy/fft/setup.py +++ b/numpy/fft/setup.py @@ -3,7 +3,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('fft',parent_package,top_path) + config = Configuration('fft', parent_package, top_path) config.add_data_dir('tests') diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py index f30d9fff6..7eaa99fdb 100644 --- a/numpy/fft/tests/test_helper.py +++ b/numpy/fft/tests/test_helper.py @@ -25,7 +25,7 @@ class TestFFTShift(TestCase): assert_array_almost_equal(fft.ifftshift(y), x) def test_inverse(self): - for n in [1,4,9,100,211]: + for n in [1, 4, 9, 100, 211]: x = np.random.random((n,)) assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 64a8550c6..73e4b2306 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -24,7 +24,7 @@ from .financial import * from .arrayterator import * from .arraypad import * -__all__ = ['emath','math'] +__all__ = ['emath', 'math'] __all__ += type_check.__all__ __all__ += index_tricks.__all__ __all__ += function_base.__all__ diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index 8cac117c9..ec642afd3 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -688,7 +688,7 @@ def npv(rate, values): """ values = np.asarray(values) - return (values / (1+rate)**np.arange(0,len(values))).sum(axis=0) + return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0) def mirr(values, finance_rate, reinvest_rate): """ diff --git a/numpy/lib/format.py b/numpy/lib/format.py index fd459e84e..fd5496e96 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -352,7 +352,7 @@ def read_array_header_1_0(fp): # Sanity-check the values. if (not isinstance(d['shape'], tuple) or - not numpy.all([isinstance(x, (int,long)) for x in d['shape']])): + not numpy.all([isinstance(x, (int, long)) for x in d['shape']])): msg = "shape is not valid: %r" raise ValueError(msg % (d['shape'],)) if not isinstance(d['fortran_order'], bool): @@ -366,7 +366,7 @@ def read_array_header_1_0(fp): return d['shape'], d['fortran_order'], dtype -def write_array(fp, array, version=(1,0)): +def write_array(fp, array, version=(1, 0)): """ Write an array to an NPY file, including a header. @@ -485,7 +485,7 @@ def read_array(fp): def open_memmap(filename, mode='r+', dtype=None, shape=None, - fortran_order=False, version=(1,0)): + fortran_order=False, version=(1, 0)): """ Open a .npy file as a memory-mapped array. diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 4285bf793..91eace2ff 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -351,7 +351,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): # Compute the bin number each sample falls into. Ncount = {} for i in arange(D): - Ncount[i] = digitize(sample[:,i], edges[i]) + Ncount[i] = digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. 
# For the rightmost bin, we want values equal to the right @@ -362,7 +362,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): if not np.isinf(mindiff): decimal = int(-log10(mindiff)) + 6 # Find which points are on the rightmost edge. - on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1], + on_edge = where(around(sample[:, i], decimal) == around(edges[i][-1], decimal))[0] # Shift these points one bin to the left. Ncount[i][on_edge] -= 1 @@ -392,11 +392,11 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None): hist = hist.reshape(sort(nbin)) for i in arange(nbin.size): j = ni.argsort()[i] - hist = hist.swapaxes(i,j) - ni[i],ni[j] = ni[j],ni[i] + hist = hist.swapaxes(i, j) + ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). - core = D*[slice(1,-1)] + core = D*[slice(1, -1)] hist = hist[core] # Normalize if normed is True @@ -1196,7 +1196,7 @@ def sort_complex(a): array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ - b = array(a,copy=True) + b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': @@ -1269,7 +1269,7 @@ def unique(x): if tmp.size == 0: return tmp tmp.sort() - idx = concatenate(([True],tmp[1:]!=tmp[:-1])) + idx = concatenate(([True], tmp[1:]!=tmp[:-1])) return tmp[idx] except AttributeError: items = sorted(set(x)) @@ -1736,7 +1736,7 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None): rowvar = 1 if rowvar: axis = 0 - tup = (slice(None),newaxis) + tup = (slice(None), newaxis) else: axis = 1 tup = (newaxis, slice(None)) @@ -1744,7 +1744,7 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None): if y is not None: y = array(y, copy=False, ndmin=2, dtype=float) - X = concatenate((X,y), axis) + X = concatenate((X, y), axis) X -= X.mean(axis=1-axis)[tup] if rowvar: @@ -1820,7 +1820,7 @@ def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None): d = diag(c) except ValueError: # scalar covariance return 1 - return c/sqrt(multiply.outer(d,d)) + return c/sqrt(multiply.outer(d, d)) def blackman(M): """ @@ -1916,7 +1916,7 @@ def blackman(M): return array([]) if M == 1: return ones(1, float) - n = arange(0,M) + n = arange(0, M) return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): @@ -2022,8 +2022,8 @@ def bartlett(M): return array([]) if M == 1: return ones(1, float) - n = arange(0,M) - return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1)) + n = arange(0, M) + return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0-2.0*n/(M-1)) def hanning(M): """ @@ -2120,7 +2120,7 @@ def hanning(M): return array([]) if M == 1: return ones(1, float) - n = arange(0,M) + n = arange(0, M) return 0.5-0.5*cos(2.0*pi*n/(M-1)) def hamming(M): @@ -2216,8 +2216,8 @@ def hamming(M): if M < 1: return array([]) if M == 1: - return ones(1,float) - n = arange(0,M) + return ones(1, float) + n = arange(0, M) return 0.54-0.46*cos(2.0*pi*n/(M-1)) ## Code from cephes for i0 @@ -2285,7 +2285,7 @@ def _chbevl(x, vals): b0 = vals[0] b1 = 0.0 - for i in range(1,len(vals)): + for i in range(1, len(vals)): b2 = b1 b1 = b0 b0 = x*b1 - b2 + vals[i] @@ -2364,7 +2364,7 @@ def i0(x): ## End of cephes code for i0 -def kaiser(M,beta): +def kaiser(M, beta): """ Return the Kaiser window. 
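A small check (an aside, not part of this diff) that the closed-form expression used by hanning() above really is the whole implementation; the printed array is indicative of NumPy's default float formatting:

    >>> import numpy as np
    >>> M = 5
    >>> n = np.arange(0, M)
    >>> w = 0.5 - 0.5*np.cos(2.0*np.pi*n/(M-1))   # same formula as in hanning() above
    >>> np.allclose(w, np.hanning(M))
    True
    >>> np.hanning(5)
    array([ 0. ,  0.5,  1. ,  0.5,  0. ])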
@@ -2487,7 +2487,7 @@ def kaiser(M,beta): from numpy.dual import i0 if M == 1: return np.array([1.]) - n = arange(0,M) + n = arange(0, M) alpha = (M-1)/2.0 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) @@ -2592,7 +2592,7 @@ def msort(a): ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. """ - b = array(a,subok=True,copy=True) + b = array(a, subok=True, copy=True) b.sort(0) return b @@ -2841,7 +2841,7 @@ def _compute_qth_percentile(sorted, q, axis, out): else: indexer[axis] = slice(i, i+2) j = i + 1 - weights = array([(j - index), (index - i)],float) + weights = array([(j - index), (index - i)], float) wshape = [1]*sorted.ndim wshape[axis] = 2 weights.shape = wshape @@ -2926,8 +2926,8 @@ def trapz(y, x=None, dx=1.0, axis=-1): nd = len(y.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd - slice1[axis] = slice(1,None) - slice2[axis] = slice(None,-1) + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) try: ret = (d * (y[slice1] +y [slice2]) / 2.0).sum(axis) except ValueError: # Operations didn't work, cast to ndarray @@ -3212,7 +3212,7 @@ def delete(arr, obj, axis=None): if stop == N: pass else: - slobj[axis] = slice(stop-numtodel,None) + slobj[axis] = slice(stop-numtodel, None) slobj2 = [slice(None)]*ndim slobj2[axis] = slice(stop, None) new[slobj] = arr[slobj2] @@ -3253,9 +3253,9 @@ def delete(arr, obj, axis=None): new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, obj) new[slobj] = arr[slobj] - slobj[axis] = slice(obj,None) + slobj[axis] = slice(obj, None) slobj2 = [slice(None)]*ndim - slobj2[axis] = slice(obj+1,None) + slobj2[axis] = slice(obj+1, None) new[slobj] = arr[slobj2] else: if obj.size == 0 and not isinstance(_obj, np.ndarray): diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index ca3d60869..570cd0f1d 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -6,8 +6,8 @@ __all__ = ['ravel_multi_index', 'ogrid', 'r_', 'c_', 's_', 'index_exp', 'ix_', - 'ndenumerate','ndindex', - 'fill_diagonal','diag_indices','diag_indices_from'] + 'ndenumerate', 'ndindex', + 'fill_diagonal', 'diag_indices', 'diag_indices_from'] import sys import numpy.core.numeric as _nx @@ -144,7 +144,7 @@ class nd_grid(object): """ def __init__(self, sparse=False): self.sparse = sparse - def __getitem__(self,key): + def __getitem__(self, key): try: size = [] typ = int @@ -180,7 +180,7 @@ class nd_grid(object): if self.sparse: slobj = [_nx.newaxis]*len(size) for k in range(len(size)): - slobj[k] = slice(None,None) + slobj[k] = slice(None, None) nn[k] = nn[k][slobj] slobj[k] = _nx.newaxis return nn @@ -195,12 +195,12 @@ class nd_grid(object): if step != 1: step = (key.stop-start)/float(step-1) stop = key.stop+step - return _nx.arange(0, length,1, float)*step + start + return _nx.arange(0, length, 1, float)*step + start else: return _nx.arange(start, stop, step) - def __getslice__(self,i,j): - return _nx.arange(i,j) + def __getslice__(self, i, j): + return _nx.arange(i, j) def __len__(self): return 0 @@ -237,12 +237,12 @@ class AxisConcatenator(object): self.trans1d = trans1d self.ndmin = ndmin - def __getitem__(self,key): + def __getitem__(self, key): trans1d = self.trans1d ndmin = self.ndmin if isinstance(key, str): frame = sys._getframe().f_back - mymat = matrix.bmat(key,frame.f_globals,frame.f_locals) + mymat = matrix.bmat(key, frame.f_globals, frame.f_locals) return mymat if not isinstance(key, tuple): key = (key,) @@ -265,10 +265,10 @@ class AxisConcatenator(object): else: newobj = _nx.arange(start, stop, 
step) if ndmin > 1: - newobj = array(newobj,copy=False,ndmin=ndmin) + newobj = array(newobj, copy=False, ndmin=ndmin) if trans1d != -1: - newobj = newobj.swapaxes(-1,trans1d) - elif isinstance(key[k],str): + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(key[k], str): if k != 0: raise ValueError("special directives must be the " "first entry.") @@ -293,7 +293,7 @@ class AxisConcatenator(object): except (ValueError, TypeError): raise ValueError("unknown special directive") elif type(key[k]) in ScalarType: - newobj = array(key[k],ndmin=ndmin) + newobj = array(key[k], ndmin=ndmin) scalars.append(k) scalar = True scalartypes.append(newobj.dtype) @@ -323,11 +323,11 @@ class AxisConcatenator(object): for k in scalars: objs[k] = objs[k].astype(final_dtype) - res = _nx.concatenate(tuple(objs),axis=self.axis) + res = _nx.concatenate(tuple(objs), axis=self.axis) return self._retval(res) - def __getslice__(self,i,j): - res = _nx.arange(i,j) + def __getslice__(self, i, j): + res = _nx.arange(i, j) return self._retval(res) def __len__(self): @@ -657,7 +657,7 @@ def fill_diagonal(a, val, wrap=False): Value to be written on the diagonal, its type must be compatible with that of the array a. - wrap : bool + wrap : bool For tall matrices in NumPy version up to 1.6.2, the diagonal "wrapped" after N columns. You can have this behavior with this option. This affect only tall matrices. diff --git a/numpy/lib/info.py b/numpy/lib/info.py index 94eeae503..3fbbab769 100644 --- a/numpy/lib/info.py +++ b/numpy/lib/info.py @@ -147,5 +147,5 @@ setdiff1d Set difference of 1D arrays with unique elements. """ from __future__ import division, absolute_import, print_function -depends = ['core','testing'] +depends = ['core', 'testing'] global_symbols = ['*'] diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index 5402adc6d..48a012c9c 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -141,7 +141,7 @@ def poly(seq_of_zeros): roots = NX.asarray(seq_of_zeros, complex) pos_roots = sort_complex(NX.compress(roots.imag > 0, roots)) neg_roots = NX.conjugate(sort_complex( - NX.compress(roots.imag < 0,roots))) + NX.compress(roots.imag < 0, roots))) if (len(pos_roots) == len(neg_roots) and NX.alltrue(neg_roots == pos_roots)): a = a.real.copy() @@ -223,7 +223,7 @@ def roots(p): if N > 1: # build companion matrix and find its eigenvalues (the roots) A = diag(NX.ones((N-2,), p.dtype), -1) - A[0, :] = -p[1:] / p[0] + A[0,:] = -p[1:] / p[0] roots = eigvals(A) else: roots = NX.array([]) @@ -589,7 +589,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): if full : return c, resids, rank, s, rcond elif cov : - Vbase = inv(dot(lhs.T,lhs)) + Vbase = inv(dot(lhs.T, lhs)) Vbase /= NX.outer(scale, scale) # Some literature ignores the extra -2.0 factor in the denominator, but # it is included here because the covariance of Multivariate Student-T @@ -599,7 +599,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): if y.ndim == 1: return c, Vbase * fac else: - return c, Vbase[:,:,NX.newaxis] * fac + return c, Vbase[:,:, NX.newaxis] * fac else : return c @@ -828,7 +828,7 @@ def polymul(a1, a2): """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) - a1,a2 = poly1d(a1),poly1d(a2) + a1, a2 = poly1d(a1), poly1d(a2) val = NX.convolve(a1, a2) if truepoly: val = poly1d(val) @@ -1200,7 +1200,7 @@ class poly1d(object): def __getattr__(self, key): if key in ['r', 'roots']: return roots(self.coeffs) - elif key in ['c','coef','coefficients']: + elif key in ['c', 'coef', 
'coefficients']: return self.coeffs elif key in ['o']: return self.order @@ -1261,4 +1261,4 @@ class poly1d(object): # Stuff to do on module import -warnings.simplefilter('always',RankWarning) +warnings.simplefilter('always', RankWarning) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index 827e60d70..ae4ee56c6 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -900,7 +900,7 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', (r1names, r2names) = (r1.dtype.names, r2.dtype.names) # Check the names for collision - if (set.intersection(set(r1names),set(r2names)).difference(key) and + if (set.intersection(set(r1names), set(r2names)).difference(key) and not (r1postfix or r2postfix)): msg = "r1 and r2 contain common names, r1postfix and r2postfix " msg += "can't be empty" diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index 2aa08d0ea..3da86d9c8 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -17,7 +17,7 @@ correctly handled. See their respective docstrings for specific examples. """ from __future__ import division, absolute_import, print_function -__all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos', +__all__ = ['sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', 'arctanh'] import numpy.core.numeric as nx @@ -84,7 +84,7 @@ def _tocomplex(arr): array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) """ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, - nt.ushort,nt.csingle)): + nt.ushort, nt.csingle)): return arr.astype(nt.csingle) else: return arr.astype(nt.cdouble) diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py index edc653f9f..153af314c 100644 --- a/numpy/lib/setup.py +++ b/numpy/lib/setup.py @@ -5,13 +5,13 @@ from os.path import join def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('lib',parent_package,top_path) + config = Configuration('lib', parent_package, top_path) - config.add_include_dirs(join('..','core','include')) + config.add_include_dirs(join('..', 'core', 'include')) config.add_extension('_compiled_base', - sources=[join('src','_compiled_base.c')] + sources=[join('src', '_compiled_base.c')] ) config.add_data_dir('benchmarks') diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index c63f8140d..1363a3213 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -1,7 +1,7 @@ from __future__ import division, absolute_import, print_function -__all__ = ['column_stack','row_stack', 'dstack','array_split','split','hsplit', - 'vsplit','dsplit','apply_over_axes','expand_dims', +__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit', + 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', 'apply_along_axis', 'kron', 'tile', 'get_array_wrap'] import numpy.core.numeric as _nx @@ -68,18 +68,18 @@ def apply_along_axis(func1d,axis,arr,*args): axis += nd if (axis >= nd): raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." 
- % (axis,nd)) + % (axis, nd)) ind = [0]*(nd-1) - i = zeros(nd,'O') + i = zeros(nd, 'O') indlist = list(range(nd)) indlist.remove(axis) - i[axis] = slice(None,None) + i[axis] = slice(None, None) outshape = asarray(arr.shape).take(indlist) i.put(indlist, ind) res = func1d(arr[tuple(i.tolist())],*args) # if res is a number, then we have a smaller output array if isscalar(res): - outarr = zeros(outshape,asarray(res).dtype) + outarr = zeros(outshape, asarray(res).dtype) outarr[tuple(ind)] = res Ntot = product(outshape) k = 1 @@ -91,7 +91,7 @@ def apply_along_axis(func1d,axis,arr,*args): ind[n-1] += 1 ind[n] = 0 n -= 1 - i.put(indlist,ind) + i.put(indlist, ind) res = func1d(arr[tuple(i.tolist())],*args) outarr[tuple(ind)] = res k += 1 @@ -101,7 +101,7 @@ def apply_along_axis(func1d,axis,arr,*args): holdshape = outshape outshape = list(arr.shape) outshape[axis] = len(res) - outarr = zeros(outshape,asarray(res).dtype) + outarr = zeros(outshape, asarray(res).dtype) outarr[tuple(i.tolist())] = res k = 1 while k < Ntot: @@ -182,7 +182,7 @@ def apply_over_axes(func, a, axes): if res.ndim == val.ndim: val = res else: - res = expand_dims(res,axis) + res = expand_dims(res, axis) if res.ndim == val.ndim: val = res else: @@ -288,11 +288,11 @@ def column_stack(tup): """ arrays = [] for v in tup: - arr = array(v,copy=False,subok=True) + arr = array(v, copy=False, subok=True) if arr.ndim < 2: - arr = array(arr,copy=False,subok=True,ndmin=2).T + arr = array(arr, copy=False, subok=True, ndmin=2).T arrays.append(arr) - return _nx.concatenate(arrays,1) + return _nx.concatenate(arrays, 1) def dstack(tup): """ @@ -348,7 +348,7 @@ def _replace_zero_by_x_arrays(sub_arys): for i in range(len(sub_arys)): if len(_nx.shape(sub_arys[i])) == 0: sub_arys[i] = _nx.array([]) - elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)): + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): sub_arys[i] = _nx.array([]) return sub_arys @@ -383,17 +383,17 @@ def array_split(ary,indices_or_sections,axis = 0): Nsections = int(indices_or_sections) if Nsections <= 0: raise ValueError('number sections must be larger than 0.') - Neach_section,extras = divmod(Ntotal,Nsections) + Neach_section, extras = divmod(Ntotal, Nsections) section_sizes = [0] + \ extras * [Neach_section+1] + \ (Nsections-extras) * [Neach_section] div_points = _nx.array(section_sizes).cumsum() sub_arys = [] - sary = _nx.swapaxes(ary,axis,0) + sary = _nx.swapaxes(ary, axis, 0) for i in range(Nsections): st = div_points[i]; end = div_points[i+1] - sub_arys.append(_nx.swapaxes(sary[st:end],axis,0)) + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) # there is a weird issue with array slicing that allows # 0x10 arrays and other such things. The following kludge is needed @@ -474,10 +474,10 @@ def split(ary,indices_or_sections,axis=0): N = ary.shape[axis] if N % sections: raise ValueError('array split does not result in an equal division') - res = array_split(ary,indices_or_sections,axis) + res = array_split(ary, indices_or_sections, axis) return res -def hsplit(ary,indices_or_sections): +def hsplit(ary, indices_or_sections): """ Split an array into multiple sub-arrays horizontally (column-wise). 
@@ -535,11 +535,11 @@ def hsplit(ary,indices_or_sections): if len(_nx.shape(ary)) == 0: raise ValueError('hsplit only works on arrays of 1 or more dimensions') if len(ary.shape) > 1: - return split(ary,indices_or_sections,1) + return split(ary, indices_or_sections, 1) else: - return split(ary,indices_or_sections,0) + return split(ary, indices_or_sections, 0) -def vsplit(ary,indices_or_sections): +def vsplit(ary, indices_or_sections): """ Split an array into multiple sub-arrays vertically (row-wise). @@ -588,9 +588,9 @@ def vsplit(ary,indices_or_sections): """ if len(_nx.shape(ary)) < 2: raise ValueError('vsplit only works on arrays of 2 or more dimensions') - return split(ary,indices_or_sections,0) + return split(ary, indices_or_sections, 0) -def dsplit(ary,indices_or_sections): +def dsplit(ary, indices_or_sections): """ Split array into multiple sub-arrays along the 3rd axis (depth). @@ -633,7 +633,7 @@ def dsplit(ary,indices_or_sections): """ if len(_nx.shape(ary)) < 3: raise ValueError('vsplit only works on arrays of 3 or more dimensions') - return split(ary,indices_or_sections,2) + return split(ary, indices_or_sections, 2) def get_array_prepare(*args): """Find the wrapper for the array with the highest priority. @@ -659,7 +659,7 @@ def get_array_wrap(*args): return wrappers[-1][-1] return None -def kron(a,b): +def kron(a, b): """ Kronecker product of two arrays. @@ -728,10 +728,10 @@ def kron(a,b): """ b = asanyarray(b) - a = array(a,copy=False,subok=True,ndmin=b.ndim) + a = array(a, copy=False, subok=True, ndmin=b.ndim) ndb, nda = b.ndim, a.ndim if (nda == 0 or ndb == 0): - return _nx.multiply(a,b) + return _nx.multiply(a, b) as_ = a.shape bs = b.shape if not a.flags.contiguous: @@ -745,7 +745,7 @@ def kron(a,b): else: bs = (1,)*(nda-ndb) + bs nd = nda - result = outer(a,b).reshape(as_+bs) + result = outer(a, b).reshape(as_+bs) axis = nd-1 for _ in range(nd): result = concatenate(result, axis=axis) @@ -819,14 +819,14 @@ def tile(A, reps): except TypeError: tup = (reps,) d = len(tup) - c = _nx.array(A,copy=False,subok=True,ndmin=d) + c = _nx.array(A, copy=False, subok=True, ndmin=d) shape = list(c.shape) - n = max(c.size,1) + n = max(c.size, 1) if (d < c.ndim): tup = (1,)*(c.ndim-d) + tup for i, nrep in enumerate(tup): if nrep!=1: - c = c.reshape(-1,n).repeat(nrep,0) + c = c.reshape(-1, n).repeat(nrep, 0) dim_in = shape[i] dim_out = dim_in*nrep shape[i] = dim_out diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index 7b6b06fdc..d092f92a8 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -115,6 +115,6 @@ def broadcast_arrays(*args): common_shape.append(1) # Construct the new arrays. 
- broadcasted = [as_strided(x, shape=sh, strides=st) for (x,sh,st) in + broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in zip(args, shapes, strides)] return broadcasted diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 4ba6529e4..7e8a7a6f3 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -61,8 +61,8 @@ class TestSetOps(TestCase): # test for structured arrays dt = [('', 'i'), ('', 'i')] - aa = np.array(list(zip(a,a)), dt) - bb = np.array(list(zip(b,b)), dt) + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) check_all(aa, bb, i1, i2, dt) @@ -83,7 +83,7 @@ class TestSetOps(TestCase): c = intersect1d( a, b ) assert_array_equal( c, ed ) - assert_array_equal([], intersect1d([],[])) + assert_array_equal([], intersect1d([], [])) def test_setxor1d( self ): a = np.array( [5, 7, 1, 2] ) @@ -107,19 +107,19 @@ class TestSetOps(TestCase): c = setxor1d( a, b ) assert_array_equal( c, ec ) - assert_array_equal([], setxor1d([],[])) + assert_array_equal([], setxor1d([], [])) def test_ediff1d(self): zero_elem = np.array([]) one_elem = np.array([1]) - two_elem = np.array([1,2]) + two_elem = np.array([1, 2]) - assert_array_equal([],ediff1d(zero_elem)) - assert_array_equal([0],ediff1d(zero_elem,to_begin=0)) - assert_array_equal([0],ediff1d(zero_elem,to_end=0)) - assert_array_equal([-1,0],ediff1d(zero_elem,to_begin=-1,to_end=0)) - assert_array_equal([],ediff1d(one_elem)) - assert_array_equal([1],ediff1d(two_elem)) + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) def test_in1d(self): # we use two different sizes for the b array here to test the @@ -182,8 +182,8 @@ class TestSetOps(TestCase): assert_array_equal(in1d([], []), []) def test_in1d_char_array( self ): - a = np.array(['a', 'b', 'c','d','e','c','e','b']) - b = np.array(['a','c']) + a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b']) + b = np.array(['a', 'c']) ec = np.array([True, False, True, False, False, True, False, False]) c = in1d(a, b) @@ -202,9 +202,9 @@ class TestSetOps(TestCase): def test_in1d_ravel(self): # Test that in1d ravels its input arrays. This is not documented # behavior however. The test is to ensure consistentency. 
- a = np.arange(6).reshape(2,3) - b = np.arange(3,9).reshape(3,2) - long_b = np.arange(3, 63).reshape(30,2) + a = np.arange(6).reshape(2, 3) + b = np.arange(3, 9).reshape(3, 2) + long_b = np.arange(3, 63).reshape(30, 2) ec = np.array([False, False, False, True, True, True]) assert_array_equal(in1d(a, b, assume_unique=True), ec) @@ -220,7 +220,7 @@ class TestSetOps(TestCase): c = union1d( a, b ) assert_array_equal( c, ec ) - assert_array_equal([], union1d([],[])) + assert_array_equal([], union1d([], [])) def test_setdiff1d( self ): a = np.array( [6, 5, 4, 7, 1, 2, 7, 4] ) @@ -236,12 +236,12 @@ class TestSetOps(TestCase): c = setdiff1d( a, b ) assert_array_equal( c, ec ) - assert_array_equal([], setdiff1d([],[])) + assert_array_equal([], setdiff1d([], [])) def test_setdiff1d_char_array(self): - a = np.array(['a','b','c']) - b = np.array(['a','b','s']) - assert_array_equal(setdiff1d(a,b),np.array(['c'])) + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) def test_manyways( self ): a = np.array( [5, 7, 1, 2, 8] ) diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py index 1a276a429..1894da8cb 100644 --- a/numpy/lib/tests/test_financial.py +++ b/numpy/lib/tests/test_financial.py @@ -5,7 +5,7 @@ import numpy as np class TestFinancial(TestCase): def test_rate(self): - assert_almost_equal(np.rate(10,0,-3500,10000), + assert_almost_equal(np.rate(10, 0, -3500, 10000), 0.1107, 4) def test_irr(self): @@ -14,127 +14,127 @@ class TestFinancial(TestCase): 0.0524, 2) def test_pv(self): - assert_almost_equal(np.pv(0.07,20,12000,0), + assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2) def test_fv(self): - assert_almost_equal(np.fv(0.075, 20, -2000,0,0), + assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.36, 2) def test_pmt(self): - assert_almost_equal(np.pmt(0.08/12,5*12,15000), + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000), -304.146, 3) def test_ppmt(self): - np.round(np.ppmt(0.1/12,1,60,55000),2) == 710.25 + np.round(np.ppmt(0.1/12, 1, 60, 55000), 2) == 710.25 def test_ipmt(self): - np.round(np.ipmt(0.1/12,1,24,2000),2) == 16.67 + np.round(np.ipmt(0.1/12, 1, 24, 2000), 2) == 16.67 def test_nper(self): - assert_almost_equal(np.nper(0.075,-2000,0,100000.), + assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), 21.54, 2) def test_nper2(self): - assert_almost_equal(np.nper(0.0,-2000,0,100000.), + assert_almost_equal(np.nper(0.0, -2000, 0, 100000.), 50.0, 1) def test_npv(self): - assert_almost_equal(np.npv(0.05,[-15000,1500,2500,3500,4500,6000]), + assert_almost_equal(np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), 122.89, 2) def test_mirr(self): - val = [-4500,-800,800,800,600,600,800,800,700,3000] + val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) - val = [-120000,39000,30000,21000,37000,46000] + val = [-120000, 39000, 30000, 21000, 37000, 46000] assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6) - val = [100,200,-50,300,-200] + val = [100, 200, -50, 300, -200] assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4) - val = [39000,30000,21000,37000,46000] + val = [39000, 30000, 21000, 37000, 46000] assert_(np.isnan(np.mirr(val, 0.10, 0.12))) def test_when(self): #begin - assert_almost_equal(np.rate(10,20,-3500,10000,1), - np.rate(10,20,-3500,10000,'begin'), 4) + assert_almost_equal(np.rate(10, 20, -3500, 10000, 1), + np.rate(10, 20, -3500, 10000, 'begin'), 4) #end - 
assert_almost_equal(np.rate(10,20,-3500,10000), - np.rate(10,20,-3500,10000,'end'), 4) - assert_almost_equal(np.rate(10,20,-3500,10000,0), - np.rate(10,20,-3500,10000,'end'), 4) + assert_almost_equal(np.rate(10, 20, -3500, 10000), + np.rate(10, 20, -3500, 10000, 'end'), 4) + assert_almost_equal(np.rate(10, 20, -3500, 10000, 0), + np.rate(10, 20, -3500, 10000, 'end'), 4) # begin - assert_almost_equal(np.pv(0.07,20,12000,0,1), - np.pv(0.07,20,12000,0,'begin'), 2) + assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1), + np.pv(0.07, 20, 12000, 0, 'begin'), 2) # end - assert_almost_equal(np.pv(0.07,20,12000,0), - np.pv(0.07,20,12000,0,'end'), 2) - assert_almost_equal(np.pv(0.07,20,12000,0,0), - np.pv(0.07,20,12000,0,'end'), 2) + assert_almost_equal(np.pv(0.07, 20, 12000, 0), + np.pv(0.07, 20, 12000, 0, 'end'), 2) + assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0), + np.pv(0.07, 20, 12000, 0, 'end'), 2) # begin - assert_almost_equal(np.fv(0.075, 20, -2000,0,1), - np.fv(0.075, 20, -2000,0,'begin'), 4) + assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1), + np.fv(0.075, 20, -2000, 0, 'begin'), 4) # end - assert_almost_equal(np.fv(0.075, 20, -2000,0), - np.fv(0.075, 20, -2000,0,'end'), 4) - assert_almost_equal(np.fv(0.075, 20, -2000,0,0), - np.fv(0.075, 20, -2000,0,'end'), 4) + assert_almost_equal(np.fv(0.075, 20, -2000, 0), + np.fv(0.075, 20, -2000, 0, 'end'), 4) + assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), + np.fv(0.075, 20, -2000, 0, 'end'), 4) # begin - assert_almost_equal(np.pmt(0.08/12,5*12,15000.,0,1), - np.pmt(0.08/12,5*12,15000.,0,'begin'), 4) + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1), + np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4) # end - assert_almost_equal(np.pmt(0.08/12,5*12,15000.,0), - np.pmt(0.08/12,5*12,15000.,0,'end'), 4) - assert_almost_equal(np.pmt(0.08/12,5*12,15000.,0,0), - np.pmt(0.08/12,5*12,15000.,0,'end'), 4) + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0), + np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) + assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0), + np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) # begin - assert_almost_equal(np.ppmt(0.1/12,1,60,55000,0,1), - np.ppmt(0.1/12,1,60,55000,0,'begin'), 4) + assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1), + np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4) # end - assert_almost_equal(np.ppmt(0.1/12,1,60,55000,0), - np.ppmt(0.1/12,1,60,55000,0,'end'), 4) - assert_almost_equal(np.ppmt(0.1/12,1,60,55000,0,0), - np.ppmt(0.1/12,1,60,55000,0,'end'), 4) + assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0), + np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) + assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0), + np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) # begin - assert_almost_equal(np.ipmt(0.1/12,1,24,2000,0,1), - np.ipmt(0.1/12,1,24,2000,0,'begin'), 4) + assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1), + np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4) # end - assert_almost_equal(np.ipmt(0.1/12,1,24,2000,0), - np.ipmt(0.1/12,1,24,2000,0,'end'), 4) - assert_almost_equal(np.ipmt(0.1/12,1,24,2000,0,0), - np.ipmt(0.1/12,1,24,2000,0,'end'), 4) + assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0), + np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) + assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0), + np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) # begin - assert_almost_equal(np.nper(0.075,-2000,0,100000.,1), - np.nper(0.075,-2000,0,100000.,'begin'), 4) + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1), + np.nper(0.075, -2000, 0, 100000., 'begin'), 4) # end - 
assert_almost_equal(np.nper(0.075,-2000,0,100000.), - np.nper(0.075,-2000,0,100000.,'end'), 4) - assert_almost_equal(np.nper(0.075,-2000,0,100000.,0), - np.nper(0.075,-2000,0,100000.,'end'), 4) + assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), + np.nper(0.075, -2000, 0, 100000., 'end'), 4) + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0), + np.nper(0.075, -2000, 0, 100000., 'end'), 4) def test_broadcast(self): - assert_almost_equal(np.nper(0.075,-2000,0,100000.,[0,1]), - [ 21.5449442 , 20.76156441], 4) + assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), + [ 21.5449442, 20.76156441], 4) - assert_almost_equal(np.ipmt(0.1/12,list(range(5)), 24, 2000), + assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000), [-17.29165168, -16.66666667, -16.03647345, -15.40102862, -14.76028842], 4) - assert_almost_equal(np.ppmt(0.1/12,list(range(5)), 24, 2000), - [-74.998201 , -75.62318601, -76.25337923, + assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000), + [-74.998201, -75.62318601, -76.25337923, -76.88882405, -77.52956425], 4) - assert_almost_equal(np.ppmt(0.1/12,list(range(5)), 24, 2000, 0, - [0,0,1,'end','begin']), - [-74.998201 , -75.62318601, -75.62318601, + assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0, + [0, 0, 1, 'end', 'begin']), + [-74.998201, -75.62318601, -75.62318601, -76.88882405, -76.88882405], 4) if __name__ == "__main__": diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 8b809891f..694dc4591 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -331,11 +331,11 @@ for scalar in scalars: # 1-D basic, # 2-D C-contiguous - basic.reshape((3,5)), + basic.reshape((3, 5)), # 2-D F-contiguous - basic.reshape((3,5)).T, + basic.reshape((3, 5)).T, # 2-D non-contiguous - basic.reshape((3,5))[::-1,::2], + basic.reshape((3, 5))[::-1, ::2], ]) # More complicated record arrays. 
@@ -354,8 +354,8 @@ Pdescr = [ # A plain list of tuples with values for testing: PbufferT = [ # x y z - ([3,2], [[6.,4.],[6.,4.]], 8), - ([4,3], [[7.,5.],[7.,5.]], 9), + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), ] @@ -394,8 +394,8 @@ NbufferT = [ # x Info color info y z # value y2 Info2 name z2 Name Value # name value y3 z3 - ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), - ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), ] record_arrays = [ @@ -520,7 +520,7 @@ def test_bad_magic_args(): def test_large_header(): s = BytesIO() d = {'a':1,'b':2} - format.write_array_header_1_0(s,d) + format.write_array_header_1_0(s, d) s = BytesIO() d = {'a':1,'b':2,'c':'x'*256*256} @@ -538,18 +538,18 @@ def test_bad_header(): assert_raises(ValueError, format.read_array_header_1_0, s) # headers without the exact keys required should fail - d = {"shape":(1,2), + d = {"shape":(1, 2), "descr":"x"} s = BytesIO() - format.write_array_header_1_0(s,d) + format.write_array_header_1_0(s, d) assert_raises(ValueError, format.read_array_header_1_0, s) - d = {"shape":(1,2), + d = {"shape":(1, 2), "fortran_order":False, "descr":"x", "extrakey":-1} s = BytesIO() - format.write_array_header_1_0(s,d) + format.write_array_header_1_0(s, d) assert_raises(ValueError, format.read_array_header_1_0, s) if __name__ == "__main__": diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 65519d0bc..28f094653 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -174,10 +174,10 @@ class TestInsert(TestCase): assert_equal(insert(a, 0, 1), [1, 1, 2, 3]) assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) - assert_equal(insert(a, 1,[1,2,3]), [1, 1, 2, 3, 2, 3]) - assert_equal(insert(a,[1,-1,3],9),[1,9,2,9,3,9]) - assert_equal(insert(a,slice(-1,None,-1), 9),[9,1,9,2,9,3]) - assert_equal(insert(a,[-1,1,3], [7,8,9]),[1,8,2,7,3,9]) + assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3]) + assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9]) + assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3]) + assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9]) b = np.array([0, 1], dtype=np.float64) assert_equal(insert(b, 0, b[0]), [0., 0., 1.]) assert_equal(insert(b, [], []), b) @@ -185,42 +185,42 @@ class TestInsert(TestCase): #assert_equal(insert(a, np.array([True]*4), 9), [9,1,9,2,9,3,9]) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', FutureWarning) - assert_equal(insert(a, np.array([True]*4), 9), [1,9,9,9,9,2,3]) + assert_equal(insert(a, np.array([True]*4), 9), [1, 9, 9, 9, 9, 2, 3]) assert_(w[0].category is FutureWarning) def test_multidim(self): a = [[1, 1, 1]] r = [[2, 2, 2], [1, 1, 1]] - assert_equal(insert(a, 0, [1]), [1,1,1,1]) + assert_equal(insert(a, 0, [1]), [1, 1, 1, 1]) assert_equal(insert(a, 0, [2, 2, 2], axis=0), r) assert_equal(insert(a, 0, 2, axis=0), r) assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]]) a = np.array([[1, 1], [2, 2], [3, 3]]) - b = np.arange(1,4).repeat(3).reshape(3,3) - c = np.concatenate((a[:,0:1], 
np.arange(1,4).repeat(3).reshape(3,3).T, - a[:,1:2]), axis=1) - assert_equal(insert(a, [1], [[1],[2],[3]], axis=1), b) + b = np.arange(1, 4).repeat(3).reshape(3, 3) + c = np.concatenate((a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T, + a[:, 1:2]), axis=1) + assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b) assert_equal(insert(a, [1], [1, 2, 3], axis=1), c) # scalars behave differently, in this case exactly opposite: assert_equal(insert(a, 1, [1, 2, 3], axis=1), b) - assert_equal(insert(a, 1, [[1],[2],[3]], axis=1), c) + assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c) - a = np.arange(4).reshape(2,2) - assert_equal(insert(a[:,:1], 1, a[:,1], axis=1), a) + a = np.arange(4).reshape(2, 2) + assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a) assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a) # negative axis value - a = np.arange(24).reshape((2,3,4)) - assert_equal(insert(a, 1, a[:,:,3], axis=-1), - insert(a, 1, a[:,:,3], axis=2)) - assert_equal(insert(a, 1, a[:,2,:], axis=-2), - insert(a, 1, a[:,2,:], axis=1)) + a = np.arange(24).reshape((2, 3, 4)) + assert_equal(insert(a, 1, a[:,:, 3], axis=-1), + insert(a, 1, a[:,:, 3], axis=2)) + assert_equal(insert(a, 1, a[:, 2,:], axis=-2), + insert(a, 1, a[:, 2,:], axis=1)) # invalid axis value - assert_raises(IndexError, insert, a, 1, a[:,2,:], axis=3) - assert_raises(IndexError, insert, a, 1, a[:,2,:], axis=-4) + assert_raises(IndexError, insert, a, 1, a[:, 2,:], axis=3) + assert_raises(IndexError, insert, a, 1, a[:, 2,:], axis=-4) def test_0d(self): # This is an error in the future @@ -236,9 +236,9 @@ class TestInsert(TestCase): a = np.arange(10).view(SubClass) assert_(isinstance(np.insert(a, 0, [0]), SubClass)) assert_(isinstance(np.insert(a, [], []), SubClass)) - assert_(isinstance(np.insert(a, [0,1], [1,2]), SubClass)) - assert_(isinstance(np.insert(a, slice(1,2), [1,2]), SubClass)) - assert_(isinstance(np.insert(a, slice(1,-2), []), SubClass)) + assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass)) + assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass)) + assert_(isinstance(np.insert(a, slice(1, -2), []), SubClass)) # This is an error in the future: a = np.array(1).view(SubClass) assert_(isinstance(np.insert(a, 0, [0]), SubClass)) @@ -355,10 +355,10 @@ class TestDiff(TestCase): def test_nd(self): x = 20 * rand(10, 20, 30) - out1 = x[:, :, 1:] - x[:, :, :-1] - out2 = out1[:, :, 1:] - out1[:, :, :-1] - out3 = x[1:, :, :] - x[:-1, :, :] - out4 = out3[1:, :, :] - out3[:-1, :, :] + out1 = x[:,:, 1:] - x[:,:, :-1] + out2 = out1[:,:, 1:] - out1[:,:, :-1] + out3 = x[1:,:,:] - x[:-1,:,:] + out4 = out3[1:,:,:] - out3[:-1,:,:] assert_array_equal(diff(x), out1) assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, axis=0), out3) @@ -368,7 +368,7 @@ class TestDiff(TestCase): class TestDelete(TestCase): def setUp(self): self.a = np.arange(5) - self.nd_a = np.arange(5).repeat(2).reshape(1,5,2) + self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) def _check_inverse_of_slicing(self, indices): a_del = delete(self.a, indices) @@ -380,8 +380,8 @@ class TestDelete(TestCase): indices = indices[(indices >= 0) & (indices < 5)] assert_array_equal(setxor1d(a_del, self.a[indices,]), self.a, err_msg=msg) - xor = setxor1d(nd_a_del[0,:,0], self.nd_a[0,indices,0]) - assert_array_equal(xor, self.nd_a[0,:,0], err_msg=msg) + xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0]) + assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg) def test_slices(self): lims = [-6, -2, 0, 1, 2, 4, 5] @@ -394,7 +394,7 @@ class 
TestDelete(TestCase): def test_fancy(self): # Deprecation/FutureWarning tests should be kept after change. - self._check_inverse_of_slicing(np.array([[0,1],[2,1]])) + self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) assert_raises(DeprecationWarning, delete, self.a, [100]) assert_raises(DeprecationWarning, delete, self.a, [-100]) with warnings.catch_warnings(record=True) as w: @@ -422,9 +422,9 @@ class TestDelete(TestCase): a = self.a.view(SubClass) assert_(isinstance(delete(a, 0), SubClass)) assert_(isinstance(delete(a, []), SubClass)) - assert_(isinstance(delete(a, [0,1]), SubClass)) - assert_(isinstance(delete(a, slice(1,2)), SubClass)) - assert_(isinstance(delete(a, slice(1,-2)), SubClass)) + assert_(isinstance(delete(a, [0, 1]), SubClass)) + assert_(isinstance(delete(a, slice(1, 2)), SubClass)) + assert_(isinstance(delete(a, slice(1, -2)), SubClass)) class TestGradient(TestCase): def test_basic(self): @@ -598,7 +598,7 @@ class TestVectorize(TestCase): while _p: res = res*x + _p.pop(0) return res - vpolyval = np.vectorize(mypolyval, excluded=['p',1]) + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) ans = [3, 6] assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) @@ -776,18 +776,18 @@ class TestTrapz(TestCase): wz[0] /= 2 wz[-1] /= 2 - q = x[:, None, None] + y[None, :, None] + z[None, None, :] + q = x[:, None, None] + y[None,:, None] + z[None, None,:] qx = (q * wx[:, None, None]).sum(axis=0) - qy = (q * wy[None, :, None]).sum(axis=1) - qz = (q * wz[None, None, :]).sum(axis=2) + qy = (q * wy[None,:, None]).sum(axis=1) + qz = (q * wz[None, None,:]).sum(axis=2) # n-d `x` r = trapz(q, x=x[:, None, None], axis=0) assert_almost_equal(r, qx) - r = trapz(q, x=y[None, :, None], axis=1) + r = trapz(q, x=y[None,:, None], axis=1) assert_almost_equal(r, qy) - r = trapz(q, x=z[None, None, :], axis=2) + r = trapz(q, x=z[None, None,:], axis=2) assert_almost_equal(r, qz) # 1-d `x` @@ -1052,7 +1052,7 @@ class TestHistogramdd(TestCase): def test_identical_samples(self): x = np.zeros((10, 2), int) hist, edges = histogramdd(x, bins=2) - assert_array_equal(edges[0], np.array([-0.5, 0. , 0.5])) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) def test_empty(self): a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) @@ -1114,23 +1114,23 @@ class TestCheckFinite(TestCase): class TestCorrCoef(TestCase): A = np.array([[ 0.15391142, 0.18045767, 0.14197213], [ 0.70461506, 0.96474128, 0.27906989], - [ 0.9297531 , 0.32296769, 0.19267156]]) - B = np.array([[ 0.10377691, 0.5417086 , 0.49807457], + [ 0.9297531, 0.32296769, 0.19267156]]) + B = np.array([[ 0.10377691, 0.5417086, 0.49807457], [ 0.82872117, 0.77801674, 0.39226705], - [ 0.9314666 , 0.66800209, 0.03538394]]) - res1 = np.array([[ 1. , 0.9379533 , -0.04931983], - [ 0.9379533 , 1. , 0.30007991], + [ 0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array([[ 1., 0.9379533, -0.04931983], + [ 0.9379533, 1., 0.30007991], [-0.04931983, 0.30007991, 1. ]]) - res2 = np.array([[ 1. , 0.9379533 , -0.04931983, + res2 = np.array([[ 1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], - [ 0.9379533 , 1. , 0.30007991, + [ 0.9379533, 1., 0.30007991, - 0.04781421, 0.88157256, 0.78052386], - [-0.04931983, 0.30007991, 1. , + [-0.04931983, 0.30007991, 1., - 0.96717111, 0.71483595, 0.83053601], [ 0.30151751, -0.04781421, -0.96717111, - 1. , -0.51366032, -0.66173113], + 1., -0.51366032, -0.66173113], [ 0.66318558, 0.88157256, 0.71483595, - - 0.51366032, 1. 
, 0.98317823], + - 0.51366032, 1., 0.98317823], [ 0.51532523, 0.78052386, 0.83053601, - 0.66173113, 0.98317823, 1. ]]) @@ -1160,10 +1160,10 @@ class TestCov(TestCase): class Test_I0(TestCase): def test_simple(self): assert_almost_equal(i0(0.5), np.array(1.0634833707413234)) - A = np.array([ 0.49842636, 0.6969809 , 0.22011976, 0.0155549]) + A = np.array([ 0.49842636, 0.6969809, 0.22011976, 0.0155549]) assert_almost_equal(i0(A), np.array([ 1.06307822, 1.12518299, 1.01214991, 1.00006049])) - B = np.array([[ 0.827002 , 0.99959078], + B = np.array([[ 0.827002, 0.99959078], [ 0.89694769, 0.39298162], [ 0.37954418, 0.05206293], [ 0.36465447, 0.72446427], @@ -1173,7 +1173,7 @@ class Test_I0(TestCase): [ 1.21147086, 1.0389829 ], [ 1.03633899, 1.00067775], [ 1.03352052, 1.13557954], - [ 1.0588429 , 1.06432317]])) + [ 1.0588429, 1.06432317]])) class TestKaiser(TestCase): @@ -1182,10 +1182,10 @@ class TestKaiser(TestCase): assert_(np.isfinite(kaiser(1, 1.0))) assert_almost_equal(kaiser(2, 1.0), np.array([ 0.78984831, 0.78984831])) assert_almost_equal(kaiser(5, 1.0), - np.array([ 0.78984831, 0.94503323, 1. , + np.array([ 0.78984831, 0.94503323, 1., 0.94503323, 0.78984831])) assert_almost_equal(kaiser(5, 1.56789), - np.array([ 0.58285404, 0.88409679, 1. , + np.array([ 0.58285404, 0.88409679, 1., 0.88409679, 0.58285404])) def test_int_beta(self): diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index 32b3c7036..9002331ce 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -8,42 +8,42 @@ from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where, class TestRavelUnravelIndex(TestCase): def test_basic(self): - assert_equal(np.unravel_index(2,(2,2)), (1,0)) - assert_equal(np.ravel_multi_index((1,0),(2,2)), 2) - assert_equal(np.unravel_index(254,(17,94)), (2,66)) - assert_equal(np.ravel_multi_index((2,66),(17,94)), 254) - assert_raises(ValueError, np.unravel_index, -1, (2,2)) - assert_raises(TypeError, np.unravel_index, 0.5, (2,2)) - assert_raises(ValueError, np.unravel_index, 4, (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (-3,1), (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (2,1), (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (0,-3), (2,2)) - assert_raises(ValueError, np.ravel_multi_index, (0,2), (2,2)) - assert_raises(TypeError, np.ravel_multi_index, (0.1,0.), (2,2)) - - assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4,3,6)), [2,1,4]) - assert_equal(np.ravel_multi_index([2,1,4], (4,3,6)), (2*3 + 1)*6 + 4) - - arr = np.array([[3,6,6],[4,5,1]]) - assert_equal(np.ravel_multi_index(arr, (7,6)), [22,41,37]) - assert_equal(np.ravel_multi_index(arr, (7,6), order='F'), [31,41,13]) - assert_equal(np.ravel_multi_index(arr, (4,6), mode='clip'), [22,23,19]) - assert_equal(np.ravel_multi_index(arr, (4,4), mode=('clip','wrap')), - [12,13,13]) - assert_equal(np.ravel_multi_index((3,1,4,1), (6,7,8,9)), 1621) - - assert_equal(np.unravel_index(np.array([22, 41, 37]), (7,6)), - [[3, 6, 6],[4, 5, 1]]) - assert_equal(np.unravel_index(np.array([31, 41, 13]), (7,6), order='F'), + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, 
np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal(np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal(np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal(np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), [[3, 6, 6], [4, 5, 1]]) - assert_equal(np.unravel_index(1621, (6,7,8,9)), [3,1,4,1]) + assert_equal(np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) def test_dtypes(self): # Test with different data types for dtype in [np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64]: - coords = np.array([[1,0,1,2,3,4],[1,6,1,3,2,0]], dtype=dtype) - shape = (5,8) + coords = np.array([[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) uncoords = 8*coords[0]+coords[1] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) @@ -51,9 +51,9 @@ class TestRavelUnravelIndex(TestCase): assert_equal(np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) - coords = np.array([[1,0,1,2,3,4],[1,6,1,3,2,0],[1,3,1,0,9,5]], + coords = np.array([[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], dtype=dtype) - shape = (5,8,10) + shape = (5, 8, 10) uncoords = 10*(8*coords[0]+coords[1])+coords[2] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) @@ -63,12 +63,12 @@ class TestRavelUnravelIndex(TestCase): def test_clipmodes(self): # Test clipmodes - assert_equal(np.ravel_multi_index([5,1,-1,2], (4,3,7,12), mode='wrap'), - np.ravel_multi_index([1,1,6,2], (4,3,7,12))) - assert_equal(np.ravel_multi_index([5,1,-1,2], (4,3,7,12), - mode=('wrap','raise','clip','raise')), - np.ravel_multi_index([1,1,0,2], (4,3,7,12))) - assert_raises(ValueError, np.ravel_multi_index, [5,1,-1,2], (4,3,7,12)) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=('wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises(ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) class TestGrid(TestCase): def test_basic(self): @@ -77,63 +77,63 @@ class TestGrid(TestCase): assert_(a.shape == (10,)) assert_(b.shape == (20,)) assert_(a[0] == -1) - assert_almost_equal(a[-1],1) + assert_almost_equal(a[-1], 1) assert_(b[0] == -1) - assert_almost_equal(b[1]-b[0],0.1,11) - assert_almost_equal(b[-1],b[0]+19*0.1,11) - assert_almost_equal(a[1]-a[0],2.0/9.0,11) + assert_almost_equal(b[1]-b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0]+19*0.1, 11) + assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) def 
test_linspace_equivalence(self): - y,st = np.linspace(2,10,retstep=1) - assert_almost_equal(st,8/49.0) - assert_array_almost_equal(y,mgrid[2:10:50j],13) + y, st = np.linspace(2, 10, retstep=1) + assert_almost_equal(st, 8/49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) def test_nd(self): - c = mgrid[-1:1:10j,-2:2:10j] - d = mgrid[-1:1:0.1,-2:2:0.2] - assert_(c.shape == (2,10,10)) - assert_(d.shape == (2,20,20)) - assert_array_equal(c[0][0,:],-ones(10,'d')) - assert_array_equal(c[1][:,0],-2*ones(10,'d')) - assert_array_almost_equal(c[0][-1,:],ones(10,'d'),11) - assert_array_almost_equal(c[1][:,-1],2*ones(10,'d'),11) - assert_array_almost_equal(d[0,1,:]-d[0,0,:], 0.1*ones(20,'d'),11) - assert_array_almost_equal(d[1,:,1]-d[1,:,0], 0.2*ones(20,'d'),11) + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0,:], -ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2*ones(10, 'd')) + assert_array_almost_equal(c[0][-1,:], ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2*ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1,:]-d[0, 0,:], 0.1*ones(20, 'd'), 11) + assert_array_almost_equal(d[1,:, 1]-d[1,:, 0], 0.2*ones(20, 'd'), 11) class TestConcatenator(TestCase): def test_1d(self): - assert_array_equal(r_[1,2,3,4,5,6],array([1,2,3,4,5,6])) + assert_array_equal(r_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) b = ones(5) - c = r_[b,0,0,b] - assert_array_equal(c,[1,1,1,1,1,0,0,1,1,1,1,1]) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) def test_mixed_type(self): g = r_[10.1, 1:10] assert_(g.dtype == 'f8') def test_more_mixed_type(self): - g = r_[-10.1, array([1]), array([2,3,4]), 10.0] + g = r_[-10.1, array([1]), array([2, 3, 4]), 10.0] assert_(g.dtype == 'f8') def test_2d(self): - b = rand(5,5) - c = rand(5,5) - d = r_['1',b,c] # append columns - assert_(d.shape == (5,10)) - assert_array_equal(d[:,:5],b) - assert_array_equal(d[:,5:],c) - d = r_[b,c] - assert_(d.shape == (10,5)) - assert_array_equal(d[:5,:],b) - assert_array_equal(d[5:,:],c) + b = rand(5, 5) + c = rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5,:], b) + assert_array_equal(d[5:,:], c) class TestNdenumerate(TestCase): def test_basic(self): - a = array([[1,2], [3,4]]) + a = array([[1, 2], [3, 4]]) assert_equal(list(ndenumerate(a)), - [((0,0), 1), ((0,1), 2), ((1,0), 3), ((1,1), 4)]) + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) class TestIndexExpression(TestCase): @@ -144,17 +144,17 @@ class TestIndexExpression(TestCase): assert_equal(a[:-1], a[index_exp[:-1]]) def test_simple_1(self): - a = np.random.rand(4,5,6) + a = np.random.rand(4, 5, 6) - assert_equal(a[:,:3,[1,2]], a[index_exp[:,:3,[1,2]]]) - assert_equal(a[:,:3,[1,2]], a[s_[:,:3,[1,2]]]) + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) def test_c_(): - a = np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) def test_fill_diagonal(): - a = zeros((3, 3),int) + a = zeros((3, 3), int) fill_diagonal(a, 5) yield (assert_array_equal, a, array([[5, 0, 0], @@ -162,7 +162,7 @@ def test_fill_diagonal(): [0, 0, 5]])) #Test tall matrix - a = zeros((10, 3),int) + a = zeros((10, 3), 
int) fill_diagonal(a, 5) yield (assert_array_equal, a, array([[5, 0, 0], @@ -177,7 +177,7 @@ def test_fill_diagonal(): [0, 0, 0]])) #Test tall matrix wrap - a = zeros((10, 3),int) + a = zeros((10, 3), int) fill_diagonal(a, 5, True) yield (assert_array_equal, a, array([[5, 0, 0], @@ -192,7 +192,7 @@ def test_fill_diagonal(): [0, 5, 0]])) #Test wide matrix - a = zeros((3, 10),int) + a = zeros((3, 10), int) fill_diagonal(a, 5) yield (assert_array_equal, a, array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -223,7 +223,7 @@ def test_diag_indices(): d3 = diag_indices(2, 3) # And use it to set the diagonal of a zeros array to 1: - a = zeros((2, 2, 2),int) + a = zeros((2, 2, 2), int) a[d3] = 1 yield (assert_array_equal, a, array([[[1, 0], @@ -256,7 +256,7 @@ def test_ndindex(): assert_equal(x, [()]) x = list(np.ndindex(())) - assert_equal(x, [()]) + assert_equal(x, [()]) if __name__ == "__main__": diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 4095dd813..af06cd45e 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -583,7 +583,7 @@ class TestLoadTxt(TestCase): ('block', int, (2, 2, 3))]) x = np.loadtxt(c, dtype=dt) a = np.array([('aaaa', 1.0, 8.0, - [[[1, 2, 3], [4, 5, 6]],[[7, 8, 9], [10, 11, 12]]])], + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], dtype=dt) assert_array_equal(x, a) @@ -1554,7 +1554,7 @@ M 33 21.99 def test_gft_using_filename(self): # Test that we can load data from a filename as well as a file object - wanted = np.arange(6).reshape((2,3)) + wanted = np.arange(6).reshape((2, 3)) if sys.version_info[0] >= 3: # python 3k is known to fail for '\r' linesep = ('\n', '\r\n') diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 93a5ef855..292ffdf7a 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -17,7 +17,7 @@ _ndat = np.array( [[ 0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], [ 0.5351, 0.9403, np.nan, 0.2100, 0.4759, 0.2833], [ np.nan, np.nan, np.nan, 0.1042, np.nan, 0.5954], - [ 0.161 , np.nan, np.nan, 0.1859, 0.3146, np.nan]] + [ 0.161, np.nan, np.nan, 0.1859, 0.3146, np.nan]] ) # rows of _ndat with nans removed @@ -131,7 +131,7 @@ class TestNanFunctions_ArgminArgmax(TestCase): assert_(issubclass(w[0].category, NanWarning)) def test_empty(self): - mat = np.zeros((0,3)) + mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: assert_raises(ValueError, f, mat, axis=axis) @@ -253,7 +253,7 @@ class TestNanFunctions_Sum(TestCase): assert_(len(w) == 0, 'warning raised') def test_empty(self): - mat = np.zeros((0,3)) + mat = np.zeros((0, 3)) if np.__version__[:3] < '1.9': tgt = [np.nan]*3 res = nansum(mat, axis=0) @@ -398,7 +398,7 @@ class TestNanFunctions_MeanVarStd(TestCase): assert_(issubclass(w[0].category, NanWarning)) def test_empty(self): - mat = np.zeros((0,3)) + mat = np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index fa166b7f1..617419eee 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -86,29 +86,29 @@ class TestDocs(TestCase): return rundocs() def test_roots(self): - assert_array_equal(np.roots([1,0,0]), [0,0]) + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) def test_str_leading_zeros(self): - p = np.poly1d([4,3,2,1]) + p = np.poly1d([4, 3, 2, 1]) p[3] = 0 assert_equal(str(p), " 2\n" "3 x + 2 x + 1") - p = np.poly1d([1,2]) + p = 
np.poly1d([1, 2]) p[0] = 0 p[1] = 0 assert_equal(str(p), " \n0") def test_polyfit(self) : c = np.array([3., 2., 1.]) - x = np.linspace(0,2,7) - y = np.polyval(c,x) - err = [1,-1,1,-1,1,-1,1] - weights = np.arange(8,1,-1)**2/7.0 + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2/7.0 # check 1D case - m, cov = np.polyfit(x,y+err,2,cov=True) + m, cov = np.polyfit(x, y+err, 2, cov=True) est = [3.8571, 0.2857, 1.619] assert_almost_equal(est, m, decimal=4) val0 = [[2.9388, -5.8776, 1.6327], @@ -116,7 +116,7 @@ class TestDocs(TestCase): [1.6327, -4.2449, 2.3220]] assert_almost_equal(val0, cov, decimal=4) - m2, cov2 = np.polyfit(x,y+err,2,w=weights,cov=True) + m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) val = [[ 8.7929, -10.0103, 0.9756], [-10.0103, 13.6134, -1.8178], @@ -124,19 +124,19 @@ class TestDocs(TestCase): assert_almost_equal(val, cov2, decimal=4) # check 2D (n,1) case - y = y[:,np.newaxis] - c = c[:,np.newaxis] - assert_almost_equal(c, np.polyfit(x,y,2)) + y = y[:, np.newaxis] + c = c[:, np.newaxis] + assert_almost_equal(c, np.polyfit(x, y, 2)) # check 2D (n,2) case - yy = np.concatenate((y,y), axis=1) - cc = np.concatenate((c,c), axis=1) - assert_almost_equal(cc, np.polyfit(x,yy,2)) + yy = np.concatenate((y, y), axis=1) + cc = np.concatenate((c, c), axis=1) + assert_almost_equal(cc, np.polyfit(x, yy, 2)) - m, cov = np.polyfit(x,yy + np.array(err)[:,np.newaxis],2,cov=True) - assert_almost_equal(est, m[:,0], decimal=4) - assert_almost_equal(est, m[:,1], decimal=4) - assert_almost_equal(val0, cov[:,:,0], decimal=4) - assert_almost_equal(val0, cov[:,:,1], decimal=4) + m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) + assert_almost_equal(est, m[:, 0], decimal=4) + assert_almost_equal(est, m[:, 1], decimal=4) + assert_almost_equal(val0, cov[:,:, 0], decimal=4) + assert_almost_equal(val0, cov[:,:, 1], decimal=4) def test_objects(self): from decimal import Decimal @@ -153,14 +153,14 @@ class TestDocs(TestCase): def test_complex(self): p = np.poly1d([3j, 2j, 1j]) p2 = p.integ() - assert_((p2.coeffs == [1j,1j,1j,0]).all()) + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) p2 = p.deriv() - assert_((p2.coeffs == [6j,2j]).all()) + assert_((p2.coeffs == [6j, 2j]).all()) def test_integ_coeffs(self): - p = np.poly1d([3,2,1]) - p2 = p.integ(3, k=[9,7,6]) - assert_((p2.coeffs == [1/4./5.,1/3./4.,1/2./3.,9/1./2.,7,6]).all()) + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_((p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) def test_zero_dims(self): try: diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index ef22ca413..b175bcb64 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -660,13 +660,13 @@ class TestJoinBy2(TestCase): assert_equal(test, control) def test_two_keys_two_vars(self): - a = np.array(list(zip(np.tile([10,11],5),np.repeat(np.arange(5),2), - np.arange(50, 60), np.arange(10,20))), - dtype=[('k', int), ('a', int), ('b', int),('c',int)]) + a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2), + np.arange(50, 60), np.arange(10, 20))), + dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) - b = np.array(list(zip(np.tile([10,11],5),np.repeat(np.arange(5),2), - np.arange(65, 75), np.arange(0,10))), - dtype=[('k', int), ('a', int), ('b', int), ('c',int)]) + b = np.array(list(zip(np.tile([10, 11], 5), 
np.repeat(np.arange(5), 2), + np.arange(65, 75), np.arange(0, 10))), + dtype=[('k', int), ('a', int), ('b', int), ('c', int)]) control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1), (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3), @@ -675,7 +675,7 @@ class TestJoinBy2(TestCase): (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)], dtype=[('k', int), ('a', int), ('b1', int), ('b2', int), ('c1', int), ('c2', int)]) - test = join_by(['a','k'], a, b, r1postfix='1', r2postfix='2', jointype='inner') + test = join_by(['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner') assert_equal(test.dtype, control.dtype) assert_equal(test, control) diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 1e9bacdf5..67808bd39 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -12,22 +12,22 @@ rlevel = 1 class TestRegression(TestCase): def test_poly1d(self,level=rlevel): """Ticket #28""" - assert_equal(np.poly1d([1]) - np.poly1d([1,0]), - np.poly1d([-1,1])) + assert_equal(np.poly1d([1]) - np.poly1d([1, 0]), + np.poly1d([-1, 1])) def test_cov_parameters(self,level=rlevel): """Ticket #91""" - x = np.random.random((3,3)) + x = np.random.random((3, 3)) y = x.copy() np.cov(x, rowvar=1) np.cov(y, rowvar=0) - assert_array_equal(x,y) + assert_array_equal(x, y) def test_mem_digitize(self,level=rlevel): """Ticket #95""" for i in range(100): - np.digitize([1,2,3,4],[1,3]) - np.digitize([0,1,2,3,4],[1,3]) + np.digitize([1, 2, 3, 4], [1, 3]) + np.digitize([0, 1, 2, 3, 4], [1, 3]) def test_unique_zero_sized(self,level=rlevel): """Ticket #205""" @@ -36,51 +36,51 @@ class TestRegression(TestCase): def test_mem_vectorise(self, level=rlevel): """Ticket #325""" vt = np.vectorize(lambda *args: args) - vt(np.zeros((1,2,1)), np.zeros((2,1,1)), np.zeros((1,1,2))) - vt(np.zeros((1,2,1)), np.zeros((2,1,1)), np.zeros((1,1,2)), np.zeros((2,2))) + vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2))) + vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)), np.zeros((2, 2))) def test_mgrid_single_element(self, level=rlevel): """Ticket #339""" - assert_array_equal(np.mgrid[0:0:1j],[0]) - assert_array_equal(np.mgrid[0:0],[]) + assert_array_equal(np.mgrid[0:0:1j], [0]) + assert_array_equal(np.mgrid[0:0], []) def test_refcount_vectorize(self, level=rlevel): """Ticket #378""" - def p(x,y): return 123 + def p(x, y): return 123 v = np.vectorize(p) _assert_valid_refcount(v) def test_poly1d_nan_roots(self, level=rlevel): """Ticket #396""" - p = np.poly1d([np.nan,np.nan,1], r=0) - self.assertRaises(np.linalg.LinAlgError,getattr,p,"r") + p = np.poly1d([np.nan, np.nan, 1], r=0) + self.assertRaises(np.linalg.LinAlgError, getattr, p, "r") def test_mem_polymul(self, level=rlevel): """Ticket #448""" - np.polymul([],[1.]) + np.polymul([], [1.]) def test_mem_string_concat(self, level=rlevel): """Ticket #469""" x = np.array([]) - np.append(x,'asdasd\tasdasd') + np.append(x, 'asdasd\tasdasd') def test_poly_div(self, level=rlevel): """Ticket #553""" - u = np.poly1d([1,2,3]) - v = np.poly1d([1,2,3,4,5]) - q,r = np.polydiv(u,v) + u = np.poly1d([1, 2, 3]) + v = np.poly1d([1, 2, 3, 4, 5]) + q, r = np.polydiv(u, v) assert_equal(q*v + r, u) def test_poly_eq(self, level=rlevel): """Ticket #554""" - x = np.poly1d([1,2,3]) - y = np.poly1d([3,4]) + x = np.poly1d([1, 2, 3]) + y = np.poly1d([3, 4]) assert_(x != y) assert_(x == x) def test_mem_insert(self, level=rlevel): """Ticket #572""" - np.lib.place(1,1,1) + np.lib.place(1, 1, 1) def 
test_polyfit_build(self): """Ticket #628""" @@ -108,16 +108,16 @@ class TestRegression(TestCase): """Make polydiv work for complex types""" msg = "Wrong type, should be complex" x = np.ones(3, dtype=np.complex) - q,r = np.polydiv(x,x) + q, r = np.polydiv(x, x) assert_(q.dtype == np.complex, msg) msg = "Wrong type, should be float" x = np.ones(3, dtype=np.int) - q,r = np.polydiv(x,x) + q, r = np.polydiv(x, x) assert_(q.dtype == np.float, msg) def test_histogramdd_too_many_bins(self) : """Ticket 928.""" - assert_raises(ValueError, np.histogramdd, np.ones((1,10)), bins=2**10) + assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10) def test_polyint_type(self) : """Ticket #944""" @@ -144,20 +144,20 @@ class TestRegression(TestCase): def dp(): n = 3 a = np.ones((n,)*5) - i = np.random.randint(0,n,size=thesize) - a[np.ix_(i,i,i,i,i)] = 0 + i = np.random.randint(0, n, size=thesize) + a[np.ix_(i, i, i, i, i)] = 0 def dp2(): n = 3 a = np.ones((n,)*5) - i = np.random.randint(0,n,size=thesize) - g = a[np.ix_(i,i,i,i,i)] + i = np.random.randint(0, n, size=thesize) + g = a[np.ix_(i, i, i, i, i)] self.assertRaises(ValueError, dp) self.assertRaises(ValueError, dp2) def test_void_coercion(self, level=rlevel): - dt = np.dtype([('a','f4'),('b','i4')]) - x = np.zeros((1,),dt) - assert_(np.r_[x,x].dtype == dt) + dt = np.dtype([('a', 'f4'), ('b', 'i4')]) + x = np.zeros((1,), dt) + assert_(np.r_[x, x].dtype == dt) def test_who_with_0dim_array(self, level=rlevel) : """ticket #1243""" @@ -194,9 +194,9 @@ class TestRegression(TestCase): """Ticket #1676""" from numpy.lib.recfunctions import append_fields F = False - base = np.array([1,2,3], dtype=np.int32) + base = np.array([1, 2, 3], dtype=np.int32) data = np.eye(3).astype(np.int32) - names = ['a','b','c'] + names = ['a', 'b', 'c'] dlist = [np.float64, np.int32, np.int32] try: a = append_fields(base, names, data, dlist) @@ -214,17 +214,17 @@ class TestRegression(TestCase): x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) - dt = [("a", [("a", 'u1', (1,3)), ("b", 'u1')])] + dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])] x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) - assert_equal(x, np.array([(((0,1,2), 3),)], dtype=dt)) + assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt)) - dt = [("a", 'u1', (2,2))] + dt = [("a", 'u1', (2, 2))] x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt) assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt)) - dt = [("a", 'u1', (2,3,2))] + dt = [("a", 'u1', (2, 3, 2))] x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt) - data = [((((0,1), (2,3), (4,5)), ((6,7), (8,9), (10,11))),)] + data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)] assert_equal(x, np.array(data, dtype=dt)) def test_nansum_with_boolean(self): diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index a92ddde83..157e1beb3 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -7,133 +7,133 @@ from numpy import matrix, asmatrix class TestApplyAlongAxis(TestCase): def test_simple(self): - a = ones((20,10),'d') - assert_array_equal(apply_along_axis(len,0,a),len(a)*ones(shape(a)[1])) + a = ones((20, 10), 'd') + assert_array_equal(apply_along_axis(len, 0, a), len(a)*ones(shape(a)[1])) def test_simple101(self,level=11): - a = ones((10,101),'d') - assert_array_equal(apply_along_axis(len,0,a),len(a)*ones(shape(a)[1])) + a = ones((10, 101), 'd') + assert_array_equal(apply_along_axis(len, 0, a), 
len(a)*ones(shape(a)[1])) def test_3d(self): - a = arange(27).reshape((3,3,3)) - assert_array_equal(apply_along_axis(sum,0,a), - [[27,30,33],[36,39,42],[45,48,51]]) + a = arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) class TestApplyOverAxes(TestCase): def test_simple(self): - a = arange(24).reshape(2,3,4) - aoa_a = apply_over_axes(sum, a, [0,2]) - assert_array_equal(aoa_a, array([[[60],[92],[124]]])) + a = arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(sum, a, [0, 2]) + assert_array_equal(aoa_a, array([[[60], [92], [124]]])) class TestArraySplit(TestCase): def test_integer_0_split(self): a = arange(10) try: - res = array_split(a,0) + res = array_split(a, 0) assert_(0) # it should have thrown a value error except ValueError: pass def test_integer_split(self): a = arange(10) - res = array_split(a,1) + res = array_split(a, 1) desired = [arange(10)] - compare_results(res,desired) - - res = array_split(a,2) - desired = [arange(5),arange(5,10)] - compare_results(res,desired) - - res = array_split(a,3) - desired = [arange(4),arange(4,7),arange(7,10)] - compare_results(res,desired) - - res = array_split(a,4) - desired = [arange(3),arange(3,6),arange(6,8),arange(8,10)] - compare_results(res,desired) - - res = array_split(a,5) - desired = [arange(2),arange(2,4),arange(4,6),arange(6,8),arange(8,10)] - compare_results(res,desired) - - res = array_split(a,6) - desired = [arange(2),arange(2,4),arange(4,6),arange(6,8),arange(8,9), - arange(9,10)] - compare_results(res,desired) - - res = array_split(a,7) - desired = [arange(2),arange(2,4),arange(4,6),arange(6,7),arange(7,8), - arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,8) - desired = [arange(2),arange(2,4),arange(4,5),arange(5,6),arange(6,7), - arange(7,8), arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,9) - desired = [arange(2),arange(2,3),arange(3,4),arange(4,5),arange(5,6), - arange(6,7), arange(7,8), arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,10) - desired = [arange(1),arange(1,2),arange(2,3),arange(3,4), - arange(4,5),arange(5,6), arange(6,7), arange(7,8), - arange(8,9), arange(9,10)] - compare_results(res,desired) - - res = array_split(a,11) - desired = [arange(1),arange(1,2),arange(2,3),arange(3,4), - arange(4,5),arange(5,6), arange(6,7), arange(7,8), - arange(8,9), arange(9,10),array([])] - compare_results(res,desired) + compare_results(res, desired) + + res = array_split(a, 2) + desired = [arange(5), arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [arange(4), arange(4, 7), arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [arange(3), arange(3, 6), arange(6, 8), arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [arange(2), arange(2, 4), arange(4, 6), arange(6, 8), arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [arange(2), arange(2, 4), arange(4, 6), arange(6, 8), arange(8, 9), + arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [arange(2), arange(2, 4), arange(4, 6), arange(6, 7), arange(7, 8), + arange(8, 9), arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [arange(2), arange(2, 4), arange(4, 5), arange(5, 6), arange(6, 7), + arange(7, 8), arange(8, 9), arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) 
+ desired = [arange(2), arange(2, 3), arange(3, 4), arange(4, 5), arange(5, 6), + arange(6, 7), arange(7, 8), arange(8, 9), arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [arange(1), arange(1, 2), arange(2, 3), arange(3, 4), + arange(4, 5), arange(5, 6), arange(6, 7), arange(7, 8), + arange(8, 9), arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [arange(1), arange(1, 2), arange(2, 3), arange(3, 4), + arange(4, 5), arange(5, 6), arange(6, 7), arange(7, 8), + arange(8, 9), arange(9, 10), array([])] + compare_results(res, desired) def test_integer_split_2D_rows(self): - a = array([arange(10),arange(10)]) - res = array_split(a,3,axis=0) - desired = [array([arange(10)]),array([arange(10)]),array([])] - compare_results(res,desired) + a = array([arange(10), arange(10)]) + res = array_split(a, 3, axis=0) + desired = [array([arange(10)]), array([arange(10)]), array([])] + compare_results(res, desired) def test_integer_split_2D_cols(self): - a = array([arange(10),arange(10)]) - res = array_split(a,3,axis=-1) - desired = [array([arange(4),arange(4)]), - array([arange(4,7),arange(4,7)]), - array([arange(7,10),arange(7,10)])] - compare_results(res,desired) + a = array([arange(10), arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [array([arange(4), arange(4)]), + array([arange(4, 7), arange(4, 7)]), + array([arange(7, 10), arange(7, 10)])] + compare_results(res, desired) def test_integer_split_2D_default(self): """ This will fail if we change default axis """ - a = array([arange(10),arange(10)]) - res = array_split(a,3) - desired = [array([arange(10)]),array([arange(10)]),array([])] - compare_results(res,desired) + a = array([arange(10), arange(10)]) + res = array_split(a, 3) + desired = [array([arange(10)]), array([arange(10)]), array([])] + compare_results(res, desired) #perhaps should check higher dimensions def test_index_split_simple(self): a = arange(10) - indices = [1,5,7] - res = array_split(a,indices,axis=-1) - desired = [arange(0,1),arange(1,5),arange(5,7),arange(7,10)] - compare_results(res,desired) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [arange(0, 1), arange(1, 5), arange(5, 7), arange(7, 10)] + compare_results(res, desired) def test_index_split_low_bound(self): a = arange(10) - indices = [0,5,7] - res = array_split(a,indices,axis=-1) - desired = [array([]),arange(0,5),arange(5,7),arange(7,10)] - compare_results(res,desired) + indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [array([]), arange(0, 5), arange(5, 7), arange(7, 10)] + compare_results(res, desired) def test_index_split_high_bound(self): a = arange(10) - indices = [0,5,7,10,12] - res = array_split(a,indices,axis=-1) - desired = [array([]),arange(0,5),arange(5,7),arange(7,10), - array([]),array([])] - compare_results(res,desired) + indices = [0, 5, 7, 10, 12] + res = array_split(a, indices, axis=-1) + desired = [array([]), arange(0, 5), arange(5, 7), arange(7, 10), + array([]), array([])] + compare_results(res, desired) class TestSplit(TestCase): @@ -143,14 +143,14 @@ class TestSplit(TestCase): *""" def test_equal_split(self): a = arange(10) - res = split(a,2) - desired = [arange(5),arange(5,10)] - compare_results(res,desired) + res = split(a, 2) + desired = [arange(5), arange(5, 10)] + compare_results(res, desired) def test_unequal_split(self): a = arange(10) try: - res = split(a,3) + res = split(a, 3) assert_(0) # should raise an error except ValueError: pass @@ -159,27 +159,27 @@ 
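The index-based cases rely on the second argument also being accepted as a 1-D list of split points instead of a section count; indices at or past either end simply produce empty sub-arrays, as the low-bound and high-bound tests assert. A short illustration, separate from the diff:

    import numpy as np

    a = np.arange(10)

    # Split points, not a section count: pieces are a[0:1], a[1:5], a[5:7], a[7:10].
    print(np.array_split(a, [1, 5, 7]))

    # Out-of-range indices just yield empty leading or trailing pieces.
    print(np.array_split(a, [0, 5, 7, 10, 12]))
    # [array([]), a[0:5], a[5:7], a[7:10], array([]), array([])]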
class TestSplit(TestCase): class TestDstack(TestCase): def test_0D_array(self): a = array(1); b = array(2); - res=dstack([a,b]) - desired = array([[[1,2]]]) - assert_array_equal(res,desired) + res=dstack([a, b]) + desired = array([[[1, 2]]]) + assert_array_equal(res, desired) def test_1D_array(self): a = array([1]); b = array([2]); - res=dstack([a,b]) - desired = array([[[1,2]]]) - assert_array_equal(res,desired) + res=dstack([a, b]) + desired = array([[[1, 2]]]) + assert_array_equal(res, desired) def test_2D_array(self): - a = array([[1],[2]]); b = array([[1],[2]]); - res=dstack([a,b]) - desired = array([[[1,1]],[[2,2,]]]) - assert_array_equal(res,desired) + a = array([[1], [2]]); b = array([[1], [2]]); + res=dstack([a, b]) + desired = array([[[1, 1]], [[2, 2,]]]) + assert_array_equal(res, desired) def test_2D_array2(self): - a = array([1,2]); b = array([1,2]); - res=dstack([a,b]) - desired = array([[[1,1],[2,2]]]) - assert_array_equal(res,desired) + a = array([1, 2]); b = array([1, 2]); + res=dstack([a, b]) + desired = array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) """ array_split has more comprehensive test of splitting. only do simple test on hsplit, vsplit, and dsplit @@ -190,75 +190,75 @@ class TestHsplit(TestCase): def test_0D_array(self): a= array(1) try: - hsplit(a,2) + hsplit(a, 2) assert_(0) except ValueError: pass def test_1D_array(self): - a= array([1,2,3,4]) - res = hsplit(a,2) - desired = [array([1,2]),array([3,4])] - compare_results(res,desired) + a= array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [array([1, 2]), array([3, 4])] + compare_results(res, desired) def test_2D_array(self): - a= array([[1,2,3,4], - [1,2,3,4]]) - res = hsplit(a,2) - desired = [array([[1,2],[1,2]]),array([[3,4],[3,4]])] - compare_results(res,desired) + a= array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [array([[1, 2], [1, 2]]), array([[3, 4], [3, 4]])] + compare_results(res, desired) class TestVsplit(TestCase): """ only testing for integer splits. """ def test_1D_array(self): - a= array([1,2,3,4]) + a= array([1, 2, 3, 4]) try: - vsplit(a,2) + vsplit(a, 2) assert_(0) except ValueError: pass def test_2D_array(self): - a= array([[1,2,3,4], - [1,2,3,4]]) - res = vsplit(a,2) - desired = [array([[1,2,3,4]]),array([[1,2,3,4]])] - compare_results(res,desired) + a= array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [array([[1, 2, 3, 4]]), array([[1, 2, 3, 4]])] + compare_results(res, desired) class TestDsplit(TestCase): """ only testing for integer splits. 
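The dstack and hsplit/vsplit cases are again formatting-only. As a reminder of the semantics they exercise: dstack promotes its inputs and stacks them along a third axis, hsplit splits along the second axis (or the only axis for 1-D input), vsplit along the first, and each refuses input with too few dimensions, hence the ValueError checks. A quick sketch, not part of the patch:

    import numpy as np

    a = np.array([1, 2])
    b = np.array([3, 4])
    # 1-D inputs become (1, N, 1) before stacking along axis 2.
    print(np.dstack([a, b]))           # [[[1 3] [2 4]]], shape (1, 2, 2)

    m = np.array([[1, 2, 3, 4],
                  [1, 2, 3, 4]])
    print(np.hsplit(m, 2))             # two (2, 2) blocks, split along columns
    print(np.vsplit(m, 2))             # two (1, 4) blocks, split along rows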
""" def test_2D_array(self): - a= array([[1,2,3,4], - [1,2,3,4]]) + a= array([[1, 2, 3, 4], + [1, 2, 3, 4]]) try: - dsplit(a,2) + dsplit(a, 2) assert_(0) except ValueError: pass def test_3D_array(self): - a= array([[[1,2,3,4], - [1,2,3,4]], - [[1,2,3,4], - [1,2,3,4]]]) - res = dsplit(a,2) - desired = [array([[[1,2],[1,2]],[[1,2],[1,2]]]), - array([[[3,4],[3,4]],[[3,4],[3,4]]])] - compare_results(res,desired) + a= array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) class TestSqueeze(TestCase): def test_basic(self): - a = rand(20,10,10,1,1) - b = rand(20,1,10,1,20) - c = rand(1,1,20,10) - assert_array_equal(squeeze(a),reshape(a,(20,10,10))) - assert_array_equal(squeeze(b),reshape(b,(20,10,20))) - assert_array_equal(squeeze(c),reshape(c,(20,10))) + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(squeeze(a), reshape(a, (20, 10, 10))) + assert_array_equal(squeeze(b), reshape(b, (20, 10, 20))) + assert_array_equal(squeeze(c), reshape(c, (20, 10))) # Squeezing to 0-dim should still give an ndarray a = [[[1.5]]] @@ -270,44 +270,44 @@ class TestSqueeze(TestCase): class TestKron(TestCase): def test_return_type(self): - a = ones([2,2]) + a = ones([2, 2]) m = asmatrix(a) - assert_equal(type(kron(a,a)), ndarray) - assert_equal(type(kron(m,m)), matrix) - assert_equal(type(kron(a,m)), matrix) - assert_equal(type(kron(m,a)), matrix) + assert_equal(type(kron(a, a)), ndarray) + assert_equal(type(kron(m, m)), matrix) + assert_equal(type(kron(a, m)), matrix) + assert_equal(type(kron(m, a)), matrix) class myarray(ndarray): __array_priority__ = 0.0 ma = myarray(a.shape, a.dtype, a.data) - assert_equal(type(kron(a,a)), ndarray) - assert_equal(type(kron(ma,ma)), myarray) - assert_equal(type(kron(a,ma)), ndarray) - assert_equal(type(kron(ma,a)), myarray) + assert_equal(type(kron(a, a)), ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), ndarray) + assert_equal(type(kron(ma, a)), myarray) class TestTile(TestCase): def test_basic(self): - a = array([0,1,2]) - b = [[1,2],[3,4]] - assert_equal(tile(a,2), [0,1,2,0,1,2]) - assert_equal(tile(a,(2,2)), [[0,1,2,0,1,2],[0,1,2,0,1,2]]) - assert_equal(tile(a,(1,2)), [[0,1,2,0,1,2]]) - assert_equal(tile(b, 2), [[1,2,1,2],[3,4,3,4]]) - assert_equal(tile(b,(2,1)),[[1,2],[3,4],[1,2],[3,4]]) - assert_equal(tile(b,(2,2)),[[1,2,1,2],[3,4,3,4], - [1,2,1,2],[3,4,3,4]]) + a = array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) def test_empty(self): a = array([[[]]]) - d = tile(a,(3,2,5)).shape - assert_equal(d,(3,2,0)) + d = tile(a, (3, 2, 5)).shape + assert_equal(d, (3, 2, 0)) def test_kroncompare(self): import numpy.random as nr - reps=[(2,),(1,2),(2,1),(2,2),(2,3,2),(3,2)] - shape=[(3,),(2,3),(3,4,3),(3,2,3),(4,3,2,4),(2,2)] + reps=[(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape=[(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] for s in shape: - b = nr.randint(0,10,size=s) + b = nr.randint(0, 10, size=s) for r in reps: a = 
ones(r, b.dtype) large = tile(b, r) @@ -331,9 +331,9 @@ class TestMayShareMemory(TestCase): # Utility -def compare_results(res,desired): +def compare_results(res, desired): for i in range(len(desired)): - assert_array_equal(res[i],desired[i]) + assert_array_equal(res[i], desired[i]) if __name__ == "__main__": diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index f815b247f..5d06e0a8c 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -52,10 +52,10 @@ def test_same(): assert_array_equal(y, by) def test_one_off(): - x = np.array([[1,2,3]]) - y = np.array([[1],[2],[3]]) + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) bx, by = broadcast_arrays(x, y) - bx0 = np.array([[1,2,3],[1,2,3],[1,2,3]]) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) by0 = bx0.T assert_array_equal(bx0, bx) assert_array_equal(by0, by) @@ -67,13 +67,13 @@ def test_same_input_shapes(): (), (1,), (3,), - (0,1), - (0,3), - (1,0), - (3,0), - (1,3), - (3,1), - (3,3), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), ] for shape in data: input_shapes = [shape] @@ -92,18 +92,18 @@ def test_two_compatible_by_ones_input_shapes(): """ data = [ [[(1,), (3,)], (3,)], - [[(1,3), (3,3)], (3,3)], - [[(3,1), (3,3)], (3,3)], - [[(1,3), (3,1)], (3,3)], - [[(1,1), (3,3)], (3,3)], - [[(1,1), (1,3)], (1,3)], - [[(1,1), (3,1)], (3,1)], - [[(1,0), (0,0)], (0,0)], - [[(0,1), (0,0)], (0,0)], - [[(1,0), (0,1)], (0,0)], - [[(1,1), (0,0)], (0,0)], - [[(1,1), (1,0)], (1,0)], - [[(1,1), (0,1)], (0,1)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], ] for input_shapes, expected_shape in data: assert_shapes_correct(input_shapes, expected_shape) @@ -116,25 +116,25 @@ def test_two_compatible_by_prepending_ones_input_shapes(): """ data = [ [[(), (3,)], (3,)], - [[(3,), (3,3)], (3,3)], - [[(3,), (3,1)], (3,3)], - [[(1,), (3,3)], (3,3)], - [[(), (3,3)], (3,3)], - [[(1,1), (3,)], (1,3)], - [[(1,), (3,1)], (3,1)], - [[(1,), (1,3)], (1,3)], - [[(), (1,3)], (1,3)], - [[(), (3,1)], (3,1)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], [[(), (0,)], (0,)], - [[(0,), (0,0)], (0,0)], - [[(0,), (0,1)], (0,0)], - [[(1,), (0,0)], (0,0)], - [[(), (0,0)], (0,0)], - [[(1,1), (0,)], (1,0)], - [[(1,), (0,1)], (0,1)], - [[(1,), (1,0)], (1,0)], - [[(), (1,0)], (1,0)], - [[(), (0,1)], (0,1)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], ] for input_shapes, expected_shape in data: assert_shapes_correct(input_shapes, expected_shape) @@ -146,9 +146,9 @@ def test_incompatible_shapes_raise_valueerror(): """ data = [ [(3,), (4,)], - [(2,3), (2,)], + [(2, 3), (2,)], [(3,), (3,), (4,)], - [(1,3,4), (2,3,3)], + [(1, 3, 4), (2, 3, 3)], ] for input_shapes in data: assert_incompatible_shapes_raise(input_shapes) @@ -160,38 +160,38 @@ def test_same_as_ufunc(): """ data 
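The squeeze, tile and kron cases in the preceding hunks are likewise untouched in substance. squeeze drops every length-1 axis, tile repeats an array according to a reps tuple (prepending axes as needed), and test_kroncompare leans on the identity that a Kronecker product with an all-ones first factor reproduces tile. A sketch for illustration only:

    import numpy as np

    a = np.zeros((20, 1, 10, 1, 20))
    print(np.squeeze(a).shape)          # (20, 10, 20): size-1 axes removed

    b = np.array([[1, 2], [3, 4]])
    print(np.tile(b, 2))                # [[1 2 1 2] [3 4 3 4]]
    print(np.tile(b, (2, 1)))           # [[1 2] [3 4] [1 2] [3 4]]

    r = (2, 2)
    print(np.array_equal(np.tile(b, r),
                         np.kron(np.ones(r, b.dtype), b)))   # True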
= [ [[(1,), (3,)], (3,)], - [[(1,3), (3,3)], (3,3)], - [[(3,1), (3,3)], (3,3)], - [[(1,3), (3,1)], (3,3)], - [[(1,1), (3,3)], (3,3)], - [[(1,1), (1,3)], (1,3)], - [[(1,1), (3,1)], (3,1)], - [[(1,0), (0,0)], (0,0)], - [[(0,1), (0,0)], (0,0)], - [[(1,0), (0,1)], (0,0)], - [[(1,1), (0,0)], (0,0)], - [[(1,1), (1,0)], (1,0)], - [[(1,1), (0,1)], (0,1)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], [[(), (3,)], (3,)], - [[(3,), (3,3)], (3,3)], - [[(3,), (3,1)], (3,3)], - [[(1,), (3,3)], (3,3)], - [[(), (3,3)], (3,3)], - [[(1,1), (3,)], (1,3)], - [[(1,), (3,1)], (3,1)], - [[(1,), (1,3)], (1,3)], - [[(), (1,3)], (1,3)], - [[(), (3,1)], (3,1)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], [[(), (0,)], (0,)], - [[(0,), (0,0)], (0,0)], - [[(0,), (0,1)], (0,0)], - [[(1,), (0,0)], (0,0)], - [[(), (0,0)], (0,0)], - [[(1,1), (0,)], (1,0)], - [[(1,), (0,1)], (0,1)], - [[(1,), (1,0)], (1,0)], - [[(), (1,0)], (1,0)], - [[(), (0,1)], (0,1)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], ] for input_shapes, expected_shape in data: assert_same_as_ufunc(input_shapes[0], input_shapes[1], diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py index 7e590c1db..4ec5c34a2 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -14,46 +14,46 @@ from numpy.compat import asbytes, asbytes_nested def get_mat(n): data = arange(n) - data = add.outer(data,data) + data = add.outer(data, data) return data class TestEye(TestCase): def test_basic(self): - assert_equal(eye(4),array([[1,0,0,0], - [0,1,0,0], - [0,0,1,0], - [0,0,0,1]])) - assert_equal(eye(4,dtype='f'),array([[1,0,0,0], - [0,1,0,0], - [0,0,1,0], - [0,0,0,1]],'f')) - assert_equal(eye(3) == 1, eye(3,dtype=bool)) + assert_equal(eye(4), array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + assert_equal(eye(4, dtype='f'), array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + assert_equal(eye(3) == 1, eye(3, dtype=bool)) def test_diag(self): - assert_equal(eye(4,k=1),array([[0,1,0,0], - [0,0,1,0], - [0,0,0,1], - [0,0,0,0]])) - assert_equal(eye(4,k=-1),array([[0,0,0,0], - [1,0,0,0], - [0,1,0,0], - [0,0,1,0]])) + assert_equal(eye(4, k=1), array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + assert_equal(eye(4, k=-1), array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) def test_2d(self): - assert_equal(eye(4,3),array([[1,0,0], - [0,1,0], - [0,0,1], - [0,0,0]])) - assert_equal(eye(3,4),array([[1,0,0,0], - [0,1,0,0], - [0,0,1,0]])) + assert_equal(eye(4, 3), array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + assert_equal(eye(3, 4), array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) def test_diag2d(self): - assert_equal(eye(3,4,k=2),array([[0,0,1,0], - [0,0,0,1], - [0,0,0,0]])) - 
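The stride_tricks tests above exercise broadcast_arrays, which expands its inputs to a common shape following the broadcasting rules the shape tables enumerate (length-1 axes stretch, missing leading axes are prepended). A sketch, not part of the patch:

    import numpy as np
    from numpy.lib.stride_tricks import broadcast_arrays

    x = np.array([[1, 2, 3]])        # shape (1, 3)
    y = np.array([[1], [2], [3]])    # shape (3, 1)
    bx, by = broadcast_arrays(x, y)
    print(bx.shape, by.shape)        # (3, 3) (3, 3)
    print(bx)                        # three identical rows [1 2 3]
    print(by)                        # three identical columns [1 2 3]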
assert_equal(eye(4,3,k=-2),array([[0,0,0], - [0,0,0], - [1,0,0], - [0,1,0]])) + assert_equal(eye(3, 4, k=2), array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + assert_equal(eye(4, 3, k=-2), array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) def test_eye_bounds(self): assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) @@ -93,7 +93,7 @@ class TestDiag(TestCase): vals = (100 * get_mat(5) + 1).astype('l') b = zeros((5,)) for k in range(5): - b[k] = vals[k,k] + b[k] = vals[k, k] assert_equal(diag(vals), b) b = b * 0 for k in range(3): @@ -123,61 +123,61 @@ class TestFliplr(TestCase): def test_basic(self): self.assertRaises(ValueError, fliplr, ones(4)) a = get_mat(4) - b = a[:,::-1] - assert_equal(fliplr(a),b) - a = [[0,1,2], - [3,4,5]] - b = [[2,1,0], - [5,4,3]] - assert_equal(fliplr(a),b) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) class TestFlipud(TestCase): def test_basic(self): a = get_mat(4) b = a[::-1,:] - assert_equal(flipud(a),b) - a = [[0,1,2], - [3,4,5]] - b = [[3,4,5], - [0,1,2]] - assert_equal(flipud(a),b) + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) class TestRot90(TestCase): def test_basic(self): self.assertRaises(ValueError, rot90, ones(4)) - a = [[0,1,2], - [3,4,5]] - b1 = [[2,5], - [1,4], - [0,3]] - b2 = [[5,4,3], - [2,1,0]] - b3 = [[3,0], - [4,1], - [5,2]] - b4 = [[0,1,2], - [3,4,5]] - - for k in range(-3,13,4): - assert_equal(rot90(a,k=k),b1) - for k in range(-2,13,4): - assert_equal(rot90(a,k=k),b2) - for k in range(-1,13,4): - assert_equal(rot90(a,k=k),b3) - for k in range(0,13,4): - assert_equal(rot90(a,k=k),b4) + a = [[0, 1, 2], + [3, 4, 5]] + b1 = [[2, 5], + [1, 4], + [0, 3]] + b2 = [[5, 4, 3], + [2, 1, 0]] + b3 = [[3, 0], + [4, 1], + [5, 2]] + b4 = [[0, 1, 2], + [3, 4, 5]] + + for k in range(-3, 13, 4): + assert_equal(rot90(a, k=k), b1) + for k in range(-2, 13, 4): + assert_equal(rot90(a, k=k), b2) + for k in range(-1, 13, 4): + assert_equal(rot90(a, k=k), b3) + for k in range(0, 13, 4): + assert_equal(rot90(a, k=k), b4) def test_axes(self): - a = ones((50,40,3)) - assert_equal(rot90(a).shape,(40,50,3)) + a = ones((50, 40, 3)) + assert_equal(rot90(a).shape, (40, 50, 3)) class TestHistogram2d(TestCase): def test_simple(self): x = array([ 0.41702200, 0.72032449, 0.00011437481, 0.302332573, 0.146755891]) y = array([ 0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) - xedges = np.linspace(0,1,10) - yedges = np.linspace(0,1,10) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) H = histogram2d(x, y, (xedges, yedges))[0] answer = array([[0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], @@ -191,40 +191,40 @@ class TestHistogram2d(TestCase): assert_array_equal(H.T, answer) H = histogram2d(x, y, xedges)[0] assert_array_equal(H.T, answer) - H,xedges,yedges = histogram2d(list(range(10)),list(range(10))) - assert_array_equal(H, eye(10,10)) - assert_array_equal(xedges, np.linspace(0,9,11)) - assert_array_equal(yedges, np.linspace(0,9,11)) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) def test_asym(self): x = array([1, 1, 2, 3, 4, 4, 4, 5]) y = array([1, 3, 2, 0, 1, 2, 3, 4]) - H, xed, yed = histogram2d(x,y, (6, 5), range = [[0,6],[0,5]], normed=True) - answer = array([[0.,0,0,0,0], - [0,1,0,1,0], - [0,0,1,0,0], - 
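The TestEye cases just above spell out the full eye behavior: eye(N, M=None, k=0, dtype=float) builds an N-by-M matrix with ones on the k-th diagonal, k > 0 lying above the main diagonal and k < 0 below it. A quick example, separate from the diff:

    import numpy as np

    print(np.eye(4, k=1))        # ones on the first superdiagonal
    print(np.eye(4, k=-1))       # ones on the first subdiagonal
    print(np.eye(3, 4))          # rectangular, ones on the main diagonal
    print(np.eye(3, 4, k=2))     # rectangular with an offset diagonal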
[1,0,0,0,0], - [0,1,1,1,0], - [0,0,0,0,1]]) + H, xed, yed = histogram2d(x, y, (6, 5), range = [[0, 6], [0, 5]], normed=True) + answer = array([[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) assert_array_almost_equal(H, answer/8., 3) - assert_array_equal(xed, np.linspace(0,6,7)) - assert_array_equal(yed, np.linspace(0,5,6)) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) def test_norm(self): - x = array([1,2,3,1,2,3,1,2,3]) - y = array([1,1,1,2,2,2,3,3,3]) - H, xed, yed = histogram2d(x,y,[[1,2,3,5], [1,2,3,5]], normed=True) - answer=array([[1,1,.5], - [1,1,.5], - [.5,.5,.25]])/9. + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d(x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True) + answer=array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]])/9. assert_array_almost_equal(H, answer, 3) def test_all_outliers(self): r = rand(100)+1. - H, xed, yed = histogram2d(r, r, (4, 5), range=([0,1], [0,1])) + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) assert_array_equal(H, 0) def test_empty(self): - a, edge1, edge2 = histogram2d([],[], bins=([0,1],[0,1])) + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) assert_array_max_ulp(a, array([[ 0.]])) a, edge1, edge2 = histogram2d([], [], bins=4) @@ -233,11 +233,11 @@ class TestHistogram2d(TestCase): class TestTri(TestCase): def test_dtype(self): - out = array([[1,0,0], - [1,1,0], - [1,1,1]]) - assert_array_equal(tri(3),out) - assert_array_equal(tri(3,dtype=bool),out.astype(bool)) + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) def test_tril_triu(): @@ -327,15 +327,15 @@ class TestTriuIndices(object): class TestTrilIndicesFrom(object): def test_exceptions(self): assert_raises(ValueError, tril_indices_from, np.ones((2,))) - assert_raises(ValueError, tril_indices_from, np.ones((2,2,2))) - assert_raises(ValueError, tril_indices_from, np.ones((2,3))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) class TestTriuIndicesFrom(object): def test_exceptions(self): assert_raises(ValueError, triu_indices_from, np.ones((2,))) - assert_raises(ValueError, triu_indices_from, np.ones((2,2,2))) - assert_raises(ValueError, triu_indices_from, np.ones((2,3))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) if __name__ == "__main__": diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py index 8b01a974a..3ca093e16 100644 --- a/numpy/lib/tests/test_type_check.py +++ b/numpy/lib/tests/test_type_check.py @@ -20,11 +20,11 @@ def assert_all(x): class TestCommonType(TestCase): def test_basic(self): - ai32 = array([[1,2],[3,4]], dtype=int32) - af32 = array([[1,2],[3,4]], dtype=float32) - af64 = array([[1,2],[3,4]], dtype=float64) - acs = array([[1+5j,2+6j],[3+7j,4+8j]], dtype=csingle) - acd = array([[1+5j,2+6j],[3+7j,4+8j]], dtype=cdouble) + ai32 = array([[1, 2], [3, 4]], dtype=int32) + af32 = array([[1, 2], [3, 4]], dtype=float32) + af64 = array([[1, 2], [3, 4]], dtype=float64) + acs = array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=csingle) + acd = array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=cdouble) assert_(common_type(af32) == float32) assert_(common_type(af64) == float64) assert_(common_type(acs) == csingle) 
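fliplr, flipud, rot90 and histogram2d also see whitespace-only edits here. fliplr reverses columns, flipud reverses rows, rot90 rotates counter-clockwise in quarter turns with period four (hence the k sweeps with stride 4), and the histogram2d assertion with ten diagonal points and the default ten bins produces an identity-like count matrix. A sketch for illustration only:

    import numpy as np

    a = np.array([[0, 1, 2],
                  [3, 4, 5]])
    print(np.fliplr(a))          # [[2 1 0] [5 4 3]]
    print(np.flipud(a))          # [[3 4 5] [0 1 2]]
    print(np.rot90(a))           # [[2 5] [1 4] [0 3]], one CCW quarter turn

    x = y = np.arange(10)
    H, xedges, yedges = np.histogram2d(x, y)
    print(np.array_equal(H, np.eye(10)))   # True: one count per diagonal bin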
@@ -36,50 +36,50 @@ class TestMintypecode(TestCase): def test_default_1(self): for itype in '1bcsuwil': - assert_equal(mintypecode(itype),'d') - assert_equal(mintypecode('f'),'f') - assert_equal(mintypecode('d'),'d') - assert_equal(mintypecode('F'),'F') - assert_equal(mintypecode('D'),'D') + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') def test_default_2(self): for itype in '1bcsuwil': - assert_equal(mintypecode(itype+'f'),'f') - assert_equal(mintypecode(itype+'d'),'d') - assert_equal(mintypecode(itype+'F'),'F') - assert_equal(mintypecode(itype+'D'),'D') - assert_equal(mintypecode('ff'),'f') - assert_equal(mintypecode('fd'),'d') - assert_equal(mintypecode('fF'),'F') - assert_equal(mintypecode('fD'),'D') - assert_equal(mintypecode('df'),'d') - assert_equal(mintypecode('dd'),'d') + assert_equal(mintypecode(itype+'f'), 'f') + assert_equal(mintypecode(itype+'d'), 'd') + assert_equal(mintypecode(itype+'F'), 'F') + assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') #assert_equal(mintypecode('dF',savespace=1),'F') - assert_equal(mintypecode('dF'),'D') - assert_equal(mintypecode('dD'),'D') - assert_equal(mintypecode('Ff'),'F') + assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') #assert_equal(mintypecode('Fd',savespace=1),'F') - assert_equal(mintypecode('Fd'),'D') - assert_equal(mintypecode('FF'),'F') - assert_equal(mintypecode('FD'),'D') - assert_equal(mintypecode('Df'),'D') - assert_equal(mintypecode('Dd'),'D') - assert_equal(mintypecode('DF'),'D') - assert_equal(mintypecode('DD'),'D') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') def test_default_3(self): - assert_equal(mintypecode('fdF'),'D') + assert_equal(mintypecode('fdF'), 'D') #assert_equal(mintypecode('fdF',savespace=1),'F') - assert_equal(mintypecode('fdD'),'D') - assert_equal(mintypecode('fFD'),'D') - assert_equal(mintypecode('dFD'),'D') - - assert_equal(mintypecode('ifd'),'d') - assert_equal(mintypecode('ifF'),'F') - assert_equal(mintypecode('ifD'),'D') - assert_equal(mintypecode('idF'),'D') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') #assert_equal(mintypecode('idF',savespace=1),'F') - assert_equal(mintypecode('idD'),'D') + assert_equal(mintypecode('idD'), 'D') class TestIsscalar(TestCase): @@ -97,72 +97,72 @@ class TestReal(TestCase): def test_real(self): y = rand(10,) - assert_array_equal(y,real(y)) + assert_array_equal(y, real(y)) def test_cmplx(self): y = rand(10,)+1j*rand(10,) - assert_array_equal(y.real,real(y)) + assert_array_equal(y.real, real(y)) class TestImag(TestCase): def test_real(self): y = rand(10,) - assert_array_equal(0,imag(y)) + assert_array_equal(0, imag(y)) def test_cmplx(self): y = rand(10,)+1j*rand(10,) - 
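TestMintypecode is the densest block of purely cosmetic changes in this file. The function itself returns the smallest type character from typeset ('GDFgdf' by default) that can safely hold all of the input type characters. A short example, not part of the patch:

    import numpy as np

    print(np.mintypecode('fd'))     # 'd': double absorbs single
    print(np.mintypecode('dF'))     # 'D': double plus csingle needs cdouble
    print(np.mintypecode('ff'))     # 'f'
    print(np.mintypecode('idF'))    # 'D'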
assert_array_equal(y.imag,imag(y)) + assert_array_equal(y.imag, imag(y)) class TestIscomplex(TestCase): def test_fail(self): - z = array([-1,0,1]) + z = array([-1, 0, 1]) res = iscomplex(z) - assert_(not sometrue(res,axis=0)) + assert_(not sometrue(res, axis=0)) def test_pass(self): - z = array([-1j,1,0]) + z = array([-1j, 1, 0]) res = iscomplex(z) - assert_array_equal(res,[1,0,0]) + assert_array_equal(res, [1, 0, 0]) class TestIsreal(TestCase): def test_pass(self): - z = array([-1,0,1j]) + z = array([-1, 0, 1j]) res = isreal(z) - assert_array_equal(res,[1,1,0]) + assert_array_equal(res, [1, 1, 0]) def test_fail(self): - z = array([-1j,1,0]) + z = array([-1j, 1, 0]) res = isreal(z) - assert_array_equal(res,[0,1,1]) + assert_array_equal(res, [0, 1, 1]) class TestIscomplexobj(TestCase): def test_basic(self): - z = array([-1,0,1]) + z = array([-1, 0, 1]) assert_(not iscomplexobj(z)) - z = array([-1j,0,-1]) + z = array([-1j, 0, -1]) assert_(iscomplexobj(z)) class TestIsrealobj(TestCase): def test_basic(self): - z = array([-1,0,1]) + z = array([-1, 0, 1]) assert_(isrealobj(z)) - z = array([-1j,0,-1]) + z = array([-1j, 0, -1]) assert_(not isrealobj(z)) class TestIsnan(TestCase): def test_goodvalues(self): - z = array((-1.,0.,1.)) + z = array((-1., 0., 1.)) res = isnan(z) == 0 - assert_all(alltrue(res,axis=0)) + assert_all(alltrue(res, axis=0)) def test_posinf(self): with errstate(divide='ignore'): @@ -193,9 +193,9 @@ class TestIsnan(TestCase): class TestIsfinite(TestCase): def test_goodvalues(self): - z = array((-1.,0.,1.)) + z = array((-1., 0., 1.)) res = isfinite(z) == 1 - assert_all(alltrue(res,axis=0)) + assert_all(alltrue(res, axis=0)) def test_posinf(self): with errstate(divide='ignore', invalid='ignore'): @@ -226,9 +226,9 @@ class TestIsfinite(TestCase): class TestIsinf(TestCase): def test_goodvalues(self): - z = array((-1.,0.,1.)) + z = array((-1., 0., 1.)) res = isinf(z) == 0 - assert_all(alltrue(res,axis=0)) + assert_all(alltrue(res, axis=0)) def test_posinf(self): with errstate(divide='ignore', invalid='ignore'): @@ -259,7 +259,7 @@ class TestIsposinf(TestCase): def test_generic(self): with errstate(divide='ignore', invalid='ignore'): - vals = isposinf(array((-1.,0,1))/0.) + vals = isposinf(array((-1., 0, 1))/0.) assert_(vals[0] == 0) assert_(vals[1] == 0) assert_(vals[2] == 1) @@ -269,7 +269,7 @@ class TestIsneginf(TestCase): def test_generic(self): with errstate(divide='ignore', invalid='ignore'): - vals = isneginf(array((-1.,0,1))/0.) + vals = isneginf(array((-1., 0, 1))/0.) assert_(vals[0] == 1) assert_(vals[1] == 0) assert_(vals[2] == 0) @@ -279,7 +279,7 @@ class TestNanToNum(TestCase): def test_generic(self): with errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(array((-1.,0,1))/0.) + vals = nan_to_num(array((-1., 0, 1))/0.) 
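The element-wise predicates above behave exactly as their names suggest; the one subtlety the tests guard is that divide/invalid warnings are silenced with errstate before inf and nan values are manufactured. A sketch, separate from the diff:

    import numpy as np

    z = np.array([-1j, 1, 0])
    print(np.iscomplex(z))       # [ True False False]
    print(np.isreal(z))          # [False  True  True]

    with np.errstate(divide='ignore', invalid='ignore'):
        vals = np.array((-1., 0, 1)) / 0.    # [-inf, nan, inf]
    print(np.isinf(vals))        # [ True False  True]
    print(np.isnan(vals))        # [False  True False]
    print(np.isneginf(vals))     # [ True False False]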
assert_all(vals[0] < -1e10) and assert_all(isfinite(vals[0])) assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(isfinite(vals[2])) @@ -319,19 +319,19 @@ class TestRealIfClose(TestCase): a = rand(10) b = real_if_close(a+1e-15j) assert_all(isrealobj(b)) - assert_array_equal(a,b) + assert_array_equal(a, b) b = real_if_close(a+1e-7j) assert_all(iscomplexobj(b)) - b = real_if_close(a+1e-7j,tol=1e-6) + b = real_if_close(a+1e-7j, tol=1e-6) assert_all(isrealobj(b)) class TestArrayConversion(TestCase): def test_asfarray(self): - a = asfarray(array([1,2,3])) - assert_equal(a.__class__,ndarray) - assert_(issubdtype(a.dtype,float)) + a = asfarray(array([1, 2, 3])) + assert_equal(a.__class__, ndarray) + assert_(issubdtype(a.dtype, float)) if __name__ == "__main__": run_module_suite() diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py index 50f3229e8..31dbdba1a 100644 --- a/numpy/lib/tests/test_ufunclike.py +++ b/numpy/lib/tests/test_ufunclike.py @@ -54,7 +54,7 @@ class TestUfunclike(TestCase): a = nx.array([1.1, -1.1]) m = MyArray(a, metadata='foo') f = ufl.fix(m) - assert_array_equal(f, nx.array([1,-1])) + assert_array_equal(f, nx.array([1, -1])) assert_(isinstance(f, MyArray)) assert_equal(f.metadata, 'foo') diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index a39f60220..91df1f4f8 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -3,9 +3,9 @@ """ from __future__ import division, absolute_import, print_function -__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu', - 'tril','vander','histogram2d','mask_indices', - 'tril_indices','tril_indices_from','triu_indices','triu_indices_from', +__all__ = ['diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', + 'tril_indices', 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] from numpy.core.numeric import asanyarray, equal, subtract, arange, \ @@ -113,7 +113,7 @@ def flipud(m): m = asanyarray(m) if m.ndim < 1: raise ValueError("Input must be >= 1-d.") - return m[::-1,...] + return m[::-1, ...] 
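real_if_close and asfarray are also only reindented here. real_if_close discards an imaginary part that is negligible for the given tolerance (100 machine epsilons by default, as the 1e-15j versus 1e-7j cases show), and asfarray coerces its input to a floating dtype (newer NumPy releases may have dropped asfarray in favour of asarray with an explicit dtype). A sketch for illustration only:

    import numpy as np

    a = np.random.rand(10)
    print(np.real_if_close(a + 1e-15j).dtype)            # float64: imaginary part discarded
    print(np.real_if_close(a + 1e-7j).dtype)             # complex128: too big at default tol
    print(np.real_if_close(a + 1e-7j, tol=1e-6).dtype)   # float64 again, looser tolerance

    print(np.asfarray([1, 2, 3]).dtype)                  # float64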
def rot90(m, k=1): """ @@ -160,12 +160,12 @@ def rot90(m, k=1): if k == 0: return m elif k == 1: - return fliplr(m).swapaxes(0,1) + return fliplr(m).swapaxes(0, 1) elif k == 2: return fliplr(flipud(m)) else: # k == 3 - return fliplr(m.swapaxes(0,1)) + return fliplr(m.swapaxes(0, 1)) def eye(N, M=None, k=0, dtype=float): """ @@ -276,7 +276,7 @@ def diag(v, k=0): s = v.shape if len(s) == 1: n = s[0]+abs(k) - res = zeros((n,n), v.dtype) + res = zeros((n, n), v.dtype) if k >= 0: i = k else: @@ -334,12 +334,12 @@ def diagflat(v, k=0): v = asarray(v).ravel() s = len(v) n = s + abs(k) - res = zeros((n,n), v.dtype) + res = zeros((n, n), v.dtype) if (k >= 0): - i = arange(0,n-k) + i = arange(0, n-k) fi = i+k+i*n else: - i = arange(0,n+k) + i = arange(0, n+k) fi = i+(i-k)*n res.flat[fi] = v if not wrap: @@ -385,7 +385,7 @@ def tri(N, M=None, k=0, dtype=float): """ if M is None: M = N - m = greater_equal(subtract.outer(arange(N), arange(M)),-k) + m = greater_equal(subtract.outer(arange(N), arange(M)), -k) return m.astype(dtype) def tril(m, k=0): @@ -421,7 +421,7 @@ def tril(m, k=0): """ m = asanyarray(m) - out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype),m) + out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype), m) return out def triu(m, k=0): @@ -510,9 +510,9 @@ def vander(x, N=None): x = asarray(x) if N is None: N=len(x) - X = ones( (len(x),N), x.dtype) + X = ones( (len(x), N), x.dtype) for i in range(N - 1): - X[:,i] = x**(N - i - 1) + X[:, i] = x**(N - i - 1) return X @@ -650,7 +650,7 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None): if N != 1 and N != 2: xedges = yedges = asarray(bins, float) bins = [xedges, yedges] - hist, edges = histogramdd([x,y], bins, range, normed, weights) + hist, edges = histogramdd([x, y], bins, range, normed, weights) return hist, edges[0], edges[1] @@ -719,7 +719,7 @@ def mask_indices(n, mask_func, k=0): array([1, 2, 5]) """ - m = ones((n,n), int) + m = ones((n, n), int) a = mask_func(m, k) return where(a != 0) @@ -929,4 +929,4 @@ def triu_indices_from(arr, k=0): """ if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]): raise ValueError("input array must be 2-d and square") - return triu_indices(arr.shape[0],k) + return triu_indices(arr.shape[0], k) diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index da9e13847..1ed5bf32a 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -3,9 +3,9 @@ """ from __future__ import division, absolute_import, print_function -__all__ = ['iscomplexobj','isrealobj','imag','iscomplex', - 'isreal','nan_to_num','real','real_if_close', - 'typename','asfarray','mintypecode','asscalar', +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'asfarray', 'mintypecode', 'asscalar', 'common_type'] import numpy.core.numeric as _nx @@ -68,7 +68,7 @@ def mintypecode(typechars,typeset='GDFgdf',default='d'): l = [] for t in intersection: i = _typecodes_by_elsize.index(t) - l.append((i,t)) + l.append((i, t)) l.sort() return l[0][1] @@ -102,7 +102,7 @@ def asfarray(a, dtype=_nx.float_): dtype = _nx.obj2sctype(dtype) if not issubclass(dtype, _nx.inexact): dtype = _nx.float_ - return asarray(a,dtype=dtype) + return asarray(a, dtype=dtype) def real(val): """ @@ -603,4 +603,3 @@ def common_type(*arrays): return array_type[1][precision] else: return array_type[0][precision] - diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py index d675d3702..f62f6db59 100644 --- a/numpy/lib/user_array.py +++ 
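Among the implementation hunks above, the vander loop deserves a second look: column i is filled with x**(N - i - 1), so the highest power comes first. A small example, not part of the patch:

    import numpy as np

    x = np.array([1, 2, 3])
    print(np.vander(x, 3))
    # [[1 1 1]
    #  [4 2 1]
    #  [9 3 1]]
    # Row j is [x[j]**2, x[j]**1, x[j]**0], matching X[:, i] = x**(N - i - 1).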
b/numpy/lib/user_array.py @@ -12,7 +12,7 @@ from numpy.core import ( bitwise_xor, invert, less, less_equal, not_equal, equal, greater, greater_equal, shape, reshape, arange, sin, sqrt, transpose ) -from numpy.compat import long +from numpy.compat import long class container(object): def __init__(self, data, dtype=None, copy=True): @@ -39,9 +39,9 @@ class container(object): def __setitem__(self, index, value): - self.array[index] = asarray(value,self.dtype) + self.array[index] = asarray(value, self.dtype) def __setslice__(self, i, j, value): - self.array[i:j] = asarray(value,self.dtype) + self.array[i:j] = asarray(value, self.dtype) def __abs__(self): return self._rc(absolute(self.array)) @@ -65,16 +65,16 @@ class container(object): return self def __mul__(self, other): - return self._rc(multiply(self.array,asarray(other))) + return self._rc(multiply(self.array, asarray(other))) __rmul__ = __mul__ def __imul__(self, other): multiply(self.array, other, self.array) return self def __div__(self, other): - return self._rc(divide(self.array,asarray(other))) + return self._rc(divide(self.array, asarray(other))) def __rdiv__(self, other): - return self._rc(divide(asarray(other),self.array)) + return self._rc(divide(asarray(other), self.array)) def __idiv__(self, other): divide(self.array, other, self.array) return self @@ -88,32 +88,32 @@ class container(object): return self def __divmod__(self, other): - return (self._rc(divide(self.array,other)), + return (self._rc(divide(self.array, other)), self._rc(remainder(self.array, other))) def __rdivmod__(self, other): return (self._rc(divide(other, self.array)), self._rc(remainder(other, self.array))) - def __pow__(self,other): - return self._rc(power(self.array,asarray(other))) - def __rpow__(self,other): - return self._rc(power(asarray(other),self.array)) - def __ipow__(self,other): + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + def __ipow__(self, other): power(self.array, other, self.array) return self - def __lshift__(self,other): + def __lshift__(self, other): return self._rc(left_shift(self.array, other)) - def __rshift__(self,other): + def __rshift__(self, other): return self._rc(right_shift(self.array, other)) - def __rlshift__(self,other): + def __rlshift__(self, other): return self._rc(left_shift(other, self.array)) - def __rrshift__(self,other): + def __rrshift__(self, other): return self._rc(right_shift(other, self.array)) - def __ilshift__(self,other): + def __ilshift__(self, other): left_shift(self.array, other, self.array) return self - def __irshift__(self,other): + def __irshift__(self, other): right_shift(self.array, other, self.array) return self @@ -163,12 +163,12 @@ class container(object): def __hex__(self): return self._scalarfunc(hex) def __oct__(self): return self._scalarfunc(oct) - def __lt__(self,other): return self._rc(less(self.array,other)) - def __le__(self,other): return self._rc(less_equal(self.array,other)) - def __eq__(self,other): return self._rc(equal(self.array,other)) - def __ne__(self,other): return self._rc(not_equal(self.array,other)) - def __gt__(self,other): return self._rc(greater(self.array,other)) - def __ge__(self,other): return self._rc(greater_equal(self.array,other)) + def __lt__(self, other): return self._rc(less(self.array, other)) + def __le__(self, other): return self._rc(less_equal(self.array, other)) + def __eq__(self, other): return self._rc(equal(self.array, other)) + def 
__ne__(self, other): return self._rc(not_equal(self.array, other)) + def __gt__(self, other): return self._rc(greater(self.array, other)) + def __ge__(self, other): return self._rc(greater_equal(self.array, other)) def copy(self): return self._rc(self.array.copy()) @@ -185,7 +185,7 @@ class container(object): def __array_wrap__(self, *args): return self.__class__(args[0]) - def __setattr__(self,attr,value): + def __setattr__(self, attr, value): if attr == 'array': object.__setattr__(self, attr, value) return @@ -195,7 +195,7 @@ class container(object): object.__setattr__(self, attr, value) # Only called after other approaches fail. - def __getattr__(self,attr): + def __getattr__(self, attr): if (attr == 'array'): return object.__getattribute__(self, attr) return self.array.__getattribute__(attr) @@ -204,19 +204,19 @@ class container(object): # Test of class container ############################################################# if __name__ == '__main__': - temp=reshape(arange(10000),(100,100)) + temp=reshape(arange(10000), (100, 100)) ua=container(temp) # new object created begin test print(dir(ua)) - print(shape(ua),ua.shape) # I have changed Numeric.py + print(shape(ua), ua.shape) # I have changed Numeric.py - ua_small=ua[:3,:5] + ua_small=ua[:3, :5] print(ua_small) - ua_small[0,0]=10 # this did not change ua[0,0], which is not normal behavior - print(ua_small[0,0],ua[0,0]) + ua_small[0, 0]=10 # this did not change ua[0,0], which is not normal behavior + print(ua_small[0, 0], ua[0, 0]) print(sin(ua_small)/3.*6.+sqrt(ua_small**2)) - print(less(ua_small,103),type(less(ua_small,103))) - print(type(ua_small*reshape(arange(15),shape(ua_small)))) - print(reshape(ua_small,(5,3))) + print(less(ua_small, 103), type(less(ua_small, 103))) + print(type(ua_small*reshape(arange(15), shape(ua_small)))) + print(reshape(ua_small, (5, 3))) print(transpose(ua_small)) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index f54946722..1b968f1fc 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -320,7 +320,7 @@ def who(vardict=None): sta = [] cache = {} for name in vardict.keys(): - if isinstance(vardict[name],ndarray): + if isinstance(vardict[name], ndarray): var = vardict[name] idv = id(var) if idv in cache.keys(): @@ -351,9 +351,9 @@ def who(vardict=None): totalbytes += int(val[2]) if len(sta) > 0: - sp1 = max(10,maxname) - sp2 = max(10,maxshape) - sp3 = max(10,maxbyte) + sp1 = max(10, maxname) + sp2 = max(10, maxshape) + sp3 = max(10, maxbyte) prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') print(prval + "\n" + "="*(len(prval)+5) + "\n") @@ -409,7 +409,7 @@ def _makenamedict(module='numpy'): break thisdict = totraverse.pop(0) for x in thisdict.keys(): - if isinstance(thisdict[x],types.ModuleType): + if isinstance(thisdict[x], types.ModuleType): modname = thisdict[x].__name__ if modname not in dictlist: moddict = thisdict[x].__dict__ @@ -470,7 +470,7 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'): # Local import to speed up numpy's import time. 
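user_array.container wraps an ndarray and routes nearly every operator through _rc(), so results come back as container instances (0-d results come back as plain scalars), while __getattr__ forwards everything else to the wrapped array. A sketch of its use, assuming the module is still importable as numpy.lib.user_array as in the hunks above (newer releases may have relocated or removed it), and separate from the diff:

    import numpy as np
    from numpy.lib.user_array import container

    c = container(np.arange(6).reshape(2, 3))
    d = c + 1
    print(type(d))       # container: arithmetic re-wraps the result via _rc()
    print(d.shape)       # (2, 3): attribute access falls through to the wrapped array
    print(d[0, 0])       # 1: 0-d indexing results are returned as plain scalars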
import pydoc, inspect - if hasattr(object,'_ppimport_importer') or \ + if hasattr(object, '_ppimport_importer') or \ hasattr(object, '_ppimport_module'): object = object._ppimport_module elif hasattr(object, '_ppimport_attr'): @@ -537,7 +537,7 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'): print(" " + argstr + "\n", file=output) doc1 = inspect.getdoc(object) if doc1 is None: - if hasattr(object,'__init__'): + if hasattr(object, '__init__'): print(inspect.getdoc(object.__init__), file=output) else: print(inspect.getdoc(object), file=output) @@ -565,7 +565,7 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'): else: arguments = "()" - if hasattr(object,'name'): + if hasattr(object, 'name'): name = "%s" % object.name else: name = "<name>" @@ -972,7 +972,7 @@ class SafeEval(object): if sys.version_info[0] < 3: def visit(self, node, **kw): cls = node.__class__ - meth = getattr(self,'visit'+cls.__name__,self.default) + meth = getattr(self, 'visit'+cls.__name__, self.default) return meth(node, **kw) def default(self, node, **kw): @@ -987,7 +987,7 @@ class SafeEval(object): return node.value def visitDict(self, node,**kw): - return dict([(self.visit(k),self.visit(v)) for k,v in node.items]) + return dict([(self.visit(k), self.visit(v)) for k, v in node.items]) def visitTuple(self, node, **kw): return tuple([self.visit(i) for i in node.nodes]) diff --git a/numpy/linalg/_gufuncs_linalg.py b/numpy/linalg/_gufuncs_linalg.py index 169d39f86..ef19c2121 100644 --- a/numpy/linalg/_gufuncs_linalg.py +++ b/numpy/linalg/_gufuncs_linalg.py @@ -15,7 +15,7 @@ as the iteration used when used on vectors. This can result in faster execution as well. In addition, there are some ufuncs thrown in that implement fused operations -over numpy vectors that can result in faster execution on large vector +over numpy vectors that can result in faster execution on large vector compared to non-fused versions (for example: multiply_add, multiply3). In fact, gufuncs_linalg is a very thin wrapper of python code that wraps @@ -73,9 +73,9 @@ Fused Operations Error Handling ================ Unlike the numpy.linalg module, this module does not use exceptions to notify -errors in the execution of the kernels. As these functions are thougth to be +errors in the execution of the kernels. As these functions are thougth to be used in a vector way it didn't seem appropriate to raise exceptions on failure -of an element. So instead, when an error computing an element occurs its +of an element. So instead, when an error computing an element occurs its associated result will be set to an invalid value (all NaNs). Exceptions can occur if the arguments fail to map properly to the underlying @@ -91,7 +91,7 @@ That's not always the case, as due to limitations of the gufunc interface some functions cannot be mapped straight into a kernel. Two cases come to mind: -- An uniform parameter is needed to configure the way the computation is +- An uniform parameter is needed to configure the way the computation is performed (like UPLO in the functions working on symmetric/hermitian matrices) - svd, where it was impossible to map the function to a gufunc signature. 
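The utils.py hunks touch who() and info(), two introspection helpers: who() tabulates the ndarrays found in a namespace dictionary (name, shape, bytes, dtype), and info() prints an object's signature and docstring. A sketch, assuming a NumPy version contemporary with this commit that still exposes both at the top level, not part of the patch:

    import numpy as np

    ns = {'a': np.arange(1000.), 'b': np.ones((10, 10), dtype=np.int32)}
    np.who(ns)                   # prints a name / shape / bytes / type table
    np.info(np.array_split)      # prints array_split's signature and docstring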
@@ -114,9 +114,9 @@ from __future__ import division, absolute_import, print_function __all__ = ['inner1d', 'dotc1d', 'innerwt', 'matrix_multiply', 'det', 'slogdet', - 'inv', 'cholesky', 'quadratic_form', 'add3', 'multiply3', - 'multiply3_add', 'multiply_add', 'multiply_add2', 'multiply4', - 'multiply4_add', 'eig', 'eigvals', 'eigh', 'eigvalsh', 'solve', + 'inv', 'cholesky', 'quadratic_form', 'add3', 'multiply3', + 'multiply3_add', 'multiply_add', 'multiply_add2', 'multiply4', + 'multiply4_add', 'eig', 'eigvals', 'eigh', 'eigvalsh', 'solve', 'svd', 'chosolve', 'poinv'] import numpy as np @@ -354,7 +354,7 @@ def slogdet(a, **kwargs): Compute the sign and (natural) logarithm of the determinant of an array, with broadcasting. - If an array has a very small or very large determinant, then a call to + If an array has a very small or very large determinant, then a call to `det` may overflow or underflow. This routine is more robust against such issues, because it computes the logarithm of the determinant rather than the determinant itself @@ -368,7 +368,7 @@ def slogdet(a, **kwargs): ------- sign : (...) array An array of numbers representing the sign of the determinants. For real - matrices, this is 1, 0, or -1. For complex matrices, this is a complex + matrices, this is 1, 0, or -1. For complex matrices, this is a complex number with absolute value 1 (i.e., it is on the unit circle), or else 0. logdet : (...) array @@ -1398,11 +1398,11 @@ def chosolve(A, B, UPLO='L', **kw_args): def poinv(A, UPLO='L', **kw_args): """ - Compute the (multiplicative) inverse of symmetric/hermitian positive + Compute the (multiplicative) inverse of symmetric/hermitian positive definite matrices, with broadcasting. - Given a square symmetic/hermitian positive-definite matrix `a`, return - the matrix `ainv` satisfying ``matrix_multiply(a, ainv) = + Given a square symmetic/hermitian positive-definite matrix `a`, return + the matrix `ainv` satisfying ``matrix_multiply(a, ainv) = matrix_multiply(ainv, a) = Identity matrix``. Parameters diff --git a/numpy/linalg/lapack_lite/blas_lite.c b/numpy/linalg/lapack_lite/blas_lite.c index bd24768c3..3ac680167 100644 --- a/numpy/linalg/lapack_lite/blas_lite.c +++ b/numpy/linalg/lapack_lite/blas_lite.c @@ -21132,4 +21132,3 @@ L20: /* End of ZTRSV . */ } /* ztrsv_ */ - diff --git a/numpy/linalg/lapack_lite/dlamch.c b/numpy/linalg/lapack_lite/dlamch.c index bf1dfdb05..fd2d58ad7 100644 --- a/numpy/linalg/lapack_lite/dlamch.c +++ b/numpy/linalg/lapack_lite/dlamch.c @@ -5,49 +5,49 @@ #ifndef HAVE_CONFIG doublereal dlamch_(char *cmach) { -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 - - - Purpose - ======= - - DLAMCH determines double precision machine parameters. 
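The slogdet docstring above states the motivation: when a determinant would overflow or underflow, the sign and the log of the magnitude are returned separately. The experimental _gufuncs_linalg module mirrors numpy.linalg on this point, so the stable API shows the same idea; an illustration only:

    import numpy as np

    a = np.array([[1., 2.],
                  [3., 4.]])
    sign, logdet = np.linalg.slogdet(a)
    print(sign, logdet)               # -1.0 and log(2): the determinant is -2
    print(sign * np.exp(logdet))      # -2.0, recovering the plain determinant
    print(np.linalg.det(a))           # -2.0, for comparison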
- - Arguments - ========= - - CMACH (input) CHARACTER*1 - Specifies the value to be returned by DLAMCH: - = 'E' or 'e', DLAMCH := eps - = 'S' or 's , DLAMCH := sfmin - = 'B' or 'b', DLAMCH := base - = 'P' or 'p', DLAMCH := eps*base - = 'N' or 'n', DLAMCH := t - = 'R' or 'r', DLAMCH := rnd - = 'M' or 'm', DLAMCH := emin - = 'U' or 'u', DLAMCH := rmin - = 'L' or 'l', DLAMCH := emax - = 'O' or 'o', DLAMCH := rmax - - where - - eps = relative machine precision - sfmin = safe minimum, such that 1/sfmin does not overflow - base = base of the machine - prec = eps*base - t = number of (base) digits in the mantissa - rnd = 1.0 when rounding occurs in addition, 0.0 otherwise - emin = minimum exponent before (gradual) underflow - rmin = underflow threshold - base**(emin-1) - emax = largest exponent before overflow - rmax = overflow threshold - (base**emax)*(1-eps) - - ===================================================================== +/* -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 + + + Purpose + ======= + + DLAMCH determines double precision machine parameters. + + Arguments + ========= + + CMACH (input) CHARACTER*1 + Specifies the value to be returned by DLAMCH: + = 'E' or 'e', DLAMCH := eps + = 'S' or 's , DLAMCH := sfmin + = 'B' or 'b', DLAMCH := base + = 'P' or 'p', DLAMCH := eps*base + = 'N' or 'n', DLAMCH := t + = 'R' or 'r', DLAMCH := rnd + = 'M' or 'm', DLAMCH := emin + = 'U' or 'u', DLAMCH := rmin + = 'L' or 'l', DLAMCH := emax + = 'O' or 'o', DLAMCH := rmax + + where + + eps = relative machine precision + sfmin = safe minimum, such that 1/sfmin does not overflow + base = base of the machine + prec = eps*base + t = number of (base) digits in the mantissa + rnd = 1.0 when rounding occurs in addition, 0.0 otherwise + emin = minimum exponent before (gradual) underflow + rmin = underflow threshold - base**(emin-1) + emax = largest exponent before overflow + rmax = overflow threshold - (base**emax)*(1-eps) + + ===================================================================== */ -/* >>Start of File<< +/* >>Start of File<< Initialized data */ static logical first = TRUE_; /* System generated locals */ @@ -64,7 +64,7 @@ doublereal dlamch_(char *cmach) static doublereal rmin, rmax, t, rmach; extern logical lsame_(char *, char *); static doublereal small, sfmin; - extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *, + extern /* Subroutine */ int dlamc2_(integer *, integer *, logical *, doublereal *, integer *, doublereal *, integer *, doublereal *); static integer it; static doublereal rnd, eps; @@ -93,7 +93,7 @@ doublereal dlamch_(char *cmach) if (small >= sfmin) { /* Use SMALL plus a bit, to avoid the possibility of rou -nding +nding causing overflow when computing 1/sfmin. */ sfmin = small * (eps + 1.); @@ -130,56 +130,56 @@ nding } /* dlamch_ */ -/* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical +/* Subroutine */ int dlamc1_(integer *beta, integer *t, logical *rnd, logical *ieee1) { -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 +/* -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. 
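These dlamch.c hunks only re-flow trailing whitespace inside the embedded LAPACK documentation. The routine itself probes the machine's floating-point parameters (eps, sfmin, base and so on) at run time, routing intermediates through DLAMC3 so that optimizers and extended-precision registers cannot hide the rounding being measured. The basic probe is easy to mimic in Python, where floats are ordinary C doubles; an illustration only, not the LAPACK algorithm:

    import numpy as np

    # Find the largest eps of the form 2**-m for which fl(1 + eps) still differs
    # from 1: the spacing of floats around 1.0 (DLAMCH's eps is half of this on
    # machines that round).
    eps = 1.0
    while 1.0 + eps / 2.0 != 1.0:
        eps /= 2.0
    print(eps)                         # 2.220446049250313e-16 for IEEE double
    print(np.finfo(np.float64).eps)    # the same value, straight from NumPy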
of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 - Purpose - ======= + Purpose + ======= - DLAMC1 determines the machine parameters given by BETA, T, RND, and - IEEE1. + DLAMC1 determines the machine parameters given by BETA, T, RND, and + IEEE1. - Arguments - ========= + Arguments + ========= - BETA (output) INTEGER - The base of the machine. + BETA (output) INTEGER + The base of the machine. - T (output) INTEGER - The number of ( BETA ) digits in the mantissa. + T (output) INTEGER + The number of ( BETA ) digits in the mantissa. - RND (output) LOGICAL - Specifies whether proper rounding ( RND = .TRUE. ) or - chopping ( RND = .FALSE. ) occurs in addition. This may not - - be a reliable guide to the way in which the machine performs - - its arithmetic. + RND (output) LOGICAL + Specifies whether proper rounding ( RND = .TRUE. ) or + chopping ( RND = .FALSE. ) occurs in addition. This may not - IEEE1 (output) LOGICAL - Specifies whether rounding appears to be done in the IEEE - 'round to nearest' style. + be a reliable guide to the way in which the machine performs - Further Details - =============== + its arithmetic. - The routine is based on the routine ENVRON by Malcolm and - incorporates suggestions by Gentleman and Marovich. See + IEEE1 (output) LOGICAL + Specifies whether rounding appears to be done in the IEEE + 'round to nearest' style. - Malcolm M. A. (1972) Algorithms to reveal properties of - floating-point arithmetic. Comms. of the ACM, 15, 949-951. + Further Details + =============== - Gentleman W. M. and Marovich S. B. (1974) More on algorithms - that reveal properties of floating point arithmetic units. - Comms. of the ACM, 17, 276-277. + The routine is based on the routine ENVRON by Malcolm and + incorporates suggestions by Gentleman and Marovich. See - ===================================================================== + Malcolm M. A. (1972) Algorithms to reveal properties of + floating-point arithmetic. Comms. of the ACM, 15, 949-951. + + Gentleman W. M. and Marovich S. B. (1974) More on algorithms + that reveal properties of floating point arithmetic units. + Comms. of the ACM, 17, 276-277. + + ===================================================================== */ /* Initialized data */ static logical first = TRUE_; @@ -203,18 +203,18 @@ nding one = 1.; /* LBETA, LIEEE1, LT and LRND are the local values of BE -TA, - IEEE1, T and RND. +TA, + IEEE1, T and RND. Throughout this routine we use the function DLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. +ure + that relevant values are stored and not held in registers, + or + are not affected by optimizers. Compute a = 2.0**m with the smallest positive integer m s -uch - that +uch + that fl( a + 1.0 ) = a. */ @@ -230,11 +230,11 @@ L10: c = dlamc3_(&c, &d__1); goto L10; } -/* + END WHILE +/* + END WHILE - Now compute b = 2.0**m with the smallest positive integer -m - such that + Now compute b = 2.0**m with the smallest positive integer +m + such that fl( a + b ) .gt. a. */ @@ -248,14 +248,14 @@ L20: c = dlamc3_(&a, &b); goto L20; } -/* + END WHILE +/* + END WHILE Now compute the base. a and c are neighbouring floating po -int +int numbers in the interval ( beta**t, beta**( t + 1 ) ) and - so + so their difference is beta. Adding 0.25 to c is to ensure that - it + it is truncated to beta and not ( beta - 1 ). 
*/ qtr = one / 4; @@ -265,8 +265,8 @@ int lbeta = (integer) (c + qtr); /* Now determine whether rounding or chopping occurs, by addin -g a - bit less than beta/2 and a bit more than beta/2 to +g a + bit less than beta/2 and a bit more than beta/2 to a. */ b = (doublereal) lbeta; @@ -288,13 +288,13 @@ g a } /* Try and decide whether rounding is done in the IEEE 'round - to - nearest' style. B/2 is half a unit in the last place of the -two - numbers A and SAVEC. Furthermore, A is even, i.e. has last -bit + to + nearest' style. B/2 is half a unit in the last place of the +two + numbers A and SAVEC. Furthermore, A is even, i.e. has last +bit zero, and SAVEC is odd. Thus adding B/2 to A should not cha -nge +nge A, but adding B/2 to SAVEC should change SAVEC. */ d__1 = b / 2; @@ -304,12 +304,12 @@ nge lieee1 = t1 == a && t2 > savec && lrnd; /* Now find the mantissa, t. It should be the integer part - of + of log to the base beta of a, however it is safer to determine - t - by powering. So we find t as the smallest positive integer -for - which + t + by powering. So we find t as the smallest positive integer +for + which fl( beta**t + 1.0 ) = 1.0. */ @@ -342,73 +342,73 @@ L30: } /* dlamc1_ */ -/* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd, - doublereal *eps, integer *emin, doublereal *rmin, integer *emax, +/* Subroutine */ int dlamc2_(integer *beta, integer *t, logical *rnd, + doublereal *eps, integer *emin, doublereal *rmin, integer *emax, doublereal *rmax) { -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 +/* -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 + + + Purpose + ======= + + DLAMC2 determines the machine parameters specified in its argument + list. + Arguments + ========= - Purpose - ======= + BETA (output) INTEGER + The base of the machine. - DLAMC2 determines the machine parameters specified in its argument - list. + T (output) INTEGER + The number of ( BETA ) digits in the mantissa. - Arguments - ========= + RND (output) LOGICAL + Specifies whether proper rounding ( RND = .TRUE. ) or + chopping ( RND = .FALSE. ) occurs in addition. This may not - BETA (output) INTEGER - The base of the machine. + be a reliable guide to the way in which the machine performs - T (output) INTEGER - The number of ( BETA ) digits in the mantissa. + its arithmetic. - RND (output) LOGICAL - Specifies whether proper rounding ( RND = .TRUE. ) or - chopping ( RND = .FALSE. ) occurs in addition. This may not - - be a reliable guide to the way in which the machine performs - - its arithmetic. + EPS (output) DOUBLE PRECISION + The smallest positive number such that - EPS (output) DOUBLE PRECISION - The smallest positive number such that + fl( 1.0 - EPS ) .LT. 1.0, - fl( 1.0 - EPS ) .LT. 1.0, + where fl denotes the computed value. - where fl denotes the computed value. + EMIN (output) INTEGER + The minimum exponent before (gradual) underflow occurs. - EMIN (output) INTEGER - The minimum exponent before (gradual) underflow occurs. 
+ RMIN (output) DOUBLE PRECISION + The smallest normalized number for the machine, given by + BASE**( EMIN - 1 ), where BASE is the floating point value - RMIN (output) DOUBLE PRECISION - The smallest normalized number for the machine, given by - BASE**( EMIN - 1 ), where BASE is the floating point value - - of BETA. + of BETA. - EMAX (output) INTEGER - The maximum exponent before overflow occurs. + EMAX (output) INTEGER + The maximum exponent before overflow occurs. - RMAX (output) DOUBLE PRECISION - The largest positive number for the machine, given by - BASE**EMAX * ( 1 - EPS ), where BASE is the floating point - - value of BETA. + RMAX (output) DOUBLE PRECISION + The largest positive number for the machine, given by + BASE**EMAX * ( 1 - EPS ), where BASE is the floating point - Further Details - =============== + value of BETA. - The computation of EPS is based on a routine PARANOIA by - W. Kahan of the University of California at Berkeley. + Further Details + =============== - ===================================================================== + The computation of EPS is based on a routine PARANOIA by + W. Kahan of the University of California at Berkeley. + + ===================================================================== */ - + /* Initialized data */ static logical first = TRUE_; static logical iwarn = FALSE_; @@ -428,12 +428,12 @@ L30: static doublereal small; static integer gpmin; static doublereal third, lrmin, lrmax, sixth; - extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *, + extern /* Subroutine */ int dlamc1_(integer *, integer *, logical *, logical *); extern doublereal dlamc3_(doublereal *, doublereal *); static logical lieee1; - extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *), - dlamc5_(integer *, integer *, integer *, logical *, integer *, + extern /* Subroutine */ int dlamc4_(integer *, doublereal *, integer *), + dlamc5_(integer *, integer *, integer *, logical *, integer *, doublereal *); static integer lt, ngnmin, ngpmin; static doublereal one, two; @@ -447,16 +447,16 @@ L30: two = 2.; /* LBETA, LT, LRND, LEPS, LEMIN and LRMIN are the local values - of - BETA, T, RND, EPS, EMIN and RMIN. + of + BETA, T, RND, EPS, EMIN and RMIN. Throughout this routine we use the function DLAMC3 to ens -ure - that relevant values are stored and not held in registers, - or - are not affected by optimizers. +ure + that relevant values are stored and not held in registers, + or + are not affected by optimizers. - DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. + DLAMC1 returns the parameters LBETA, LT, LRND and LIEEE1. */ dlamc1_(&lbeta, <, &lrnd, &lieee1); @@ -511,12 +511,12 @@ L10: leps = a; } -/* Computation of EPS complete. +/* Computation of EPS complete. Now find EMIN. Let A = + or - 1, and + or - (1 + BASE**(-3 -)). +)). Keep dividing A by BETA until (gradual) underflow occurs. T -his +his is detected when we cannot recover the previous A. 
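For readers who just want the numbers DLAMC2 is hunting for: a modern NumPy exposes them through numpy.finfo, so no probing is required. The exponent and epsilon conventions differ slightly between LAPACK (EMIN/EMAX/EPS as used above) and finfo, so treat the mapping in the comments below as approximate.

import numpy as np

fi = np.finfo(np.float64)
print(fi.eps)       # machine epsilon, 2**-52 (LAPACK's EPS convention can differ by a factor of two)
print(fi.tiny)      # smallest positive normal number, the RMIN role: BASE**(EMIN - 1)
print(fi.max)       # largest finite number, the RMAX role
print(fi.minexp)    # related to EMIN (the two conventions count exponents off by one)
print(fi.maxexp)    # related to EMAX (same caveat)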
*/ rbase = one / lbeta; @@ -539,13 +539,13 @@ his if (ngpmin == gpmin) { lemin = ngpmin; /* ( Non twos-complement machines, no gradual under -flow; +flow; e.g., VAX ) */ } else if (gpmin - ngpmin == 3) { lemin = ngpmin - 1 + lt; ieee = TRUE_; /* ( Non twos-complement machines, with gradual und -erflow; +erflow; e.g., IEEE standard followers ) */ } else { lemin = min(ngpmin,gpmin); @@ -557,7 +557,7 @@ erflow; if ((i__1 = ngpmin - ngnmin, abs(i__1)) == 1) { lemin = max(ngpmin,ngnmin); /* ( Twos-complement machines, no gradual underflow -; +; e.g., CYBER 205 ) */ } else { lemin = min(ngpmin,ngnmin); @@ -570,7 +570,7 @@ erflow; if (gpmin - min(ngpmin,ngnmin) == 3) { lemin = max(ngpmin,ngnmin) - 1 + lt; /* ( Twos-complement machines with gradual underflo -w; +w; no known machine ) */ } else { lemin = min(ngpmin,ngnmin); @@ -585,33 +585,33 @@ w; /* ( A guess; no known machine ) */ iwarn = TRUE_; } -/* ** +/* ** Comment out this if block if EMIN is ok */ if (iwarn) { first = TRUE_; printf("\n\n WARNING. The value EMIN may be incorrect:- "); printf("EMIN = %8i\n",lemin); printf("If, after inspection, the value EMIN looks acceptable"); - printf("please comment out \n the IF block as marked within the"); - printf("code of routine DLAMC2, \n otherwise supply EMIN"); + printf("please comment out \n the IF block as marked within the"); + printf("code of routine DLAMC2, \n otherwise supply EMIN"); printf("explicitly.\n"); } -/* ** +/* ** Assume IEEE arithmetic if we found denormalised numbers abo -ve, +ve, or if arithmetic seems to round in the IEEE style, determi -ned +ned in routine DLAMC1. A true IEEE machine should have both thi -ngs +ngs true; however, faulty machines may have one or the other. */ ieee = ieee || lieee1; /* Compute RMIN by successive division by BETA. We could comp -ute +ute RMIN as BASE**( EMIN - 1 ), but some machines underflow dur -ing +ing this computation. */ lrmin = 1.; @@ -647,30 +647,30 @@ ing doublereal dlamc3_(doublereal *a, doublereal *b) { -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 +/* -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 + + + Purpose + ======= + DLAMC3 is intended to force A and B to be stored prior to doing - Purpose - ======= + the addition of A and B , for use in situations where optimizers - DLAMC3 is intended to force A and B to be stored prior to doing - - the addition of A and B , for use in situations where optimizers - - might hold one of these in a register. + might hold one of these in a register. - Arguments - ========= + Arguments + ========= - A, B (input) DOUBLE PRECISION - The values A and B. + A, B (input) DOUBLE PRECISION + The values A and B. - ===================================================================== + ===================================================================== */ -/* >>Start of File<< +/* >>Start of File<< System generated locals */ volatile doublereal ret_val; @@ -688,33 +688,33 @@ doublereal dlamc3_(doublereal *a, doublereal *b) #ifndef HAVE_CONFIG /* Subroutine */ int dlamc4_(integer *emin, doublereal *start, integer *base) { -/* -- LAPACK auxiliary routine (version 2.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 +/* -- LAPACK auxiliary routine (version 2.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 - Purpose - ======= + Purpose + ======= - DLAMC4 is a service routine for DLAMC2. + DLAMC4 is a service routine for DLAMC2. - Arguments - ========= + Arguments + ========= - EMIN (output) EMIN - The minimum exponent before (gradual) underflow, computed by - - setting A = START and dividing by BASE until the previous A - can not be recovered. + EMIN (output) EMIN + The minimum exponent before (gradual) underflow, computed by - START (input) DOUBLE PRECISION - The starting point for determining EMIN. + setting A = START and dividing by BASE until the previous A + can not be recovered. - BASE (input) INTEGER - The base of the machine. + START (input) DOUBLE PRECISION + The starting point for determining EMIN. - ===================================================================== + BASE (input) INTEGER + The base of the machine. + + ===================================================================== */ /* System generated locals */ integer i__1; @@ -739,7 +739,7 @@ doublereal dlamc3_(doublereal *a, doublereal *b) c2 = a; d1 = a; d2 = a; -/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. +/* + WHILE( ( C1.EQ.A ).AND.( C2.EQ.A ).AND. $ ( D1.EQ.A ).AND.( D2.EQ.A ) )LOOP */ L10: if (c1 == a && c2 == a && d1 == a && d2 == a) { @@ -776,60 +776,60 @@ L10: } /* dlamc4_ */ -/* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin, +/* Subroutine */ int dlamc5_(integer *beta, integer *p, integer *emin, logical *ieee, integer *emax, doublereal *rmax) { -/* -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 +/* -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 + + + Purpose + ======= + DLAMC5 attempts to compute RMAX, the largest machine floating-point + number, without overflow. It assumes that EMAX + abs(EMIN) sum + approximately to a power of 2. It will fail on machines where this + assumption does not hold, for example, the Cyber 205 (EMIN = -28625, - Purpose - ======= + EMAX = 28718). It will also fail if the value supplied for EMIN is + too large (i.e. too close to zero), probably with overflow. - DLAMC5 attempts to compute RMAX, the largest machine floating-point - number, without overflow. It assumes that EMAX + abs(EMIN) sum - approximately to a power of 2. It will fail on machines where this - assumption does not hold, for example, the Cyber 205 (EMIN = -28625, - - EMAX = 28718). It will also fail if the value supplied for EMIN is - too large (i.e. too close to zero), probably with overflow. + Arguments + ========= - Arguments - ========= + BETA (input) INTEGER + The base of floating-point arithmetic. - BETA (input) INTEGER - The base of floating-point arithmetic. + P (input) INTEGER + The number of base BETA digits in the mantissa of a + floating-point value. - P (input) INTEGER - The number of base BETA digits in the mantissa of a - floating-point value. + EMIN (input) INTEGER + The minimum exponent before (gradual) underflow. - EMIN (input) INTEGER - The minimum exponent before (gradual) underflow. 
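DLAMC5's purpose, quoted at the start of this hunk, is to assemble RMAX = (1 - BETA**(-P)) * BETA**EMAX without overflowing on the way. A small Python illustration of why the ordering matters for IEEE doubles (BETA = 2, P = 53, EMAX = 1024); this is a sketch of the idea, not the routine itself:

import math

# Scaling the already-computed fraction by 2**1024 lands exactly on the largest double.
rmax = math.ldexp(1.0 - 2.0 ** -53, 1024)
print(rmax)                       # 1.7976931348623157e+308
print(rmax == float("inf"))       # False: no overflow occurred

# The naive ordering overflows, because 2.0**1024 is already out of range on its own.
try:
    naive = (1.0 - 2.0 ** -53) * 2.0 ** 1024
except OverflowError as exc:
    print("naive ordering overflows:", exc)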
+ IEEE (input) LOGICAL + A logical flag specifying whether or not the arithmetic + system is thought to comply with the IEEE standard. - IEEE (input) LOGICAL - A logical flag specifying whether or not the arithmetic - system is thought to comply with the IEEE standard. + EMAX (output) INTEGER + The largest exponent before overflow - EMAX (output) INTEGER - The largest exponent before overflow + RMAX (output) DOUBLE PRECISION + The largest machine floating-point number. - RMAX (output) DOUBLE PRECISION - The largest machine floating-point number. + ===================================================================== - ===================================================================== - - First compute LEXP and UEXP, two powers of 2 that bound - abs(EMIN). We then assume that EMAX + abs(EMIN) will sum - approximately to the bound that is closest to abs(EMIN). + First compute LEXP and UEXP, two powers of 2 that bound + abs(EMIN). We then assume that EMAX + abs(EMIN) will sum + approximately to the bound that is closest to abs(EMIN). (EMAX is the exponent of the required number RMAX). */ /* Table of constant values */ static doublereal c_b5 = 0.; - + /* System generated locals */ integer i__1; doublereal d__1; @@ -861,8 +861,8 @@ L10: ++exbits; } -/* Now -LEXP is less than or equal to EMIN, and -UEXP is greater - than or equal to EMIN. EXBITS is the number of bits needed to +/* Now -LEXP is less than or equal to EMIN, and -UEXP is greater + than or equal to EMIN. EXBITS is the number of bits needed to store the exponent. */ if (uexp + *emin > -lexp - *emin) { @@ -871,32 +871,32 @@ L10: expsum = uexp << 1; } -/* EXPSUM is the exponent range, approximately equal to +/* EXPSUM is the exponent range, approximately equal to EMAX - EMIN + 1 . */ *emax = expsum + *emin - 1; nbits = exbits + 1 + *p; -/* NBITS is the total number of bits needed to store a +/* NBITS is the total number of bits needed to store a floating-point number. */ if (nbits % 2 == 1 && *beta == 2) { -/* Either there are an odd number of bits used to store a - floating-point number, which is unlikely, or some bits are - +/* Either there are an odd number of bits used to store a + floating-point number, which is unlikely, or some bits are + not used in the representation of numbers, which is possible -, - (e.g. Cray machines) or the mantissa has an implicit bit, +, + (e.g. Cray machines) or the mantissa has an implicit bit, (e.g. IEEE machines, Dec Vax machines), which is perhaps the - - most likely. We have to assume the last alternative. - If this is true, then we need to reduce EMAX by one because - + + most likely. We have to assume the last alternative. + If this is true, then we need to reduce EMAX by one because + there must be some way of representing zero in an implicit-b -it - system. On machines like Cray, we are reducing EMAX by one - +it + system. On machines like Cray, we are reducing EMAX by one + unnecessarily. */ --(*emax); @@ -905,16 +905,16 @@ it if (*ieee) { /* Assume we are on an IEEE machine which reserves one exponent - + for infinity and NaN. */ --(*emax); } -/* Now create RMAX, the largest machine number, which should - be equal to (1.0 - BETA**(-P)) * BETA**EMAX . +/* Now create RMAX, the largest machine number, which should + be equal to (1.0 - BETA**(-P)) * BETA**EMAX . - First compute 1.0 - BETA**(-P), being careful that the + First compute 1.0 - BETA**(-P), being careful that the result is less than 1.0 . */ recbas = 1. 
/ *beta; diff --git a/numpy/linalg/lapack_lite/dlapack_lite.c b/numpy/linalg/lapack_lite/dlapack_lite.c index 6a36fe6a8..6b65397bd 100644 --- a/numpy/linalg/lapack_lite/dlapack_lite.c +++ b/numpy/linalg/lapack_lite/dlapack_lite.c @@ -100830,4 +100830,3 @@ L250: /* End of STRTRI */ } /* strtri_ */ - diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py index 6aa5c3e80..e2cb879cf 100755 --- a/numpy/linalg/lapack_lite/make_lite.py +++ b/numpy/linalg/lapack_lite/make_lite.py @@ -151,7 +151,7 @@ class LapackLibrary(FortranLibrary): return routine def allRoutinesByType(self, typename): - routines = sorted((r.name,r) for r in self.allRoutines() if r.type == typename) + routines = sorted((r.name, r) for r in self.allRoutines() if r.type == typename) return [a[1] for a in routines] def printRoutineNames(desc, routines): diff --git a/numpy/linalg/lapack_lite/zlapack_lite.c b/numpy/linalg/lapack_lite/zlapack_lite.c index 1b43c6270..e6b03429b 100644 --- a/numpy/linalg/lapack_lite/zlapack_lite.c +++ b/numpy/linalg/lapack_lite/zlapack_lite.c @@ -27003,4 +27003,3 @@ L130: /* End of ZUNMTR */ } /* zunmtr_ */ - diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 851db6e8c..78e487a25 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -13,7 +13,7 @@ from __future__ import division, absolute_import, print_function __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', - 'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank', + 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError'] import warnings @@ -271,7 +271,7 @@ def tensorsolve(a, b, axes=None): True """ - a,wrap = _makearray(a) + a, wrap = _makearray(a) b = asarray(b) an = a.ndim @@ -766,7 +766,7 @@ def qr(a, mode='reduced'): # handle modes that don't return q if mode == 'r': - r = _fastCopyAndTranspose(result_t, a[:,:mn]) + r = _fastCopyAndTranspose(result_t, a[:, :mn]) return wrap(triu(r)) if mode == 'raw': @@ -808,7 +808,7 @@ def qr(a, mode='reduced'): raise LinAlgError('%s returns %d' % (routine_name, results['info'])) q = _fastCopyAndTranspose(result_t, q[:mc]) - r = _fastCopyAndTranspose(result_t, a[:,:mc]) + r = _fastCopyAndTranspose(result_t, a[:, :mc]) return wrap(q), wrap(triu(r)) @@ -1415,10 +1415,10 @@ def cond(x, p=None): """ x = asarray(x) # in case we have a matrix if p is None: - s = svd(x,compute_uv=False) + s = svd(x, compute_uv=False) return s[0]/s[-1] else: - return norm(x,p)*norm(inv(x),p) + return norm(x, p)*norm(inv(x), p) def matrix_rank(M, tol=None): @@ -1585,7 +1585,7 @@ def pinv(a, rcond=1e-15 ): s[i] = 1./s[i] else: s[i] = 0.; - res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u))) + res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u))) return wrap(res) # Determinant @@ -1824,7 +1824,7 @@ def lstsq(a, b, rcond=-1): result_real_t = _realType(result_t) real_t = _linalgRealType(t) bstar = zeros((ldb, n_rhs), t) - bstar[:b.shape[0],:n_rhs] = b.copy() + bstar[:b.shape[0], :n_rhs] = b.copy() a, bstar = _fastCopyAndTranspose(t, a, bstar) a, bstar = _to_native_byte_order(a, bstar) s = zeros((min(m, n),), real_t) diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py index 1c73c86d3..282c3423c 100644 --- a/numpy/linalg/setup.py +++ b/numpy/linalg/setup.py @@ -6,7 +6,7 @@ import sys def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info 
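Aside, not part of the patch: the qr hunks above only adjust comma spacing in slices, but they sit in the mode='r' path of numpy.linalg.qr. A quick usage reminder, run against a current NumPy:

import numpy as np

a = np.random.rand(5, 3)
q, r = np.linalg.qr(a, mode='reduced')   # economy-size factorization: q is (5, 3), r is (3, 3)
r_only = np.linalg.qr(a, mode='r')       # skip building Q, return only the triangular factor

print(np.allclose(a, q @ r))             # True: the factorization reconstructs a
print(np.allclose(r, r_only))            # True: same R either way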
- config = Configuration('linalg',parent_package,top_path) + config = Configuration('linalg', parent_package, top_path) config.add_data_dir('tests') @@ -15,15 +15,15 @@ def configuration(parent_package='',top_path=None): src_dir = 'lapack_lite' lapack_lite_src = [ os.path.join(src_dir, 'python_xerbla.c'), - os.path.join(src_dir, 'zlapack_lite.c'), + os.path.join(src_dir, 'zlapack_lite.c'), os.path.join(src_dir, 'dlapack_lite.c'), - os.path.join(src_dir, 'blas_lite.c'), + os.path.join(src_dir, 'blas_lite.c'), os.path.join(src_dir, 'dlamch.c'), os.path.join(src_dir, 'f2c_lite.c'), os.path.join(src_dir, 'f2c.h'), ] - lapack_info = get_info('lapack_opt',0) # and {} + lapack_info = get_info('lapack_opt', 0) # and {} def get_lapack_lite_sources(ext, build_dir): if not lapack_info: print("### Warning: Using unoptimized lapack ###") diff --git a/numpy/linalg/tests/test_gufuncs_linalg.py b/numpy/linalg/tests/test_gufuncs_linalg.py index d3299b7b5..40f8c4058 100644 --- a/numpy/linalg/tests/test_gufuncs_linalg.py +++ b/numpy/linalg/tests/test_gufuncs_linalg.py @@ -71,7 +71,7 @@ def assert_almost_equal(a, b, **kw): def assert_valid_eigen_no_broadcast(M, w, v, **kw): - lhs = gula.matrix_multiply(M,v) + lhs = gula.matrix_multiply(M, v) rhs = w*v assert_almost_equal(lhs, rhs, **kw) @@ -127,39 +127,39 @@ def assert_valid_eigenvals(M, w, **kw): class MatrixGenerator(object): def real_matrices(self): - a = [[1,2], - [3,4]] + a = [[1, 2], + [3, 4]] - b = [[4,3], - [2,1]] + b = [[4, 3], + [2, 1]] return a, b - + def real_symmetric_matrices(self): - a = [[ 2 ,-1], - [-1 , 2]] + a = [[ 2, -1], + [-1, 2]] - b = [[4,3], - [2,1]] + b = [[4, 3], + [2, 1]] return a, b def complex_matrices(self): - a = [[1+2j,2+3j], - [3+4j,4+5j]] + a = [[1+2j, 2+3j], + [3+4j, 4+5j]] - b = [[4+3j,3+2j], - [2+1j,1+0j]] + b = [[4+3j, 3+2j], + [2+1j, 1+0j]] return a, b def complex_hermitian_matrices(self): - a = [[2,-1], + a = [[2, -1], [-1, 2]] - b = [[4+3j,3+2j], - [2-1j,1+0j]] - + b = [[4+3j, 3+2j], + [2-1j, 1+0j]] + return a, b def real_matrices_vector(self): @@ -181,7 +181,7 @@ class MatrixGenerator(object): class GeneralTestCase(MatrixGenerator): def test_single(self): - a,b = self.real_matrices() + a, b = self.real_matrices() self.do(array(a, dtype=single), array(b, dtype=single)) @@ -201,7 +201,7 @@ class GeneralTestCase(MatrixGenerator): array(b, dtype=cdouble)) def test_vector_single(self): - a,b = self.real_matrices_vector() + a, b = self.real_matrices_vector() self.do(array(a, dtype=single), array(b, dtype=single)) @@ -223,7 +223,7 @@ class GeneralTestCase(MatrixGenerator): class HermitianTestCase(MatrixGenerator): def test_single(self): - a,b = self.real_symmetric_matrices() + a, b = self.real_symmetric_matrices() self.do(array(a, dtype=single), array(b, dtype=single)) @@ -243,7 +243,7 @@ class HermitianTestCase(MatrixGenerator): array(b, dtype=cdouble)) def test_vector_single(self): - a,b = self.real_symmetric_matrices_vector() + a, b = self.real_symmetric_matrices_vector() self.do(array(a, dtype=single), array(b, dtype=single)) @@ -265,17 +265,17 @@ class HermitianTestCase(MatrixGenerator): class TestMatrixMultiply(GeneralTestCase): def do(self, a, b): - res = gula.matrix_multiply(a,b) + res = gula.matrix_multiply(a, b) if a.ndim == 2: - assert_almost_equal(res, np.dot(a,b)) + assert_almost_equal(res, np.dot(a, b)) else: - assert_almost_equal(res[0], np.dot(a[0],b[0])) + assert_almost_equal(res[0], np.dot(a[0], b[0])) def test_column_matrix(self): - A = np.arange(2*2).reshape((2,2)) - B = np.arange(2*1).reshape((2,1)) - 
res = gula.matrix_multiply(A,B) - assert_almost_equal(res, np.dot(A,B)) + A = np.arange(2*2).reshape((2, 2)) + B = np.arange(2*1).reshape((2, 1)) + res = gula.matrix_multiply(A, B) + assert_almost_equal(res, np.dot(A, B)) class TestInv(GeneralTestCase, TestCase): def do(self, a, b): @@ -291,7 +291,7 @@ class TestPoinv(HermitianTestCase, TestCase): a_inv = gula.poinv(a) ident = identity(a.shape[-1]) if 3 == len(a.shape): - ident = ident.reshape((1,ident.shape[0], ident.shape[1])) + ident = ident.reshape((1, ident.shape[0], ident.shape[1])) assert_almost_equal(a_inv, gula.inv(a)) assert_almost_equal(gula.matrix_multiply(a, a_inv), ident) @@ -329,15 +329,15 @@ class TestDet(GeneralTestCase, TestCase): assert_equal(gula.slogdet(array([[0.0]], dtype=cdouble)), (0.0, -inf)) def test_types(self): - for typ in [(single, single), - (double, double), + for typ in [(single, single), + (double, double), (csingle, single), (cdouble, double)]: for x in [ [0], [[0]], [[[0]]] ]: assert_equal(gula.det(array(x, dtype=typ[0])).dtype, typ[0]) assert_equal(gula.slogdet(array(x, dtype=typ[0]))[0].dtype, typ[0]) assert_equal(gula.slogdet(array(x, dtype=typ[0]))[1].dtype, typ[1]) - + class TestEig(GeneralTestCase, TestCase): def do(self, a, b): @@ -369,15 +369,15 @@ class TestEigh(HermitianTestCase, TestCase): assert_almost_equal(ev_up, evalues_up) -class TestSolve(GeneralTestCase,TestCase): +class TestSolve(GeneralTestCase, TestCase): def do(self, a, b): - x = gula.solve(a,b) - assert_almost_equal(b, gula.matrix_multiply(a,x)) + x = gula.solve(a, b) + assert_almost_equal(b, gula.matrix_multiply(a, x)) class TestChosolve(HermitianTestCase, TestCase): def do(self, a, b): - """ + """ inner1d not defined for complex types. todo: implement alternative test """ @@ -389,8 +389,8 @@ class TestChosolve(HermitianTestCase, TestCase): assert_almost_equal(x_lo, x_up) # inner1d not defined for complex types # todo: implement alternative test - assert_almost_equal(b, gula.matrix_multiply(a,x_lo)) - assert_almost_equal(b, gula.matrix_multiply(a,x_up)) + assert_almost_equal(b, gula.matrix_multiply(a, x_lo)) + assert_almost_equal(b, gula.matrix_multiply(a, x_up)) class TestSVD(GeneralTestCase, TestCase): @@ -417,7 +417,7 @@ class TestCholesky(HermitianTestCase, TestCase): # - multiply4_add class UfuncTestCase(object): - parameter = range(0,10) + parameter = range(0, 10) def _check_for_type(self, typ): a = np.array(self.__class__.parameter, dtype=typ) @@ -427,7 +427,7 @@ class UfuncTestCase(object): parameter = self.__class__.parameter a = np.array([parameter, parameter], dtype=typ) self.do(a) - + def test_single(self): self._check_for_type(single) @@ -455,43 +455,43 @@ class UfuncTestCase(object): class TestAdd3(UfuncTestCase, TestCase): def do(self, a): - r = gula.add3(a,a,a) + r = gula.add3(a, a, a) assert_almost_equal(r, a+a+a) class TestMultiply3(UfuncTestCase, TestCase): def do(self, a): - r = gula.multiply3(a,a,a) + r = gula.multiply3(a, a, a) assert_almost_equal(r, a*a*a) class TestMultiply3Add(UfuncTestCase, TestCase): def do(self, a): - r = gula.multiply3_add(a,a,a,a) + r = gula.multiply3_add(a, a, a, a) assert_almost_equal(r, a*a*a+a) class TestMultiplyAdd(UfuncTestCase, TestCase): def do(self, a): - r = gula.multiply_add(a,a,a) + r = gula.multiply_add(a, a, a) assert_almost_equal(r, a*a+a) class TestMultiplyAdd2(UfuncTestCase, TestCase): def do(self, a): - r = gula.multiply_add2(a,a,a,a) + r = gula.multiply_add2(a, a, a, a) assert_almost_equal(r, a*a+a+a) class TestMultiply4(UfuncTestCase, TestCase): def do(self, a): - 
r = gula.multiply4(a,a,a,a) + r = gula.multiply4(a, a, a, a) assert_almost_equal(r, a*a*a*a) class TestMultiply4_add(UfuncTestCase, TestCase): def do(self, a): - r = gula.multiply4_add(a,a,a,a,a) + r = gula.multiply4_add(a, a, a, a, a) assert_almost_equal(r, a*a*a*a+a) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 7f102634e..2dd270521 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -39,32 +39,32 @@ def get_complex_dtype(dtype): class LinalgTestCase(object): def test_single(self): - a = array([[1.,2.], [3.,4.]], dtype=single) + a = array([[1., 2.], [3., 4.]], dtype=single) b = array([2., 1.], dtype=single) self.do(a, b) def test_double(self): - a = array([[1.,2.], [3.,4.]], dtype=double) + a = array([[1., 2.], [3., 4.]], dtype=double) b = array([2., 1.], dtype=double) self.do(a, b) def test_double_2(self): - a = array([[1.,2.], [3.,4.]], dtype=double) + a = array([[1., 2.], [3., 4.]], dtype=double) b = array([[2., 1., 4.], [3., 4., 6.]], dtype=double) self.do(a, b) def test_csingle(self): - a = array([[1.+2j,2+3j], [3+4j,4+5j]], dtype=csingle) + a = array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=csingle) b = array([2.+1j, 1.+2j], dtype=csingle) self.do(a, b) def test_cdouble(self): - a = array([[1.+2j,2+3j], [3+4j,4+5j]], dtype=cdouble) + a = array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble) b = array([2.+1j, 1.+2j], dtype=cdouble) self.do(a, b) def test_cdouble_2(self): - a = array([[1.+2j,2+3j], [3+4j,4+5j]], dtype=cdouble) + a = array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble) b = array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble) self.do(a, b) @@ -78,71 +78,71 @@ class LinalgTestCase(object): pass def test_nonarray(self): - a = [[1,2], [3,4]] + a = [[1, 2], [3, 4]] b = [2, 1] - self.do(a,b) + self.do(a, b) def test_matrix_b_only(self): """Check that matrix type is preserved.""" - a = array([[1.,2.], [3.,4.]]) + a = array([[1., 2.], [3., 4.]]) b = matrix([2., 1.]).T self.do(a, b) def test_matrix_a_and_b(self): """Check that matrix type is preserved.""" - a = matrix([[1.,2.], [3.,4.]]) + a = matrix([[1., 2.], [3., 4.]]) b = matrix([2., 1.]).T self.do(a, b) class LinalgNonsquareTestCase(object): def test_single_nsq_1(self): - a = array([[1.,2.,3.], [3.,4.,6.]], dtype=single) + a = array([[1., 2., 3.], [3., 4., 6.]], dtype=single) b = array([2., 1.], dtype=single) self.do(a, b) def test_single_nsq_2(self): - a = array([[1.,2.], [3.,4.], [5.,6.]], dtype=single) + a = array([[1., 2.], [3., 4.], [5., 6.]], dtype=single) b = array([2., 1., 3.], dtype=single) self.do(a, b) def test_double_nsq_1(self): - a = array([[1.,2.,3.], [3.,4.,6.]], dtype=double) + a = array([[1., 2., 3.], [3., 4., 6.]], dtype=double) b = array([2., 1.], dtype=double) self.do(a, b) def test_double_nsq_2(self): - a = array([[1.,2.], [3.,4.], [5.,6.]], dtype=double) + a = array([[1., 2.], [3., 4.], [5., 6.]], dtype=double) b = array([2., 1., 3.], dtype=double) self.do(a, b) def test_csingle_nsq_1(self): - a = array([[1.+1j,2.+2j,3.-3j], [3.-5j,4.+9j,6.+2j]], dtype=csingle) + a = array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=csingle) b = array([2.+1j, 1.+2j], dtype=csingle) self.do(a, b) def test_csingle_nsq_2(self): - a = array([[1.+1j,2.+2j], [3.-3j,4.-9j], [5.-4j,6.+8j]], dtype=csingle) + a = array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=csingle) b = array([2.+1j, 1.+2j, 3.-3j], dtype=csingle) self.do(a, b) def test_cdouble_nsq_1(self): - a = array([[1.+1j,2.+2j,3.-3j], [3.-5j,4.+9j,6.+2j]], 
dtype=cdouble) + a = array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble) b = array([2.+1j, 1.+2j], dtype=cdouble) self.do(a, b) def test_cdouble_nsq_2(self): - a = array([[1.+1j,2.+2j], [3.-3j,4.-9j], [5.-4j,6.+8j]], dtype=cdouble) + a = array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble) b = array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble) self.do(a, b) def test_cdouble_nsq_1_2(self): - a = array([[1.+1j,2.+2j,3.-3j], [3.-5j,4.+9j,6.+2j]], dtype=cdouble) + a = array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble) b = array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble) self.do(a, b) def test_cdouble_nsq_2_2(self): - a = array([[1.+1j,2.+2j], [3.-3j,4.-9j], [5.-4j,6.+8j]], dtype=cdouble) + a = array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble) b = array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble) self.do(a, b) @@ -211,14 +211,14 @@ class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase, TestCase): a = np.arange(8).reshape(2, 2, 2) b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) - expected = linalg.solve(a, b)[:,0:0,:] - result = linalg.solve(a[:,0:0,0:0], b[:,0:0,:]) + expected = linalg.solve(a, b)[:, 0:0,:] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:]) assert_array_equal(result, expected) assert_(isinstance(result, ArraySubclass)) # Test errors for non-square and only b's dimension being 0 - assert_raises(linalg.LinAlgError, linalg.solve, a[:,0:0,0:1], b) - assert_raises(ValueError, linalg.solve, a, b[:,0:0,:]) + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:]) # Test broadcasting error b = np.arange(6).reshape(1, 3, 2) # broadcasting error @@ -227,15 +227,15 @@ class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase, TestCase): # Test zero "single equations" with 0x0 matrices. 
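Aside: the TestSolve hunk above exercises linalg.solve on stacks of matrices, including degenerate 0x0 stacks. The ordinary stacked case looks like this (illustrative, using a current NumPy):

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 3, 3))    # a stack of four 3x3 coefficient matrices
b = rng.standard_normal((4, 3, 1))    # four matching right-hand sides
x = np.linalg.solve(a, b)             # one call solves every system in the stack

print(x.shape)                        # (4, 3, 1)
print(np.allclose(a @ x, b))          # True, up to rounding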
b = np.arange(2).reshape(1, 2).view(ArraySubclass) - expected = linalg.solve(a, b)[:,0:0] - result = linalg.solve(a[:,0:0,0:0], b[:,0:0]) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) assert_array_equal(result, expected) assert_(isinstance(result, ArraySubclass)) b = np.arange(3).reshape(1, 3) assert_raises(ValueError, linalg.solve, a, b) assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) - assert_raises(ValueError, linalg.solve, a[:,0:0,0:0], b) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) class TestInv(LinalgTestCase, LinalgGeneralizedTestCase, TestCase): @@ -256,13 +256,13 @@ class TestInv(LinalgTestCase, LinalgGeneralizedTestCase, TestCase): # Check that all kinds of 0-sized arrays work class ArraySubclass(np.ndarray): pass - a = np.zeros((0,1,1), dtype=np.int_).view(ArraySubclass) + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) res = linalg.inv(a) assert_(res.dtype.type is np.float64) assert_equal(a.shape, res.shape) assert_(isinstance(a, ArraySubclass)) - a = np.zeros((0,0), dtype=np.complex64).view(ArraySubclass) + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) res = linalg.inv(a) assert_(res.dtype.type is np.complex64) assert_equal(a.shape, res.shape) @@ -288,7 +288,7 @@ class TestEig(LinalgTestCase, LinalgGeneralizedTestCase, TestCase): def do(self, a, b): evalues, evectors = linalg.eig(a) if evectors.ndim == 3: - assert_almost_equal(dot_generalized(a, evectors), evectors * evalues[:,None,:]) + assert_almost_equal(dot_generalized(a, evectors), evectors * evalues[:, None,:]) else: assert_almost_equal(dot(a, evectors), multiply(evectors, evalues)) assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix))) @@ -313,7 +313,7 @@ class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase, TestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) if u.ndim == 3: - assert_almost_equal(a, dot_generalized(u * s[:,None,:], vt)) + assert_almost_equal(a, dot_generalized(u * s[:, None,:], vt)) else: assert_almost_equal(a, dot(multiply(u, s), vt)) assert_(imply(isinstance(a, matrix), isinstance(u, matrix))) @@ -344,13 +344,13 @@ class TestCond2(LinalgTestCase, TestCase): def do(self, a, b): c = asarray(a) # a might be a matrix s = linalg.svd(c, compute_uv=False) - old_assert_almost_equal(s[0]/s[-1], linalg.cond(a,2), decimal=5) + old_assert_almost_equal(s[0]/s[-1], linalg.cond(a, 2), decimal=5) class TestCondInf(TestCase): def test(self): - A = array([[1.,0,0],[0,-2.,0],[0,0,3.]]) - assert_almost_equal(linalg.cond(A,inf),3.) + A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 3.) 
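The cond-related hunks in this patch are whitespace-only, but the two code paths they touch are worth restating: with p omitted, numpy.linalg.cond returns the ratio of the largest to the smallest singular value; otherwise it returns norm(x, p) * norm(inv(x), p), which is exactly what TestCond2 and TestCondInf verify. For example:

import numpy as np

A = np.array([[1., 0., 0.],
              [0., -2., 0.],
              [0., 0., 3.]])

s = np.linalg.svd(A, compute_uv=False)
print(np.isclose(np.linalg.cond(A), s[0] / s[-1]))   # default: 2-norm condition number
print(np.linalg.cond(A, np.inf))                     # 3.0, matching TestCondInf above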
class TestPinv(LinalgTestCase, TestCase): @@ -426,10 +426,10 @@ class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase, TestCase): class TestMatrixPower(object): - R90 = array([[0,1],[-1,0]]) - Arb22 = array([[4,-7],[-2,10]]) - noninv = array([[1,0],[0,0]]) - arbfloat = array([[0.1,3.2],[1.2,0.7]]) + R90 = array([[0, 1], [-1, 0]]) + Arb22 = array([[4, -7], [-2, 10]]) + noninv = array([[1, 0], [0, 0]]) + arbfloat = array([[0.1, 3.2], [1.2, 0.7]]) large = identity(10) t = large[1,:].copy() @@ -437,14 +437,14 @@ class TestMatrixPower(object): large[0,:] = t def test_large_power(self): - assert_equal(matrix_power(self.R90,2**100+2**10+2**5+1),self.R90) + assert_equal(matrix_power(self.R90, 2**100+2**10+2**5+1), self.R90) def test_large_power_trailing_zero(self): - assert_equal(matrix_power(self.R90,2**100+2**10+2**5),identity(2)) + assert_equal(matrix_power(self.R90, 2**100+2**10+2**5), identity(2)) def testip_zero(self): def tz(M): - mz = matrix_power(M,0) + mz = matrix_power(M, 0) assert_equal(mz, identity(M.shape[0])) assert_equal(mz.dtype, M.dtype) for M in [self.Arb22, self.arbfloat, self.large]: @@ -452,7 +452,7 @@ class TestMatrixPower(object): def testip_one(self): def tz(M): - mz = matrix_power(M,1) + mz = matrix_power(M, 1) assert_equal(mz, M) assert_equal(mz.dtype, M.dtype) for M in [self.Arb22, self.arbfloat, self.large]: @@ -460,46 +460,46 @@ class TestMatrixPower(object): def testip_two(self): def tz(M): - mz = matrix_power(M,2) - assert_equal(mz, dot(M,M)) + mz = matrix_power(M, 2) + assert_equal(mz, dot(M, M)) assert_equal(mz.dtype, M.dtype) for M in [self.Arb22, self.arbfloat, self.large]: yield tz, M def testip_invert(self): def tz(M): - mz = matrix_power(M,-1) - assert_almost_equal(identity(M.shape[0]), dot(mz,M)) + mz = matrix_power(M, -1) + assert_almost_equal(identity(M.shape[0]), dot(mz, M)) for M in [self.R90, self.Arb22, self.arbfloat, self.large]: yield tz, M def test_invert_noninvertible(self): import numpy.linalg assert_raises(numpy.linalg.linalg.LinAlgError, - lambda: matrix_power(self.noninv,-1)) + lambda: matrix_power(self.noninv, -1)) class TestBoolPower(TestCase): def test_square(self): - A = array([[True,False],[True,True]]) - assert_equal(matrix_power(A,2),A) + A = array([[True, False], [True, True]]) + assert_equal(matrix_power(A, 2), A) class HermitianTestCase(object): def test_single(self): - a = array([[1.,2.], [2.,1.]], dtype=single) + a = array([[1., 2.], [2., 1.]], dtype=single) self.do(a, None) def test_double(self): - a = array([[1.,2.], [2.,1.]], dtype=double) + a = array([[1., 2.], [2., 1.]], dtype=double) self.do(a, None) def test_csingle(self): - a = array([[1.,2+3j], [2-3j,1]], dtype=csingle) + a = array([[1., 2+3j], [2-3j, 1]], dtype=csingle) self.do(a, None) def test_cdouble(self): - a = array([[1.,2+3j], [2-3j,1]], dtype=cdouble) + a = array([[1., 2+3j], [2-3j, 1]], dtype=cdouble) self.do(a, None) def test_empty(self): @@ -507,17 +507,17 @@ class HermitianTestCase(object): assert_raises(linalg.LinAlgError, self.do, a, None) def test_nonarray(self): - a = [[1,2], [2,1]] + a = [[1, 2], [2, 1]] self.do(a, None) def test_matrix_b_only(self): """Check that matrix type is preserved.""" - a = array([[1.,2.], [2.,1.]]) + a = array([[1., 2.], [2., 1.]]) self.do(a, None) def test_matrix_a_and_b(self): """Check that matrix type is preserved.""" - a = matrix([[1.,2.], [2.,1.]]) + a = matrix([[1., 2.], [2., 1.]]) self.do(a, None) @@ -623,7 +623,7 @@ class _TestNorm(TestCase): # or column separately. 
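TestMatrixPower above uses the 90-degree rotation matrix R90, whose powers cycle with period four, so even the huge exponents in test_large_power are easy to check by hand. Illustration (not part of the patch):

import numpy as np
from numpy.linalg import matrix_power

R90 = np.array([[0, 1], [-1, 0]])        # rotation by 90 degrees

print(matrix_power(R90, 0))              # identity, keeping the integer dtype of R90
print(np.array_equal(matrix_power(R90, 4), np.eye(2, dtype=int)))   # True: period four
print(matrix_power(R90, -1))             # the inverse (computed via linalg.inv, so a float result)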
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: - expected0 = [norm(A[:,k], ord=order) for k in range(A.shape[1])] + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] assert_almost_equal(norm(A, ord=order, axis=0), expected0) expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])] assert_almost_equal(norm(A, ord=order, axis=1), expected1) @@ -644,11 +644,11 @@ class _TestNorm(TestCase): assert_almost_equal(n, expected) n = norm(B, ord=order, axis=(0, 2)) - expected = [norm(B[:,k,:], ord=order) for k in range(B.shape[1])] + expected = [norm(B[:, k,:], ord=order) for k in range(B.shape[1])] assert_almost_equal(n, expected) n = norm(B, ord=order, axis=(0, 1)) - expected = [norm(B[:,:,k], ord=order) for k in range(B.shape[2])] + expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])] assert_almost_equal(n, expected) def test_bad_args(self): @@ -696,17 +696,17 @@ class TestMatrixRank(object): # Full rank matrix yield assert_equal, 4, matrix_rank(np.eye(4)) # rank deficient matrix - I=np.eye(4); I[-1,-1] = 0. + I=np.eye(4); I[-1, -1] = 0. yield assert_equal, matrix_rank(I), 3 # All zeros - zero rank - yield assert_equal, matrix_rank(np.zeros((4,4))), 0 + yield assert_equal, matrix_rank(np.zeros((4, 4))), 0 # 1 dimension - rank 1 unless all 0 yield assert_equal, matrix_rank([1, 0, 0, 0]), 1 yield assert_equal, matrix_rank(np.zeros((4,))), 0 # accepts array-like yield assert_equal, matrix_rank([1]), 1 # greater than 2 dimensions raises error - yield assert_raises, TypeError, matrix_rank, np.zeros((2,2,2)) + yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2)) # works on scalar yield assert_equal, matrix_rank(1), 1 @@ -769,7 +769,7 @@ class TestQR(TestCase): def test_qr_empty(self): - a = np.zeros((0,2)) + a = np.zeros((0, 2)) self.assertRaises(linalg.LinAlgError, linalg.qr, a) @@ -864,7 +864,7 @@ def test_generalized_raise_multiloop(): x = np.zeros([4, 4, 2, 2])[1::2] x[...] 
= invertible - x[0,0] = non_invertible + x[0, 0] = non_invertible assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 76fe4be10..4ff14a6a5 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -20,13 +20,13 @@ class TestRegression(TestCase): -6.57612485e-01+10.41755503j, -6.57612485e-01-10.41755503j, 1.82126812e+01 +0.j, - 1.06011014e+01 +0.j , - 7.80732773e+00 +0.j , + 1.06011014e+01 +0.j, + 7.80732773e+00 +0.j, -7.65390898e-01 +0.j, - 1.51971555e-15 +0.j , + 1.51971555e-15 +0.j, -1.51308713e-15 +0.j]) a = arange(13*13, dtype = float64) - a.shape = (13,13) + a.shape = (13, 13) a = a%17 va, ve = linalg.eig(a) va.sort() diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src index c5303a077..5e75a6419 100644 --- a/numpy/linalg/umath_linalg.c.src +++ b/numpy/linalg/umath_linalg.c.src @@ -1056,7 +1056,7 @@ static void #TYPE=FLOAT,DOUBLE,CFLOAT,CDOUBLE# #typ=npy_float,npy_double,COMPLEX_t,DOUBLECOMPLEX_t# #add=FLOAT_add,DOUBLE_add,CFLOAT_add,CDOUBLE_add# - #mul=FLOAT_mul,DOUBLE_mul,CFLOAT_mul,CDOUBLE_mul# + #mul=FLOAT_mul,DOUBLE_mul,CFLOAT_mul,CDOUBLE_mul# #zero=s_zero, d_zero,c_zero,z_zero# */ @@ -2483,7 +2483,7 @@ init_@lapack_func@(GEEV_PARAMS_t* params, @typ@ work_size_query; fortran_int do_size_query = -1; fortran_int rv; - size_t total_size = a_size + w_size + vl_size + vr_size + rwork_size; + size_t total_size = a_size + w_size + vl_size + vr_size + rwork_size; mem_buff = malloc(total_size); if (!mem_buff) @@ -3860,7 +3860,7 @@ static char eigh_types[] = { static char eighvals_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, - NPY_CFLOAT, NPY_FLOAT, + NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; @@ -3941,7 +3941,7 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { }, { "slogdet", - "(m,m)->(),()", + "(m,m)->(),()", "slogdet on the last two dimensions and broadcast on the rest. \n"\ "Results in two arrays, one with sign and the other with log of the"\ " determinants. \n"\ diff --git a/numpy/ma/API_CHANGES.txt b/numpy/ma/API_CHANGES.txt index 7452eb5fa..a3d792a1f 100644 --- a/numpy/ma/API_CHANGES.txt +++ b/numpy/ma/API_CHANGES.txt @@ -61,7 +61,7 @@ Previously, ``putmask`` was used like this:: putmask(x,mask,[3]) which translated to:: - + x[~mask] = [3] (Note that a ``True``-value in a mask suppresses a value.) @@ -133,4 +133,3 @@ New features (non exhaustive list) -------- The ``anom`` method returns the deviations from the average (anomalies). - diff --git a/numpy/ma/README.txt b/numpy/ma/README.txt index e25c26485..a725b54c3 100644 --- a/numpy/ma/README.txt +++ b/numpy/ma/README.txt @@ -234,7 +234,3 @@ Revision notes * 08/25/2007 : Creation of this page * 01/23/2007 : The package has been moved to the SciPy sandbox, and is regularly updated: please check out your SVN version! - - - - diff --git a/numpy/ma/bench.py b/numpy/ma/bench.py index b8d085130..75e6d90c8 100644 --- a/numpy/ma/bench.py +++ b/numpy/ma/bench.py @@ -17,8 +17,8 @@ import numpy #####--------------------------------------------------------------------------- # Small arrays .................................. 
-xs = numpy.random.uniform(-1,1,6).reshape(2,3) -ys = numpy.random.uniform(-1,1,6).reshape(2,3) +xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3) +ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3) zs = xs + 1j * ys m1 = [[True, False, False], [False, False, True]] m2 = [[True, False, True], [False, False, True]] @@ -26,8 +26,8 @@ nmxs = numpy.ma.array(xs, mask=m1) nmys = numpy.ma.array(ys, mask=m2) nmzs = numpy.ma.array(zs, mask=m1) # Big arrays .................................... -xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) -yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) +yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100) zl = xl + 1j * yl maskx = xl > 0.8 masky = yl < -0.8 @@ -42,7 +42,7 @@ nmzl = numpy.ma.array(zl, mask=maskx) def timer(s, v='', nloop=500, nrep=3): units = ["s", "ms", "µs", "ns"] scaling = [1, 1e3, 1e6, 1e9] - print("%s : %-50s : " % (v,s), end=' ') + print("%s : %-50s : " % (v, s), end=' ') varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz'] setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames) Timer = timeit.Timer(stmt=s, setup=setup) @@ -64,11 +64,11 @@ def compare_functions_1v(func, nloop=500, funcname = func.__name__ print("-"*50) print("%s on small arrays" % funcname) - module, data = "numpy.ma","nmxs" + module, data = "numpy.ma", "nmxs" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) # print("%s on large arrays" % funcname) - module, data = "numpy.ma","nmxl" + module, data = "numpy.ma", "nmxl" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) return @@ -92,11 +92,11 @@ def compare_functions_2v(func, nloop=500, test=True, funcname = func.__name__ print("-"*50) print("%s on small arrays" % funcname) - module, data = "numpy.ma","nmxs,nmys" + module, data = "numpy.ma", "nmxs,nmys" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) # print("%s on large arrays" % funcname) - module, data = "numpy.ma","nmxl,nmyl" + module, data = "numpy.ma", "nmxl,nmyl" timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) return @@ -139,28 +139,28 @@ if __name__ == '__main__': compare_functions_2v(numpy.divide) compare_functions_2v(numpy.power) #.................................................................... - compare_methods('ravel','', nloop=1000) - compare_methods('conjugate','','z', nloop=1000) - compare_methods('transpose','', nloop=1000) - compare_methods('compressed','', nloop=1000) - compare_methods('__getitem__','0', nloop=1000) - compare_methods('__getitem__','(0,0)', nloop=1000) - compare_methods('__getitem__','[0,-1]', nloop=1000) - compare_methods('__setitem__','0, 17', nloop=1000, test=False) - compare_methods('__setitem__','(0,0), 17', nloop=1000, test=False) + compare_methods('ravel', '', nloop=1000) + compare_methods('conjugate', '', 'z', nloop=1000) + compare_methods('transpose', '', nloop=1000) + compare_methods('compressed', '', nloop=1000) + compare_methods('__getitem__', '0', nloop=1000) + compare_methods('__getitem__', '(0,0)', nloop=1000) + compare_methods('__getitem__', '[0,-1]', nloop=1000) + compare_methods('__setitem__', '0, 17', nloop=1000, test=False) + compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False) #.................................................................... 
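The benchmark arrays above (nmxs, nmys, nmxl, ...) are numpy.ma masked arrays: a True entry in the mask hides that element from arithmetic and reductions. A minimal reminder of what that means, separate from the patch:

import numpy as np
import numpy.ma as ma

x = np.array([1., 2., 3., 4.])
xm = ma.masked_array(x, mask=[0, 1, 0, 0])   # the second element is hidden

print(xm.mean())     # 2.666..., computed over the three unmasked values only
print(ma.count(xm))  # 3 unmasked elements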
print("-"*50) print("__setitem__ on small arrays") - timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000) + timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) print("-"*50) print("__setitem__ on large arrays") - timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000) + timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000) #.................................................................... print("-"*50) print("where on small arrays") - timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ',nloop=1000) + timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000) print("-"*50) print("where on large arrays") - timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ',nloop=100) + timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100) diff --git a/numpy/ma/setup.py b/numpy/ma/setup.py index 2175628d6..5486ff46a 100644 --- a/numpy/ma/setup.py +++ b/numpy/ma/setup.py @@ -10,7 +10,7 @@ import os def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('ma',parent_package,top_path) + config = Configuration('ma', parent_package, top_path) config.add_data_dir('tests') return config diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 088e203b2..6c7baa48e 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -34,7 +34,7 @@ class TestMaskedArray(TestCase): y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) @@ -79,8 +79,8 @@ class TestMaskedArray(TestCase): assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(zm.dtype, z.dtype) - assert_equal(xm.size , reduce(lambda x, y:x * y, s)) - assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_array_equal(xm, xf) assert_array_equal(filled(xm, 1.e20), xf) assert_array_equal(x, xm) @@ -100,8 +100,8 @@ class TestMaskedArray(TestCase): self.assertTrue(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) - assert_equal(xm.size , reduce(lambda x, y:x * y, s)) - assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) assert_equal(xm, xf) assert_equal(filled(xm, 1.e20), xf) assert_equal(x, xm) @@ -658,7 +658,7 @@ class TestMaskedArrayArithmetic(TestCase): y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. 
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) @@ -717,7 +717,7 @@ class TestMaskedArrayArithmetic(TestCase): assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) # - z = x / y[None, :] + z = x / y[None,:] assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]]) assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]]) # @@ -1547,7 +1547,7 @@ class TestFillingValues(TestCase): "Test the behavior of fill_value in view" # Create initial masked array - x = array([1,2,3], fill_value=1, dtype=np.int64) + x = array([1, 2, 3], fill_value=1, dtype=np.int64) # Check that fill_value is preserved by default y = x.view() @@ -1778,7 +1778,7 @@ class TestMaskedArrayInPlaceArithmetics(TestCase): x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) # @@ -1952,10 +1952,10 @@ class TestMaskedArrayMethods(TestCase): "Test class for miscellaneous MaskedArrays methods." def setUp(self): "Base data definition." - x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, + x = np.array([ 8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) X = x.reshape(6, 6) @@ -2125,10 +2125,10 @@ class TestMaskedArrayMethods(TestCase): def test_clip(self): "Tests clip on MaskedArrays." - x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, + x = np.array([ 8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, @@ -2428,10 +2428,10 @@ class TestMaskedArrayMethods(TestCase): def test_swapaxes(self): "Tests swapaxes on MaskedArrays." - x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, + x = np.array([ 8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) m = np.array([0, 1, 0, 1, 0, 0, @@ -2467,7 +2467,7 @@ class TestMaskedArrayMethods(TestCase): def test_take_masked_indices(self): "Test take w/ masked indices" a = np.array((40, 18, 37, 9, 22)) - indices = np.arange(3)[None, :] + np.arange(5)[:, None] + indices = np.arange(3)[None,:] + np.arange(5)[:, None] mindices = array(indices, mask=(indices >= len(a))) # No mask test = take(a, mindices, mode='clip') @@ -2615,10 +2615,10 @@ class TestMaskedArrayMathMethods(TestCase): def setUp(self): "Base data definition." 
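The arithmetic tests in this hunk (xm = masked_array(x, mask=m1), ym = masked_array(y, mask=m2), and so on) rely on masks propagating through operations: an element masked in either operand is masked in the result. For instance:

import numpy.ma as ma

x = ma.array([1., 2., 3.], mask=[0, 1, 0])
y = ma.array([4., 5., 6.], mask=[0, 0, 1])

z = x + y
print(z)        # [5.0 -- --]
print(z.mask)   # [False  True  True]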
- x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, + x = np.array([ 8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) X = x.reshape(6, 6) @@ -2840,10 +2840,10 @@ class TestMaskedArrayMathMethodsComplex(TestCase): "Test class for miscellaneous MaskedArrays methods." def setUp(self): "Base data definition." - x = np.array([ 8.375j, 7.545j, 8.828j, 8.5j , 1.757j, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, + x = np.array([ 8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479j, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) X = x.reshape(6, 6) @@ -2897,7 +2897,7 @@ class TestMaskedArrayFunctions(TestCase): y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) @@ -3079,7 +3079,7 @@ class TestMaskedArrayFunctions(TestCase): y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) @@ -3110,7 +3110,7 @@ class TestMaskedArrayFunctions(TestCase): x[3] = masked c = x >= 8 # Set False to masked - z = where(c , x, masked) + z = where(c, x, masked) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is masked) @@ -3119,7 +3119,7 @@ class TestMaskedArrayFunctions(TestCase): assert_(z[9] is not masked) assert_equal(x, z) # Set True to masked - z = where(c , masked, x) + z = where(c, masked, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) @@ -3369,7 +3369,7 @@ class TestMaskedArrayFunctions(TestCase): # Test compress function on ndarray and masked array # Address Github #2495. 
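Earlier in this hunk the tests build z = where(c, x, masked) and check which elements come out masked. The behaviour being exercised, in isolation (illustrative, not part of the patch):

import numpy.ma as ma

x = ma.arange(10)
c = x >= 8
z = ma.where(c, x, ma.masked)   # keep values where the condition holds, mask the rest
print(z)                        # [-- -- -- -- -- -- -- -- 8 9]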
arr = np.arange(8) - arr.shape = 4,2 + arr.shape = 4, 2 cond = np.array([True, False, True, True]) control = arr[[0, 2, 3]] test = np.ma.compress(cond, arr, axis=0) @@ -3457,11 +3457,11 @@ class TestMaskedFields(TestCase): ndtype = [('a', int), ('b', float)] test = empty(3, dtype=ndtype) assert_equal(getmaskarray(test), - np.array([(0, 0) , (0, 0), (0, 0)], + np.array([(0, 0), (0, 0), (0, 0)], dtype=[('a', '|b1'), ('b', '|b1')])) test[:] = masked assert_equal(getmaskarray(test), - np.array([(1, 1) , (1, 1), (1, 1)], + np.array([(1, 1), (1, 1), (1, 1)], dtype=[('a', '|b1'), ('b', '|b1')])) # def test_view(self): diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index 4b30813b2..d1886f84a 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -277,8 +277,8 @@ class TestConcatenator(TestCase): assert_array_equal(d.mask, np.r_['1', m_1, m_2]) d = mr_[b_1, b_2] self.assertTrue(d.shape == (10, 5)) - assert_array_equal(d[:5, :], b_1) - assert_array_equal(d[5:, :], b_2) + assert_array_equal(d[:5,:], b_1) + assert_array_equal(d[5:,:], b_2) assert_array_equal(d.mask, np.r_[m_1, m_2]) @@ -688,7 +688,7 @@ class TestPolynomial(TestCase): assert_almost_equal(a, a_) # (C, R, K, S, D) = polyfit(x, y, 3, full=True) - (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 9938e55de..866979041 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -39,12 +39,12 @@ class TestMRecords(TestCase): def setup(self): "Generic setup" - ilist = [1,2,3,4,5] - flist = [1.1,2.2,3.3,4.4,5.5] - slist = asbytes_nested(['one','two','three','four','five']) - ddtype = [('a',int),('b',float),('c','|S8')] - mask = [0,1,0,0,1] - self.base = ma.array(list(zip(ilist,flist,slist)), + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = asbytes_nested(['one', 'two', 'three', 'four', 'five']) + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + self.base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) def test_byview(self): @@ -55,7 +55,7 @@ class TestMRecords(TestCase): assert_equal_records(mbase._mask, base._mask) assert_(isinstance(mbase._data, recarray)) assert_equal_records(mbase._data, base._data.view(recarray)) - for field in ('a','b','c'): + for field in ('a', 'b', 'c'): assert_equal(base[field], mbase[field]) assert_equal_records(mbase.view(mrecarray), mbase) @@ -64,14 +64,14 @@ class TestMRecords(TestCase): base = self.base.copy() mbase = base.view(mrecarray) # As fields.......... - for field in ('a','b','c'): - assert_equal(getattr(mbase,field), mbase[field]) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) assert_equal(base[field], mbase[field]) # as elements ....... 
mbase_first = mbase[0] assert_(isinstance(mbase_first, mrecarray)) assert_equal(mbase_first.dtype, mbase.dtype) - assert_equal(mbase_first.tolist(), (1,1.1,asbytes('one'))) + assert_equal(mbase_first.tolist(), (1, 1.1, asbytes('one'))) # Used to be mask, now it's recordmask assert_equal(mbase_first.recordmask, nomask) assert_equal(mbase_first._mask.item(), (False, False, False)) @@ -79,7 +79,7 @@ class TestMRecords(TestCase): mbase_last = mbase[-1] assert_(isinstance(mbase_last, mrecarray)) assert_equal(mbase_last.dtype, mbase.dtype) - assert_equal(mbase_last.tolist(), (None,None,None)) + assert_equal(mbase_last.tolist(), (None, None, None)) # Used to be mask, now it's recordmask assert_equal(mbase_last.recordmask, True) assert_equal(mbase_last._mask.item(), (True, True, True)) @@ -90,24 +90,24 @@ class TestMRecords(TestCase): assert_(isinstance(mbase_sl, mrecarray)) assert_equal(mbase_sl.dtype, mbase.dtype) # Used to be mask, now it's recordmask - assert_equal(mbase_sl.recordmask, [0,1]) + assert_equal(mbase_sl.recordmask, [0, 1]) assert_equal_records(mbase_sl.mask, - np.array([(False,False,False),(True,True,True)], + np.array([(False, False, False), (True, True, True)], dtype=mbase._mask.dtype)) assert_equal_records(mbase_sl, base[:2].view(mrecarray)) - for field in ('a','b','c'): - assert_equal(getattr(mbase_sl,field), base[:2][field]) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) def test_set_fields(self): "Tests setting fields." base = self.base.copy() mbase = base.view(mrecarray) mbase = mbase.copy() - mbase.fill_value = (999999,1e20,'N/A') + mbase.fill_value = (999999, 1e20, 'N/A') # Change the data, the mask should be conserved mbase.a._data[:] = 5 - assert_equal(mbase['a']._data, [5,5,5,5,5]) - assert_equal(mbase['a']._mask, [0,1,0,0,1]) + assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) + assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) # Change the elements, and the mask will follow mbase.a = 1 assert_equal(mbase['a']._data, [1]*5) @@ -115,7 +115,7 @@ class TestMRecords(TestCase): # Use to be _mask, now it's recordmask assert_equal(mbase.recordmask, [False]*5) assert_equal(mbase._mask.tolist(), - np.array([(0,0,0),(0,1,1),(0,0,0),(0,0,0),(0,1,1)], + np.array([(0, 0, 0), (0, 1, 1), (0, 0, 0), (0, 0, 0), (0, 1, 1)], dtype=bool)) # Set a field to mask ........................ mbase.c = masked @@ -125,27 +125,27 @@ class TestMRecords(TestCase): assert_equal(ma.getmaskarray(mbase['c']), [1]*5) assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5) assert_equal(mbase._mask.tolist(), - np.array([(0,0,1),(0,1,1),(0,0,1),(0,0,1),(0,1,1)], + np.array([(0, 0, 1), (0, 1, 1), (0, 0, 1), (0, 0, 1), (0, 1, 1)], dtype=bool)) # Set fields by slices ....................... mbase = base.view(mrecarray).copy() mbase.a[3:] = 5 - assert_equal(mbase.a, [1,2,3,5,5]) - assert_equal(mbase.a._mask, [0,1,0,0,0]) + assert_equal(mbase.a, [1, 2, 3, 5, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) mbase.b[3:] = masked assert_equal(mbase.b, base['b']) - assert_equal(mbase.b._mask, [0,1,0,1,1]) + assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) # Set fields globally.......................... 
- ndtype = [('alpha','|S1'),('num',int)] - data = ma.array([('a',1),('b',2),('c',3)], dtype=ndtype) + ndtype = [('alpha', '|S1'), ('num', int)] + data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) rdata = data.view(MaskedRecords) - val = ma.array([10,20,30], mask=[1,0,0]) + val = ma.array([10, 20, 30], mask=[1, 0, 0]) # with warnings.catch_warnings(): warnings.simplefilter("ignore") rdata['num'] = val assert_equal(rdata.num, val) - assert_equal(rdata.num.mask, [1,0,0]) + assert_equal(rdata.num.mask, [1, 0, 0]) def test_set_fields_mask(self): "Tests setting the mask of a field." @@ -153,14 +153,14 @@ class TestMRecords(TestCase): # This one has already a mask.... mbase = base.view(mrecarray) mbase['a'][-2] = masked - assert_equal(mbase.a, [1,2,3,4,5]) - assert_equal(mbase.a._mask, [0,1,0,1,1]) + assert_equal(mbase.a, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) # This one has not yet mbase = fromarrays([np.arange(5), np.random.rand(5)], - dtype=[('a',int),('b',float)]) + dtype=[('a', int), ('b', float)]) mbase['a'][-2] = masked - assert_equal(mbase.a, [0,1,2,3,4]) - assert_equal(mbase.a._mask, [0,0,0,1,0]) + assert_equal(mbase.a, [0, 1, 2, 3, 4]) + assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) # def test_set_mask(self): base = self.base.copy() @@ -171,42 +171,42 @@ class TestMRecords(TestCase): assert_equal(mbase['a']._mask, mbase['b']._mask) assert_equal(mbase['a']._mask, mbase['c']._mask) assert_equal(mbase._mask.tolist(), - np.array([(1,1,1)]*5, dtype=bool)) + np.array([(1, 1, 1)]*5, dtype=bool)) # Delete the mask ............................ mbase.mask = nomask assert_equal(ma.getmaskarray(mbase['c']), [0]*5) assert_equal(mbase._mask.tolist(), - np.array([(0,0,0)]*5, dtype=bool)) + np.array([(0, 0, 0)]*5, dtype=bool)) # def test_set_mask_fromarray(self): base = self.base.copy() mbase = base.view(mrecarray) # Sets the mask w/ an array - mbase.mask = [1,0,0,0,1] - assert_equal(mbase.a.mask, [1,0,0,0,1]) - assert_equal(mbase.b.mask, [1,0,0,0,1]) - assert_equal(mbase.c.mask, [1,0,0,0,1]) + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) # Yay, once more ! 
- mbase.mask = [0,0,0,0,1] - assert_equal(mbase.a.mask, [0,0,0,0,1]) - assert_equal(mbase.b.mask, [0,0,0,0,1]) - assert_equal(mbase.c.mask, [0,0,0,0,1]) + mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) # def test_set_mask_fromfields(self): mbase = self.base.copy().view(mrecarray) # - nmask = np.array([(0,1,0),(0,1,0),(1,0,1),(1,0,1),(0,0,0)], - dtype=[('a',bool),('b',bool),('c',bool)]) + nmask = np.array([(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) mbase.mask = nmask - assert_equal(mbase.a.mask, [0,0,1,1,0]) - assert_equal(mbase.b.mask, [1,1,0,0,0]) - assert_equal(mbase.c.mask, [0,0,1,1,0]) + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) # Reinitalizes and redo mbase.mask = False mbase.fieldmask = nmask - assert_equal(mbase.a.mask, [0,0,1,1,0]) - assert_equal(mbase.b.mask, [1,1,0,0,0]) - assert_equal(mbase.c.mask, [0,0,1,1,0]) + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) # def test_set_elements(self): base = self.base.copy() @@ -214,30 +214,30 @@ class TestMRecords(TestCase): mbase = base.view(mrecarray).copy() mbase[-2] = masked assert_equal(mbase._mask.tolist(), - np.array([(0,0,0),(1,1,1),(0,0,0),(1,1,1),(1,1,1)], + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], dtype=bool)) # Used to be mask, now it's recordmask! - assert_equal(mbase.recordmask, [0,1,0,1,1]) + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) # Set slices ................................. mbase = base.view(mrecarray).copy() - mbase[:2] = (5,5,5) - assert_equal(mbase.a._data, [5,5,3,4,5]) - assert_equal(mbase.a._mask, [0,0,0,0,1]) - assert_equal(mbase.b._data, [5.,5.,3.3,4.4,5.5]) - assert_equal(mbase.b._mask, [0,0,0,0,1]) + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) assert_equal(mbase.c._data, - asbytes_nested(['5','5','three','four','five'])) - assert_equal(mbase.b._mask, [0,0,0,0,1]) + asbytes_nested(['5', '5', 'three', 'four', 'five'])) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) # mbase = base.view(mrecarray).copy() mbase[:2] = masked - assert_equal(mbase.a._data, [1,2,3,4,5]) - assert_equal(mbase.a._mask, [1,1,0,0,1]) - assert_equal(mbase.b._data, [1.1,2.2,3.3,4.4,5.5]) - assert_equal(mbase.b._mask, [1,1,0,0,1]) + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) assert_equal(mbase.c._data, - asbytes_nested(['one','two','three','four','five'])) - assert_equal(mbase.b._mask, [1,1,0,0,1]) + asbytes_nested(['one', 'two', 'three', 'four', 'five'])) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) # def test_setslices_hardmask(self): "Tests setting slices w/ hardmask." 
@@ -245,12 +245,12 @@ class TestMRecords(TestCase): mbase = base.view(mrecarray) mbase.harden_mask() try: - mbase[-2:] = (5,5,5) - assert_equal(mbase.a._data, [1,2,3,5,5]) - assert_equal(mbase.b._data, [1.1,2.2,3.3,5,5.5]) + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) assert_equal(mbase.c._data, - asbytes_nested(['one','two','three','5','five'])) - assert_equal(mbase.a._mask, [0,1,0,0,1]) + asbytes_nested(['one', 'two', 'three', '5', 'five'])) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) assert_equal(mbase.b._mask, mbase.a._mask) assert_equal(mbase.b._mask, mbase.c._mask) except NotImplementedError: @@ -282,9 +282,9 @@ class TestMRecords(TestCase): mbase.mask = nomask # So, the mask of a field is no longer set to nomask... assert_equal_records(mbase._mask, - ma.make_mask_none(base.shape,base.dtype)) + ma.make_mask_none(base.shape, base.dtype)) self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask) - assert_equal(mbase['a']._mask,mbase['b']._mask) + assert_equal(mbase['a']._mask, mbase['b']._mask) # def test_pickling(self): "Test pickling" @@ -299,29 +299,29 @@ class TestMRecords(TestCase): # def test_filled(self): "Test filling the array" - _a = ma.array([1,2,3],mask=[0,0,1],dtype=int) - _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float) - _c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8') - ddtype = [('a',int),('b',float),('c','|S8')] - mrec = fromarrays([_a,_b,_c], dtype=ddtype, - fill_value=(99999,99999.,'N/A')) + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) mrecfilled = mrec.filled() - assert_equal(mrecfilled['a'], np.array((1,2,99999), dtype=int)) - assert_equal(mrecfilled['b'], np.array((1.1,2.2,99999.), dtype=float)) - assert_equal(mrecfilled['c'], np.array(('one','two','N/A'), dtype='|S8')) + assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) + assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), dtype=float)) + assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), dtype='|S8')) # def test_tolist(self): "Test tolist." 
- _a = ma.array([1,2,3],mask=[0,0,1],dtype=int) - _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float) - _c = ma.array(['one','two','three'],mask=[1,0,0],dtype='|S8') - ddtype = [('a',int),('b',float),('c','|S8')] - mrec = fromarrays([_a,_b,_c], dtype=ddtype, - fill_value=(99999,99999.,'N/A')) + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) # assert_equal(mrec.tolist(), - [(1,1.1,None),(2,2.2,asbytes('two')), - (None,None,asbytes('three'))]) + [(1, 1.1, None), (2, 2.2, asbytes('two')), + (None, None, asbytes('three'))]) # @@ -333,9 +333,9 @@ class TestMRecords(TestCase): # def test_exotic_formats(self): "Test that 'exotic' formats are processed properly" - easy = mrecarray(1, dtype=[('i',int), ('s','|S8'), ('f',float)]) + easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) easy[0] = masked - assert_equal(easy.filled(1).item(), (1,asbytes('1'),1.)) + assert_equal(easy.filled(1).item(), (1, asbytes('1'), 1.)) # solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))]) solo[0] = masked @@ -347,19 +347,19 @@ class TestMRecords(TestCase): mult[1] = (1, 1, 1) mult.filled(0) assert_equal_records(mult.filled(0), - np.array([(0,0,0),(1,1,1)], dtype=mult.dtype)) + np.array([(0, 0, 0), (1, 1, 1)], dtype=mult.dtype)) class TestView(TestCase): # def setUp(self): (a, b) = (np.arange(10), np.random.rand(10)) - ndtype = [('a',np.float), ('b',np.float)] - arr = np.array(list(zip(a,b)), dtype=ndtype) + ndtype = [('a', np.float), ('b', np.float)] + arr = np.array(list(zip(a, b)), dtype=ndtype) rec = arr.view(np.recarray) # - marr = ma.array(list(zip(a,b)), dtype=ndtype, fill_value=(-9., -99.)) - mrec = fromarrays([a,b], dtype=ndtype, fill_value=(-9., -99.)) + marr = ma.array(list(zip(a, b)), dtype=ndtype, fill_value=(-9., -99.)) + mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) mrec.mask[3] = (False, True) self.data = (mrec, a, b, arr) # @@ -375,12 +375,12 @@ class TestView(TestCase): ntype = (np.float, 2) test = mrec.view(ntype) self.assertTrue(isinstance(test, ma.MaskedArray)) - assert_equal(test, np.array(list(zip(a,b)), dtype=np.float)) - self.assertTrue(test[3,1] is ma.masked) + assert_equal(test, np.array(list(zip(a, b)), dtype=np.float)) + self.assertTrue(test[3, 1] is ma.masked) # def test_view_flexible_type(self): (mrec, a, b, arr) = self.data - alttype = [('A',np.float), ('B',np.float)] + alttype = [('A', np.float), ('B', np.float)] test = mrec.view(alttype) self.assertTrue(isinstance(test, MaskedRecords)) assert_equal_records(test, arr.view(alttype)) @@ -398,26 +398,26 @@ class TestMRecordsImport(TestCase): def setup(self): "Generic setup" - _a = ma.array([1,2,3],mask=[0,0,1],dtype=int) - _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float) - _c = ma.array(list(map(asbytes,['one','two','three'])), - mask=[0,0,1],dtype='|S8') - ddtype = [('a',int),('b',float),('c','|S8')] - mrec = fromarrays([_a,_b,_c], dtype=ddtype, - fill_value=(asbytes('99999'),asbytes('99999.'), + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(list(map(asbytes, ['one', 'two', 'three'])), + mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(asbytes('99999'), 
asbytes('99999.'), asbytes('N/A'))) - nrec = recfromarrays((_a._data,_b._data,_c._data), dtype=ddtype) + nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype) self.data = (mrec, nrec, ddtype) def test_fromarrays(self): - _a = ma.array([1,2,3],mask=[0,0,1],dtype=int) - _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float) - _c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8') + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') (mrec, nrec, _) = self.data - for (f,l) in zip(('a','b','c'),(_a,_b,_c)): - assert_equal(getattr(mrec,f)._mask, l._mask) + for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)): + assert_equal(getattr(mrec, f)._mask, l._mask) # One record only - _x = ma.array([1,1.1,'one'], mask=[1,0,0],) + _x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],) assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0]) @@ -431,7 +431,7 @@ class TestMRecordsImport(TestCase): (0, ' ', 0.40000000596046448, 0)] pa = recfromrecords(palist, names='c1, c2, c3, c4') mpa = fromrecords(palist, names='c1, c2, c3, c4') - assert_equal_records(pa,mpa) + assert_equal_records(pa, mpa) #..... _mrec = fromrecords(nrec) assert_equal(_mrec.dtype, mrec.dtype) @@ -439,9 +439,9 @@ class TestMRecordsImport(TestCase): assert_equal(getattr(_mrec, field), getattr(mrec._data, field)) # _mrec = fromrecords(nrec.tolist(), names='c1,c2,c3') - assert_equal(_mrec.dtype, [('c1',int),('c2',float),('c3','|S5')]) - for (f,n) in zip(('c1','c2','c3'), ('a','b','c')): - assert_equal(getattr(_mrec,f), getattr(mrec._data, n)) + assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')]) + for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')): + assert_equal(getattr(_mrec, f), getattr(mrec._data, n)) # _mrec = fromrecords(mrec) assert_equal(_mrec.dtype, mrec.dtype) @@ -452,13 +452,13 @@ class TestMRecordsImport(TestCase): "Tests construction from records w/ mask." 
(mrec, nrec, ddtype) = self.data # - _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0,1,0,]) + _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,]) assert_equal_records(_mrec._data, mrec._data) - assert_equal(_mrec._mask.tolist(), [(0,0,0),(1,1,1),(0,0,0)]) + assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)]) # _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True) assert_equal_records(_mrec._data, mrec._data) - assert_equal(_mrec._mask.tolist(), [(1,1,1),(1,1,1),(1,1,1)]) + assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)]) # _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask) assert_equal_records(_mrec._data, mrec._data) @@ -480,21 +480,21 @@ class TestMRecordsImport(TestCase): """) import os import tempfile - (tmp_fd,tmp_fl) = tempfile.mkstemp() + (tmp_fd, tmp_fl) = tempfile.mkstemp() os.write(tmp_fd, fcontent) os.close(tmp_fd) - mrectxt = fromtextfile(tmp_fl, delimitor=',',varnames='ABCDEFG') + mrectxt = fromtextfile(tmp_fl, delimitor=',', varnames='ABCDEFG') os.remove(tmp_fl) # self.assertTrue(isinstance(mrectxt, MaskedRecords)) - assert_equal(mrectxt.F, [1,1,1,1]) - assert_equal(mrectxt.E._mask, [1,1,1,1]) - assert_equal(mrectxt.C, [1,2,3.e+5,-1e-10]) + assert_equal(mrectxt.F, [1, 1, 1, 1]) + assert_equal(mrectxt.E._mask, [1, 1, 1, 1]) + assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10]) def test_addfield(self): "Tests addfield" (mrec, nrec, ddtype) = self.data - (d,m) = ([100,200,300], [1,0,0]) + (d, m) = ([100, 200, 300], [1, 0, 0]) mrec = addfield(mrec, ma.array(d, mask=m)) assert_equal(mrec.f3, d) assert_equal(mrec.f3._mask, m) @@ -505,7 +505,7 @@ def test_record_array_with_object_field(): Trac #1839 """ y = ma.masked_array( - [(1,'2'), (3, '4')], + [(1, '2'), (3, '4')], mask=[(0, 0), (0, 1)], dtype=[('a', int), ('b', np.object)]) x = y[1] diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 50247df37..d0864436c 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -26,7 +26,7 @@ class TestMa(TestCase): y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. 
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = array(x, mask=m1) ym = array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) @@ -44,8 +44,8 @@ class TestMa(TestCase): self.assertEqual(shape(xm), s) self.assertEqual(xm.shape, s) self.assertEqual(xm.dtype, x.dtype) - self.assertEqual(xm.size , reduce(lambda x, y:x * y, s)) - self.assertEqual(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) + self.assertEqual(xm.size, reduce(lambda x, y:x * y, s)) + self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) self.assertTrue(eq(xm, xf)) self.assertTrue(eq(filled(xm, 1.e20), xf)) self.assertTrue(eq(x, xm)) @@ -64,8 +64,8 @@ class TestMa(TestCase): self.assertTrue(isMaskedArray(xm)) self.assertEqual(shape(xm), s) self.assertEqual(xm.shape, s) - self.assertEqual(xm.size , reduce(lambda x, y:x * y, s)) - self.assertEqual(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1)) + self.assertEqual(xm.size, reduce(lambda x, y:x * y, s)) + self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) self.assertTrue(eq(xm, xf)) self.assertTrue(eq(filled(xm, 1.e20), xf)) self.assertTrue(eq(x, xm)) @@ -333,7 +333,7 @@ class TestMa(TestCase): c = x >= 8 assert_(count(where(c, masked, masked)) == 0) assert_(shape(where(c, masked, masked)) == c.shape) - z = where(c , x, masked) + z = where(c, x, masked) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is masked) @@ -341,7 +341,7 @@ class TestMa(TestCase): assert_(z[8] is not masked) assert_(z[9] is not masked) assert_(eq(x, z)) - z = where(c , masked, x) + z = where(c, masked, x) assert_(z.dtype is x.dtype) assert_(z[3] is masked) assert_(z[4] is not masked) @@ -574,12 +574,12 @@ class TestMa(TestCase): a = arange(6) b = arange(6) * 3 r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) - self.assertEqual(shape(r1) , shape(w1)) - self.assertEqual(r1.shape , w1.shape) + self.assertEqual(shape(r1), shape(w1)) + self.assertEqual(r1.shape, w1.shape) r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) - self.assertEqual(shape(w2) , shape(r2)) + self.assertEqual(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), returned=1) - self.assertEqual(shape(w2) , shape(r2)) + self.assertEqual(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) self.assertTrue(shape(w2) == shape(r2)) a2d = array([[1, 2], [0, 4]], float) @@ -722,10 +722,10 @@ class TestUfuncs(TestCase): class TestArrayMethods(TestCase): def setUp(self): - x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, - 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, + x = np.array([ 8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, - 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) X = x.reshape(6, 6) diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index eb301aa05..1274833b2 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -10,32 +10,32 @@ rlevel = 1 class TestRegression(TestCase): def test_masked_array_create(self,level=rlevel): """Ticket #17""" - x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0]) - assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]]) + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], mask=[0, 0, 0, 1, 1, 1, 0, 0]) + 
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) def test_masked_array(self,level=rlevel): """Ticket #61""" - x = np.ma.array(1,mask=[1]) + x = np.ma.array(1, mask=[1]) def test_mem_masked_where(self,level=rlevel): """Ticket #62""" from numpy.ma import masked_where, MaskType - a = np.zeros((1,1)) + a = np.zeros((1, 1)) b = np.zeros(a.shape, MaskType) - c = masked_where(b,a) + c = masked_where(b, a) a-c def test_masked_array_multiply(self,level=rlevel): """Ticket #254""" - a = np.ma.zeros((4,1)) - a[2,0] = np.ma.masked - b = np.zeros((4,2)) + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) a*b b*a def test_masked_array_repeat(self, level=rlevel): """Ticket #271""" - np.ma.array([1],mask=False).repeat(10) + np.ma.array([1], mask=False).repeat(10) def test_masked_array_repr_unicode(self): """Ticket #1256""" @@ -63,8 +63,8 @@ class TestRegression(TestCase): def test_ddof_corrcoef(self): # See gh-3336 - x = np.ma.masked_equal([1,2,3,4,5], 4) - y = np.array([2,2.5,3.1,3,5]) + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) r0 = np.ma.corrcoef(x, y, ddof=0) r1 = np.ma.corrcoef(x, y, ddof=1) # ddof should not have an effect (it gets cancelled out) diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index 7014246d4..19058fec2 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -26,23 +26,23 @@ class SubArray(np.ndarray): x.info = info return x def __array_finalize__(self, obj): - self.info = getattr(obj,'info',{}) + self.info = getattr(obj, 'info', {}) return def __add__(self, other): result = np.ndarray.__add__(self, other) - result.info.update({'added':result.info.pop('added',0)+1}) + result.info.update({'added':result.info.pop('added', 0)+1}) return result subarray = SubArray -class MSubArray(SubArray,MaskedArray): +class MSubArray(SubArray, MaskedArray): def __new__(cls, data, info={}, mask=nomask): subarr = SubArray(data, info) _data = MaskedArray.__new__(cls, data=subarr, mask=mask) _data.info = subarr.info return _data - def __array_finalize__(self,obj): - MaskedArray.__array_finalize__(self,obj) + def __array_finalize__(self, obj): + MaskedArray.__array_finalize__(self, obj) SubArray.__array_finalize__(self, obj) return def _get_series(self): @@ -58,9 +58,9 @@ class MMatrix(MaskedArray, np.matrix,): mat = np.matrix(data) _data = MaskedArray.__new__(cls, data=mat, mask=mask) return _data - def __array_finalize__(self,obj): + def __array_finalize__(self, obj): np.matrix.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self,obj) + MaskedArray.__array_finalize__(self, obj) return def _get_series(self): _view = self.view(MaskedArray) @@ -81,7 +81,7 @@ class TestSubclassing(TestCase): def test_data_subclassing(self): "Tests whether the subclass is kept." 
x = np.arange(5) - m = [0,0,1,0,0] + m = [0, 0, 1, 0, 0] xsub = SubArray(x) xmsub = masked_array(xsub, mask=m) self.assertTrue(isinstance(xmsub, MaskedArray)) @@ -104,21 +104,21 @@ class TestSubclassing(TestCase): "Tests masked_binary_operation" (x, mx) = self.data # Result should be a mmatrix - self.assertTrue(isinstance(add(mx,mx), mmatrix)) - self.assertTrue(isinstance(add(mx,x), mmatrix)) + self.assertTrue(isinstance(add(mx, mx), mmatrix)) + self.assertTrue(isinstance(add(mx, x), mmatrix)) # Result should work - assert_equal(add(mx,x), mx+x) - self.assertTrue(isinstance(add(mx,mx)._data, np.matrix)) - self.assertTrue(isinstance(add.outer(mx,mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx,mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx,x), mmatrix)) + assert_equal(add(mx, x), mx+x) + self.assertTrue(isinstance(add(mx, mx)._data, np.matrix)) + self.assertTrue(isinstance(add.outer(mx, mx), mmatrix)) + self.assertTrue(isinstance(hypot(mx, mx), mmatrix)) + self.assertTrue(isinstance(hypot(mx, x), mmatrix)) def test_masked_binary_operations2(self): "Tests domained_masked_binary_operation" (x, mx) = self.data xmx = masked_array(mx.data.__array__(), mask=mx.mask) - self.assertTrue(isinstance(divide(mx,mx), mmatrix)) - self.assertTrue(isinstance(divide(mx,x), mmatrix)) + self.assertTrue(isinstance(divide(mx, mx), mmatrix)) + self.assertTrue(isinstance(divide(mx, x), mmatrix)) assert_equal(divide(mx, mx), divide(xmx, xmx)) def test_attributepropagation(self): @@ -127,7 +127,7 @@ class TestSubclassing(TestCase): ym = msubarray(x) # z = (my+1) - self.assertTrue(isinstance(z,MaskedArray)) + self.assertTrue(isinstance(z, MaskedArray)) self.assertTrue(not isinstance(z, MSubArray)) self.assertTrue(isinstance(z._data, SubArray)) assert_equal(z._data.info, {}) @@ -138,10 +138,10 @@ class TestSubclassing(TestCase): self.assertTrue(isinstance(z._data, SubArray)) self.assertTrue(z._data.info['added'] > 0) # - ym._set_mask([1,0,0,0,1]) - assert_equal(ym._mask, [1,0,0,0,1]) - ym._series._set_mask([0,0,0,0,1]) - assert_equal(ym._mask, [0,0,0,0,1]) + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) # xsub = subarray(x, info={'name':'x'}) mxsub = masked_array(xsub) @@ -151,8 +151,8 @@ class TestSubclassing(TestCase): def test_subclasspreservation(self): "Checks that masked_array(...,subok=True) preserves the class." 
x = np.arange(5) - m = [0,0,1,0,0] - xinfo = [(i,j) for (i,j) in zip(x,m)] + m = [0, 0, 1, 0, 0] + xinfo = [(i, j) for (i, j) in zip(x, m)] xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) # mxsub = masked_array(xsub, subok=False) diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py index 350412b85..b1c056cfc 100644 --- a/numpy/ma/timer_comparison.py +++ b/numpy/ma/timer_comparison.py @@ -83,7 +83,7 @@ class moduletester(object): header=header, names=('x', 'y')) assert cond, msg - val = comparison(x,y) + val = comparison(x, y) if m is not self.nomask and fill_value: val = self.masked_array(val, mask=m) if isinstance(val, bool): @@ -112,45 +112,45 @@ class moduletester(object): def test_0(self): "Tests creation" - x = np.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] xm = self.masked_array(x, mask=m) xm[0] def test_1(self): "Tests creation" - x = np.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = self.masked_array(x, mask=m1) ym = self.masked_array(y, mask=m2) z = np.array([-.5, 0., .5, .8]) - zm = self.masked_array(z, mask=[0,1,0,0]) + zm = self.masked_array(z, mask=[0, 1, 0, 0]) xf = np.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) assert((xm-ym).filled(0).any()) #fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) s = x.shape - assert(xm.size == reduce(lambda x,y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x,y:x+y, m1)) + assert(xm.size == reduce(lambda x, y:x*y, s)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) - for s in [(4,3), (6,2)]: + for s in [(4, 3), (6, 2)]: x.shape = s y.shape = s xm.shape = s ym.shape = s xf.shape = s - assert(self.count(xm) == len(m1) - reduce(lambda x,y:x+y, m1)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) def test_2(self): "Tests conversions and indexing" - x1 = np.array([1,2,4,3]) - x2 = self.array(x1, mask=[1,0,0,0]) - x3 = self.array(x1, mask=[0,1,0,1]) + x1 = np.array([1, 2, 4, 3]) + x2 = self.array(x1, mask=[1, 0, 0, 0]) + x3 = self.array(x1, mask=[0, 1, 0, 1]) x4 = self.array(x1) # test conversion to strings junk, garbage = str(x2), repr(x2) @@ -164,7 +164,7 @@ class moduletester(object): # assert self.allequal(x1[1:], x3[1:]) x1[2] = 9 x2[2] = 9 - self.assert_array_equal(x1,x2) + self.assert_array_equal(x1, x2) x1[1:3] = 99 x2[1:3] = 99 # assert self.allequal(x1,x2) @@ -175,22 +175,22 @@ class moduletester(object): x2[:] = x1 x2[1] = self.masked # assert self.allequal(self.getmask(x2),self.array([0,1,0,0])) - x3[:] = self.masked_array([1,2,3,4],[0,1,1,0]) + x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) # assert self.allequal(self.getmask(x3), self.array([0,1,1,0])) - x4[:] = self.masked_array([1,2,3,4],[0,1,1,0]) + x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) # assert self.allequal(self.getmask(x4), self.array([0,1,1,0])) # assert self.allequal(x4, self.array([1,2,3,4])) x1 = np.arange(5)*1.0 x2 = self.masked_values(x1, 3.0) # assert self.allequal(x1,x2) # assert self.allequal(self.array([0,0,0,1,0], self.MaskType), x2.mask) - x1 = 
self.array([1,'hello',2,3],object) - x2 = np.array([1,'hello',2,3],object) + x1 = self.array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) s1 = x1[1] s2 = x2[1] assert x1[1:1].shape == (0,) # Tests copy-size - n = [0,0,1,0,0] + n = [0, 0, 1, 0, 0] m = self.make_mask(n) m2 = self.make_mask(m) assert(m is m2) @@ -203,16 +203,16 @@ class moduletester(object): x4 = self.arange(4) x4[2] = self.masked y4 = self.resize(x4, (8,)) - assert self.allequal(self.concatenate([x4,x4]), y4) - assert self.allequal(self.getmask(y4),[0,0,1,0,0,0,1,0]) - y5 = self.repeat(x4, (2,2,2,2), axis=0) - self.assert_array_equal(y5, [0,0,1,1,2,2,3,3]) + assert self.allequal(self.concatenate([x4, x4]), y4) + assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) + self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) y6 = self.repeat(x4, 2, axis=0) assert self.allequal(y5, y6) - y7 = x4.repeat((2,2,2,2), axis=0) - assert self.allequal(y5,y7) - y8 = x4.repeat(2,0) - assert self.allequal(y5,y8) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert self.allequal(y5, y7) + y8 = x4.repeat(2, 0) + assert self.allequal(y5, y8) #---------------------------------- def test_4(self): @@ -220,17 +220,17 @@ class moduletester(object): x = self.arange(24) y = np.arange(24) x[5:6] = self.masked - x = x.reshape(2,3,4) - y = y.reshape(2,3,4) - assert self.allequal(np.transpose(y,(2,0,1)), self.transpose(x,(2,0,1))) - assert self.allequal(np.take(y, (2,0,1), 1), self.take(x, (2,0,1), 1)) - assert self.allequal(np.inner(self.filled(x,0), self.filled(y,0)), + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) + assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) + assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), self.inner(x, y)) - assert self.allequal(np.outer(self.filled(x,0), self.filled(y,0)), + assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), self.outer(x, y)) y = self.array(['abc', 1, 'def', 2, 3], object) y[2] = self.masked - t = self.take(y,[0,3,4]) + t = self.take(y, [0, 3, 4]) assert t[0] == 'abc' assert t[1] == 2 assert t[2] == 3 @@ -300,9 +300,9 @@ class moduletester(object): a[-1] = self.masked x += a xm += a - assert self.allequal(x,y+a) - assert self.allequal(xm,y+a) - assert self.allequal(xm.mask, self.mask_or(m,a.mask)) + assert self.allequal(x, y+a) + assert self.allequal(xm, y+a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) @@ -312,9 +312,9 @@ class moduletester(object): a[-1] = self.masked x -= a xm -= a - assert self.allequal(x,y-a) - assert self.allequal(xm,y-a) - assert self.allequal(xm.mask, self.mask_or(m,a.mask)) + assert self.allequal(x, y-a) + assert self.allequal(xm, y-a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) @@ -324,9 +324,9 @@ class moduletester(object): a[-1] = self.masked x *= a xm *= a - assert self.allequal(x,y*a) - assert self.allequal(xm,y*a) - assert self.allequal(xm.mask, self.mask_or(m,a.mask)) + assert self.allequal(x, y*a) + assert self.allequal(xm, y*a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) @@ -340,8 +340,8 @@ class moduletester(object): #---------------------------------- def test_7(self): "Tests ufunc" - d = (self.array([1.0, 0, 
-1, pi/2]*2, mask=[0,1]+[0]*6), - self.array([1.0, 0, -1, pi/2]*2, mask=[1,0]+[0]*6),) + d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), + self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', # 'sin', 'cos', 'tan', # 'arcsin', 'arccos', 'arctan', @@ -376,55 +376,55 @@ class moduletester(object): #---------------------------------- def test_99(self): # test average - ott = self.array([0.,1.,2.,3.], mask=[1,0,0,0]) - self.assert_array_equal(2.0, self.average(ott,axis=0)) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + self.assert_array_equal(2.0, self.average(ott, axis=0)) self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) - result, wts = self.average(ott, weights=[1.,1.,2.,1.], returned=1) + result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) self.assert_array_equal(2.0, result) assert(wts == 4.0) ott[:] = self.masked - assert(self.average(ott,axis=0) is self.masked) - ott = self.array([0.,1.,2.,3.], mask=[1,0,0,0]) - ott = ott.reshape(2,2) - ott[:,1] = self.masked - self.assert_array_equal(self.average(ott,axis=0), [2.0, 0.0]) - assert(self.average(ott,axis=1)[0] is self.masked) - self.assert_array_equal([2.,0.], self.average(ott, axis=0)) + assert(self.average(ott, axis=0) is self.masked) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = self.masked + self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) + assert(self.average(ott, axis=1)[0] is self.masked) + self.assert_array_equal([2., 0.], self.average(ott, axis=0)) result, wts = self.average(ott, axis=0, returned=1) self.assert_array_equal(wts, [1., 0.]) - w1 = [0,1,1,1,1,0] - w2 = [[0,1,1,1,1,0],[1,0,0,0,0,1]] + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = self.arange(6) self.assert_array_equal(self.average(x, axis=0), 2.5) self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) y = self.array([self.arange(6), 2.0*self.arange(6)]) self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) - self.assert_array_equal(self.average(y, axis=1), [self.average(x,axis=0), self.average(x,axis=0) * 2.0]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) 
- self.assert_array_equal(self.average(y, axis=0, weights=w2), [0.,1.,2.,3.,4.,10.]) - self.assert_array_equal(self.average(y, axis=1), [self.average(x,axis=0), self.average(x,axis=0) * 2.0]) + self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) m1 = self.zeros(6) - m2 = [0,0,1,1,0,0] - m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]] + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = self.ones(6) m5 = [0, 1, 1, 1, 1, 1] - self.assert_array_equal(self.average(self.masked_array(x, m1),axis=0), 2.5) - self.assert_array_equal(self.average(self.masked_array(x, m2),axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) # assert(self.average(masked_array(x, m4),axis=0) is masked) - self.assert_array_equal(self.average(self.masked_array(x, m5),axis=0), 0.0) - self.assert_array_equal(self.count(self.average(self.masked_array(x, m4),axis=0)), 0) + self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) + self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) z = self.masked_array(y, m3) self.assert_array_equal(self.average(z, None), 20./6.) - self.assert_array_equal(self.average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5]) + self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) - self.assert_array_equal(self.average(z,axis=0, weights=w2), [0.,1., 99., 99., 4.0, 10.0]) + self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) #------------------------ def test_A(self): x = self.arange(24) y = np.arange(24) x[5:6] = self.masked - x = x.reshape(2,3,4) + x = x.reshape(2, 3, 4) ################################################################################ @@ -441,7 +441,7 @@ if __name__ == '__main__': (nrepeat, nloop) = (10, 10) if 1: - for i in range(1,8): + for i in range(1, 8): func = 'tester.test_%i()' % i # new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10) cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) diff --git a/numpy/matlib.py b/numpy/matlib.py index 5a1ebe14a..677400367 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -168,8 +168,8 @@ def identity(n,dtype=None): [0, 0, 1]]) """ - a = array([1]+n*[0],dtype=dtype) - b = empty((n,n),dtype=dtype) + a = array([1]+n*[0], dtype=dtype) + b = empty((n, n), dtype=dtype) b.flat = a return b @@ -210,7 +210,7 @@ def eye(n,M=None, k=0, dtype=float): [ 0., 0., 0.]]) """ - return asmatrix(np.eye(n,M,k,dtype)) + return asmatrix(np.eye(n, M, k, dtype)) def rand(*args): """ @@ -347,12 +347,12 @@ def repmat(a, m, n): a = asanyarray(a) ndim = a.ndim if ndim == 0: - origrows, origcols = (1,1) + origrows, origcols = (1, 1) elif ndim == 1: origrows, origcols = (1, a.shape[0]) else: origrows, origcols = a.shape rows = origrows * m cols = origcols * n - c = a.reshape(1,a.size).repeat(m, 0).reshape(rows, origcols).repeat(n,0) + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) return c.reshape(rows, cols) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 1ca835af2..0a73725c2 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -39,7 +39,7 @@ else: del k def _eval(astr): - str_ = astr.translate(_table,_todelete) + str_ = 
astr.translate(_table, _todelete) if not str_: raise TypeError("Invalid data string supplied: " + astr) else: @@ -95,7 +95,7 @@ def asmatrix(data, dtype=None): """ return matrix(data, dtype=dtype, copy=False) -def matrix_power(M,n): +def matrix_power(M, n): """ Raise a square matrix to the (integer) power `n`. @@ -169,7 +169,7 @@ def matrix_power(M,n): M = asanyarray(M) if len(M.shape) != 2 or M.shape[0] != M.shape[1]: raise ValueError("input must be a square array") - if not issubdtype(type(n),int): + if not issubdtype(type(n), int): raise TypeError("exponent must be an integer") from numpy.linalg import inv @@ -185,21 +185,21 @@ def matrix_power(M,n): result = M if n <= 3: for _ in range(n-1): - result=N.dot(result,M) + result=N.dot(result, M) return result # binary decomposition to reduce the number of Matrix # multiplications for n > 3. beta = binary_repr(n) - Z,q,t = M,0,len(beta) + Z, q, t = M, 0, len(beta) while beta[t-q-1] == '0': - Z = N.dot(Z,Z) + Z = N.dot(Z, Z) q += 1 result = Z - for k in range(q+1,t): - Z = N.dot(Z,Z) + for k in range(q+1, t): + Z = N.dot(Z, Z) if beta[t-k-1] == '1': - result = N.dot(result,Z) + result = N.dot(result, Z) return result @@ -271,9 +271,9 @@ class matrix(N.ndarray): if (ndim > 2): raise ValueError("matrix must be 2-dimensional") elif ndim == 0: - shape = (1,1) + shape = (1, 1) elif ndim == 1: - shape = (1,shape[0]) + shape = (1, shape[0]) order = False if (ndim == 2) and arr.flags.fortran: @@ -304,9 +304,9 @@ class matrix(N.ndarray): else: newshape = self.shape if ndim == 0: - self.shape = (1,1) + self.shape = (1, 1) elif ndim == 1: - self.shape = (1,newshape[0]) + self.shape = (1, newshape[0]) return def __getitem__(self, index): @@ -330,13 +330,13 @@ class matrix(N.ndarray): except: n = 0 if n > 1 and isscalar(index[1]): - out.shape = (sh,1) + out.shape = (sh, 1) else: - out.shape = (1,sh) + out.shape = (1, sh) return out def __mul__(self, other): - if isinstance(other,(N.ndarray, list, tuple)) : + if isinstance(other, (N.ndarray, list, tuple)) : # This promotes 1-D vectors to row vectors return N.dot(self, asmatrix(other)) if isscalar(other) or not hasattr(other, '__rmul__') : @@ -378,7 +378,7 @@ class matrix(N.ndarray): orientation. 
""" if axis is None: - return self[0,0] + return self[0, 0] elif axis==0: return self elif axis==1: @@ -391,7 +391,7 @@ class matrix(N.ndarray): to a scalar like _align, but are using keepdims=True """ if axis is None: - return self[0,0] + return self[0, 0] else: return self @@ -862,7 +862,7 @@ class matrix(N.ndarray): [ 0., 1.]]) """ - M,N = self.shape + M, N = self.shape if M == N: from numpy.dual import inv as func else: @@ -997,7 +997,7 @@ class matrix(N.ndarray): H = property(getH, None, doc="hermitian (conjugate) transpose") I = property(getI, None, doc="inverse") -def _from_string(str,gdict,ldict): +def _from_string(str, gdict, ldict): rows = str.split(';') rowtup = [] for row in rows: @@ -1018,8 +1018,8 @@ def _from_string(str,gdict,ldict): raise KeyError("%s not found" % (col,)) coltup.append(thismat) - rowtup.append(concatenate(coltup,axis=-1)) - return concatenate(rowtup,axis=0) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) def bmat(obj, ldict=None, gdict=None): @@ -1084,10 +1084,10 @@ def bmat(obj, ldict=None, gdict=None): arr_rows = [] for row in obj: if isinstance(row, N.ndarray): # not 2-d - return matrix(concatenate(obj,axis=-1)) + return matrix(concatenate(obj, axis=-1)) else: - arr_rows.append(concatenate(row,axis=-1)) - return matrix(concatenate(arr_rows,axis=0)) + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) if isinstance(obj, N.ndarray): return matrix(obj) diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index 7cfcdbe27..d1a4e4ab5 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -10,51 +10,51 @@ import collections class TestCtor(TestCase): def test_basic(self): - A = array([[1,2],[3,4]]) + A = array([[1, 2], [3, 4]]) mA = matrix(A) assert_(all(mA.A == A)) B = bmat("A,A;A,A") - C = bmat([[A,A], [A,A]]) - D = array([[1,2,1,2], - [3,4,3,4], - [1,2,1,2], - [3,4,3,4]]) + C = bmat([[A, A], [A, A]]) + D = array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) assert_(all(B.A == D)) assert_(all(C.A == D)) - E = array([[5,6],[7,8]]) - AEresult = matrix([[1,2,5,6],[3,4,7,8]]) - assert_(all(bmat([A,E]) == AEresult)) + E = array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(all(bmat([A, E]) == AEresult)) vec = arange(5) mvec = matrix(vec) - assert_(mvec.shape == (1,5)) + assert_(mvec.shape == (1, 5)) def test_exceptions(self): # Check for TypeError when called with invalid string data. 
assert_raises(TypeError, matrix, "invalid") def test_bmat_nondefault_str(self): - A = array([[1,2],[3,4]]) - B = array([[5,6],[7,8]]) - Aresult = array([[1,2,1,2], - [3,4,3,4], - [1,2,1,2], - [3,4,3,4]]) - Bresult = array([[5,6,5,6], - [7,8,7,8], - [5,6,5,6], - [7,8,7,8]]) - mixresult = array([[1,2,5,6], - [3,4,7,8], - [5,6,1,2], - [7,8,3,4]]) + A = array([[1, 2], [3, 4]]) + B = array([[5, 6], [7, 8]]) + Aresult = array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + Bresult = array([[5, 6, 5, 6], + [7, 8, 7, 8], + [5, 6, 5, 6], + [7, 8, 7, 8]]) + mixresult = array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) assert_(all(bmat("A,A;A,A") == Aresult)) - assert_(all(bmat("A,A;A,A",ldict={'A':B}) == Aresult)) - assert_raises(TypeError, bmat, "A,A;A,A",gdict={'A':B}) - assert_(all(bmat("A,A;A,A",ldict={'A':A},gdict={'A':B}) == Aresult)) - b2 = bmat("A,B;C,D",ldict={'A':A,'B':B},gdict={'C':B,'D':A}) + assert_(all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_(all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) assert_(all(b2 == mixresult)) @@ -63,12 +63,12 @@ class TestProperties(TestCase): """Test whether matrix.sum(axis=1) preserves orientation. Fails in NumPy <= 0.9.6.2127. """ - M = matrix([[1,2,0,0], - [3,4,0,0], - [1,2,1,2], - [3,4,3,4]]) - sum0 = matrix([8,12,4,6]) - sum1 = matrix([3,7,6,14]).T + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T sumall = 30 assert_array_equal(sum0, M.sum(axis=0)) assert_array_equal(sum1, M.sum(axis=1)) @@ -80,46 +80,46 @@ class TestProperties(TestCase): def test_prod(self): - x = matrix([[1,2,3],[4,5,6]]) + x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.prod(), 720) - assert_equal(x.prod(0), matrix([[4,10,18]])) - assert_equal(x.prod(1), matrix([[6],[120]])) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) assert_equal(np.prod(x), 720) - assert_equal(np.prod(x, axis=0), matrix([[4,10,18]])) - assert_equal(np.prod(x, axis=1), matrix([[6],[120]])) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) - y = matrix([0,1,3]) + y = matrix([0, 1, 3]) assert_(y.prod() == 0) def test_max(self): - x = matrix([[1,2,3],[4,5,6]]) + x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.max(), 6) - assert_equal(x.max(0), matrix([[4,5,6]])) - assert_equal(x.max(1), matrix([[3],[6]])) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) assert_equal(np.max(x), 6) - assert_equal(np.max(x, axis=0), matrix([[4,5,6]])) - assert_equal(np.max(x, axis=1), matrix([[3],[6]])) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) def test_min(self): - x = matrix([[1,2,3],[4,5,6]]) + x = matrix([[1, 2, 3], [4, 5, 6]]) assert_equal(x.min(), 1) - assert_equal(x.min(0), matrix([[1,2,3]])) - assert_equal(x.min(1), matrix([[1],[4]])) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) assert_equal(np.min(x), 1) - assert_equal(np.min(x, axis=0), matrix([[1,2,3]])) - assert_equal(np.min(x, axis=1), matrix([[1],[4]])) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) def test_ptp(self): - x = np.arange(4).reshape((2,2)) + x = 
np.arange(4).reshape((2, 2)) assert_(x.ptp() == 3) assert_(all(x.ptp(0) == array([2, 2]))) assert_(all(x.ptp(1) == array([1, 1]))) def test_var(self): - x = np.arange(9).reshape((3,3)) + x = np.arange(9).reshape((3, 3)) mx = x.view(np.matrix) assert_equal(x.var(ddof=0), mx.var(ddof=0)) assert_equal(x.var(ddof=1), mx.var(ddof=1)) @@ -142,14 +142,14 @@ class TestProperties(TestCase): assert_(all(array(conjugate(transpose(B)) == mB.H))) def test_pinv(self): - x = matrix(arange(6).reshape(2,3)) + x = matrix(arange(6).reshape(2, 3)) xpinv = matrix([[-0.77777778, 0.27777778], [-0.11111111, 0.11111111], [ 0.55555556, -0.05555556]]) assert_almost_equal(x.I, xpinv) def test_comparisons(self): - A = arange(100).reshape(10,10) + A = arange(100).reshape(10, 10) mA = matrix(A) mB = matrix(A) + 0.1 assert_(all(mB == A+0.1)) @@ -173,34 +173,34 @@ class TestProperties(TestCase): assert_(all(abs(mB > 0))) def test_asmatrix(self): - A = arange(100).reshape(10,10) + A = arange(100).reshape(10, 10) mA = asmatrix(A) - A[0,0] = -10 - assert_(A[0,0] == mA[0,0]) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) def test_noaxis(self): - A = matrix([[1,0],[0,1]]) + A = matrix([[1, 0], [0, 1]]) assert_(A.sum() == matrix(2)) assert_(A.mean() == matrix(0.5)) def test_repr(self): - A = matrix([[1,0],[0,1]]) + A = matrix([[1, 0], [0, 1]]) assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") class TestCasting(TestCase): def test_basic(self): - A = arange(100).reshape(10,10) + A = arange(100).reshape(10, 10) mA = matrix(A) mB = mA.copy() - O = ones((10,10), float64) * 0.1 + O = ones((10, 10), float64) * 0.1 mB = mB + O assert_(mB.dtype.type == float64) assert_(all(mA != mB)) assert_(all(mB == mA+0.1)) mC = mA.copy() - O = ones((10,10), complex128) + O = ones((10, 10), complex128) mC = mC * O assert_(mC.dtype.type == complex128) assert_(all(mA != mB)) @@ -272,12 +272,12 @@ class TestMatrixReturn(TestCase): def test_instance_methods(self): a = matrix([1.0], dtype='f8') methodargs = { - 'astype' : ('intc',), - 'clip' : (0.0, 1.0), - 'compress' : ([1],), - 'repeat' : (1,), - 'reshape' : (1,), - 'swapaxes' : (0,0), + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), 'dot': np.array([1.0]), } excluded_methods = [ @@ -305,23 +305,23 @@ class TestMatrixReturn(TestCase): assert_(type(b) is matrix, "%s" % attrib) assert_(type(a.real) is matrix) assert_(type(a.imag) is matrix) - c,d = matrix([0.0]).nonzero() + c, d = matrix([0.0]).nonzero() assert_(type(c) is matrix) assert_(type(d) is matrix) class TestIndexing(TestCase): def test_basic(self): - x = asmatrix(zeros((3,2),float)) - y = zeros((3,1),float) - y[:,0] = [0.8,0.2,0.3] - x[:,1] = y>0.5 - assert_equal(x, [[0,1],[0,0],[0,0]]) + x = asmatrix(zeros((3, 2), float)) + y = zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y>0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) class TestNewScalarIndexing(TestCase): def setUp(self): - self.a = matrix([[1, 2],[3,4]]) + self.a = matrix([[1, 2], [3, 4]]) def test_dimesions(self): a = self.a @@ -331,64 +331,64 @@ class TestNewScalarIndexing(TestCase): def test_array_from_matrix_list(self): a = self.a x = array([a, a]) - assert_equal(x.shape, [2,2,2]) + assert_equal(x.shape, [2, 2, 2]) def test_array_to_list(self): a = self.a - assert_equal(a.tolist(),[[1, 2], [3, 4]]) + assert_equal(a.tolist(), [[1, 2], [3, 4]]) def test_fancy_indexing(self): a = self.a - x = a[1, [0,1,0]] + x = a[1, [0, 1, 0]] assert_(isinstance(x, matrix)) assert_equal(x, matrix([[3, 4, 3]])) - x 
= a[[1,0]] + x = a[[1, 0]] assert_(isinstance(x, matrix)) assert_equal(x, matrix([[3, 4], [1, 2]])) - x = a[[[1],[0]],[[1,0],[0,1]]] + x = a[[[1], [0]], [[1, 0], [0, 1]]] assert_(isinstance(x, matrix)) assert_equal(x, matrix([[4, 3], [1, 2]])) def test_matrix_element(self): - x = matrix([[1,2,3],[4,5,6]]) - assert_equal(x[0][0],matrix([[1,2,3]])) - assert_equal(x[0][0].shape,(1,3)) - assert_equal(x[0].shape,(1,3)) - assert_equal(x[:,0].shape,(2,1)) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) x = matrix(0) - assert_equal(x[0,0],0) - assert_equal(x[0],0) - assert_equal(x[:,0].shape,x.shape) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) def test_scalar_indexing(self): - x = asmatrix(zeros((3,2),float)) - assert_equal(x[0,0],x[0][0]) + x = asmatrix(zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) def test_row_column_indexing(self): x = asmatrix(np.eye(2)) - assert_array_equal(x[0,:],[[1,0]]) - assert_array_equal(x[1,:],[[0,1]]) - assert_array_equal(x[:,0],[[1],[0]]) - assert_array_equal(x[:,1],[[0],[1]]) + assert_array_equal(x[0,:], [[1, 0]]) + assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) def test_boolean_indexing(self): A = arange(6) - A.shape = (3,2) + A.shape = (3, 2) x = asmatrix(A) - assert_array_equal(x[:,array([True,False])],x[:,0]) - assert_array_equal(x[array([True,False,False]),:],x[0,:]) + assert_array_equal(x[:, array([True, False])], x[:, 0]) + assert_array_equal(x[array([True, False, False]),:], x[0,:]) def test_list_indexing(self): A = arange(6) - A.shape = (3,2) + A.shape = (3, 2) x = asmatrix(A) - assert_array_equal(x[:,[1,0]],x[:,::-1]) - assert_array_equal(x[[2,1,0],:],x[::-1,:]) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) class TestPower(TestCase): def test_returntype(self): - a = array([[0,1],[0,0]]) + a = array([[0, 1], [0, 0]]) assert_(type(matrix_power(a, 2)) is ndarray) a = mat(a) assert_(type(matrix_power(a, 2)) is matrix) diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py index bed055615..fc5b1df17 100644 --- a/numpy/matrixlib/tests/test_multiarray.py +++ b/numpy/matrixlib/tests/test_multiarray.py @@ -5,14 +5,14 @@ from numpy.testing import * class TestView(TestCase): def test_type(self): - x = np.array([1,2,3]) - assert_(isinstance(x.view(np.matrix),np.matrix)) + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) def test_keywords(self): - x = np.array([(1,2)],dtype=[('a',np.int8),('b',np.int8)]) + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) # We must be specific about the endianness here: y = x.view(dtype='<i2', type=np.matrix) - assert_array_equal(y,[[513]]) + assert_array_equal(y, [[513]]) - assert_(isinstance(y,np.matrix)) + assert_(isinstance(y, np.matrix)) assert_equal(y.dtype, np.dtype('<i2')) diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py index 940bfc5d9..fa88f5288 100644 --- a/numpy/matrixlib/tests/test_numeric.py +++ b/numpy/matrixlib/tests/test_numeric.py @@ -6,5 +6,5 @@ from numpy import matrix class TestDot(TestCase): def test_matscalar(self): - b1 = matrix(ones((3,3),dtype=complex)) + b1 = matrix(ones((3, 3), dtype=complex)) assert_equal(b1*1.0, b1) diff --git 
a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py index 27cff2c21..4bbd44dcf 100644 --- a/numpy/matrixlib/tests/test_regression.py +++ b/numpy/matrixlib/tests/test_regression.py @@ -9,14 +9,14 @@ class TestRegression(TestCase): def test_kron_matrix(self,level=rlevel): """Ticket #71""" x = np.matrix('[1 0; 1 0]') - assert_equal(type(np.kron(x,x)),type(x)) + assert_equal(type(np.kron(x, x)), type(x)) def test_matrix_properties(self,level=rlevel): """Ticket #125""" - a = np.matrix([1.0],dtype=float) + a = np.matrix([1.0], dtype=float) assert_(type(a.real) is np.matrix) assert_(type(a.imag) is np.matrix) - c,d = np.matrix([0.0]).nonzero() + c, d = np.matrix([0.0]).nonzero() assert_(type(c) is np.matrix) assert_(type(d) is np.matrix) @@ -25,11 +25,10 @@ class TestRegression(TestCase): def mul() : np.mat(np.eye(2))*np.ones(2) - self.assertRaises(ValueError,mul) + self.assertRaises(ValueError, mul) def test_matrix_std_argmax(self,level=rlevel): """Ticket #83""" - x = np.asmatrix(np.random.uniform(0,1,(3,3))) + x = np.asmatrix(np.random.uniform(0, 1, (3, 3))) self.assertEqual(x.std().shape, ()) self.assertEqual(x.argmax().shape, ()) - diff --git a/numpy/numarray/alter_code1.py b/numpy/numarray/alter_code1.py index a80a5ae3c..ff50d7c50 100644 --- a/numpy/numarray/alter_code1.py +++ b/numpy/numarray/alter_code1.py @@ -69,12 +69,12 @@ def changeimports(fstr, name, newname): name_ = name if ('.' in name): - name_ = name.replace('.','_') + name_ = name.replace('.', '_') fstr = re.sub(r'(import\s+[^,\n\r]+,\s*)(%s)' % name, "\\1%s as %s" % (newname, name), fstr) fstr = fstr.replace(importasstr, 'import %s as ' % newname) - fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name_)) + fstr = fstr.replace(importstr, 'import %s as %s' % (newname, name_)) if (name_ != name): fstr = fstr.replace(name, name_) @@ -82,7 +82,7 @@ def changeimports(fstr, name, newname): Nlen = len(fromstr) Nlen2 = len("from %s import " % newname) while True: - found = fstr.find(fromstr,ind) + found = fstr.find(fromstr, ind) if (found < 0): break ind = found + Nlen @@ -106,16 +106,16 @@ def addimport(astr): def replaceattr(astr): astr = astr.replace(".imaginary", ".imag") - astr = astr.replace(".byteswapped()",".byteswap(False)") + astr = astr.replace(".byteswapped()", ".byteswap(False)") astr = astr.replace(".byteswap()", ".byteswap(True)") astr = astr.replace(".isaligned()", ".flags.aligned") - astr = astr.replace(".iscontiguous()",".flags.contiguous") - astr = astr.replace(".is_fortran_contiguous()",".flags.fortran") - astr = astr.replace(".itemsize()",".itemsize") - astr = astr.replace(".size()",".size") - astr = astr.replace(".nelements()",".size") - astr = astr.replace(".typecode()",".dtype.char") - astr = astr.replace(".stddev()",".std()") + astr = astr.replace(".iscontiguous()", ".flags.contiguous") + astr = astr.replace(".is_fortran_contiguous()", ".flags.fortran") + astr = astr.replace(".itemsize()", ".itemsize") + astr = astr.replace(".size()", ".size") + astr = astr.replace(".nelements()", ".size") + astr = astr.replace(".typecode()", ".dtype.char") + astr = astr.replace(".stddev()", ".std()") astr = astr.replace(".getshape()", ".shape") astr = astr.replace(".getflat()", ".ravel()") astr = astr.replace(".getreal", ".real") @@ -123,9 +123,9 @@ def replaceattr(astr): astr = astr.replace(".getimaginary", ".imag") # preserve uses of flat that should be o.k. 
- tmpstr = flatindex_re.sub(r"@@@@\2",astr) + tmpstr = flatindex_re.sub(r"@@@@\2", astr) # replace other uses of flat - tmpstr = tmpstr.replace(".flat",".ravel()") + tmpstr = tmpstr.replace(".flat", ".ravel()") # put back .flat where it was valid astr = tmpstr.replace("@@@@", ".flat") return astr @@ -222,7 +222,7 @@ def convertall(direc=os.path.curdir, orig=1): <usesnumeric>.py.orig. A new file named <usesnumeric>.py is then written with the updated code. """ - files = glob.glob(os.path.join(direc,'*.py')) + files = glob.glob(os.path.join(direc, '*.py')) for afile in files: if afile[-8:] == 'setup.py': continue convertfile(afile, orig) @@ -234,16 +234,16 @@ def convertsrc(direc=os.path.curdir, ext=None, orig=1): directory with extension give by list ext (if ext is None, then all files are replaced).""" if ext is None: - files = glob.glob(os.path.join(direc,'*')) + files = glob.glob(os.path.join(direc, '*')) else: files = [] for aext in ext: - files.extend(glob.glob(os.path.join(direc,"*.%s" % aext))) + files.extend(glob.glob(os.path.join(direc, "*.%s" % aext))) for afile in files: fid = open(afile) fstr = fid.read() fid.close() - fstr, n = header_re.subn(r'numpy/libnumarray.h',fstr) + fstr, n = header_re.subn(r'numpy/libnumarray.h', fstr) if n > 0: if orig: base, ext = os.path.splitext(afile) @@ -254,7 +254,7 @@ def convertsrc(direc=os.path.curdir, ext=None, orig=1): def _func(arg, dirname, fnames): convertall(dirname, orig=0) - convertsrc(dirname, ['h','c'], orig=0) + convertsrc(dirname, ['h', 'c'], orig=0) def converttree(direc=os.path.curdir): """Convert all .py files in the tree given diff --git a/numpy/numarray/alter_code2.py b/numpy/numarray/alter_code2.py index 18c604afb..5771a7285 100644 --- a/numpy/numarray/alter_code2.py +++ b/numpy/numarray/alter_code2.py @@ -50,7 +50,7 @@ def convertall(direc=os.path.curdir): <usesnumeric>.py.orig. A new file named <usesnumeric>.py is then written with the updated code. """ - files = glob.glob(os.path.join(direc,'*.py')) + files = glob.glob(os.path.join(direc, '*.py')) for afile in files: convertfile(afile) diff --git a/numpy/numarray/functions.py b/numpy/numarray/functions.py index 78d05e5f5..f37e0f054 100644 --- a/numpy/numarray/functions.py +++ b/numpy/numarray/functions.py @@ -107,7 +107,7 @@ def and_(a, b): def divide_remainder(a, b): a, b = asarray(a), asarray(b) - return (a/b,a%b) + return (a/b, a%b) def around(array, digits=0, output=None): ret = np.around(array, digits, output) @@ -211,13 +211,13 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT, ##try to estimate file size try: curpos=infile.tell() - infile.seek(0,2) + infile.seek(0, 2) endpos=infile.tell() infile.seek(curpos) except (AttributeError, IOError): initsize=blocksize else: - initsize=max(1,(endpos-curpos)//recsize)*recsize + initsize=max(1, (endpos-curpos)//recsize)*recsize buf = np.newbuffer(initsize) @@ -228,7 +228,7 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT, break ##do we have space? if len(buf) < bytesread+blocksize: - buf=_resizebuf(buf,len(buf)+blocksize) + buf=_resizebuf(buf, len(buf)+blocksize) ## or rather a=resizebuf(a,2*len(a)) ? 
assert len(buf) >= bytesread+blocksize buf[bytesread:bytesread+blocksize]=data @@ -241,7 +241,7 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT, _warnings.warn("Filesize does not match specified shape", SizeMismatchWarning) try: - infile.seek(-(len(data) % recsize),1) + infile.seek(-(len(data) % recsize), 1) except AttributeError: _warnings.warn("Could not rewind (no seek support)", FileSeekWarning) @@ -250,7 +250,7 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT, FileSeekWarning) datasize = (len(data)//recsize) * recsize if len(buf) != bytesread+datasize: - buf=_resizebuf(buf,bytesread+datasize) + buf=_resizebuf(buf, bytesread+datasize) buf[bytesread:bytesread+datasize]=data[:datasize] ##deduce shape from len(buf) shape = list(shape) @@ -265,7 +265,7 @@ def fromfile(infile, type=None, shape=None, sizing=STRICT, # this function is referenced in the code above but not defined. adding # it back. - phensley -def _resizebuf(buf,newsize): +def _resizebuf(buf, newsize): "Return a copy of BUF of size NEWSIZE." newbuf = np.newbuffer(newsize) if newsize > len(buf): @@ -389,7 +389,7 @@ def info(obj, output=sys.stdout, numpy=0): print("data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), file=output) print("byteorder: ", end=' ', file=output) endian = obj.dtype.byteorder - if endian in ['|','=']: + if endian in ['|', '=']: print("%s%s%s" % (tic, sys.byteorder, tic), file=output) byteswap = False elif endian == '>': @@ -472,14 +472,14 @@ def tensormultiply(a1, a2): raise ValueError("Unmatched dimensions") shape = a1.shape[:-1] + a2.shape[1:] return np.reshape(dot(np.reshape(a1, (-1, a1.shape[-1])), - np.reshape(a2, (a2.shape[0],-1))), + np.reshape(a2, (a2.shape[0], -1))), shape) def cumsum(a1, axis=0, out=None, type=None, dim=0): - return np.asarray(a1).cumsum(axis,dtype=type,out=out) + return np.asarray(a1).cumsum(axis, dtype=type, out=out) def cumproduct(a1, axis=0, out=None, type=None, dim=0): - return np.asarray(a1).cumprod(axis,dtype=type,out=out) + return np.asarray(a1).cumprod(axis, dtype=type, out=out) def argmax(x, axis=-1): return np.argmax(x, axis) diff --git a/numpy/numarray/include/numpy/cfunc.h b/numpy/numarray/include/numpy/cfunc.h index 1739290ae..1c03c2100 100644 --- a/numpy/numarray/include/numpy/cfunc.h +++ b/numpy/numarray/include/numpy/cfunc.h @@ -75,4 +75,3 @@ typedef struct { CFUNC_DESCR(name, CFUNC_NSTRIDING, 0, 2, 1, 0, 0, 0, 0, 0, 0) #endif - diff --git a/numpy/numarray/include/numpy/ieeespecial.h b/numpy/numarray/include/numpy/ieeespecial.h index 0f3fff2a9..3d2afc0e1 100644 --- a/numpy/numarray/include/numpy/ieeespecial.h +++ b/numpy/numarray/include/numpy/ieeespecial.h @@ -121,4 +121,3 @@ typedef enum #define MSK_NEG_ZERO BIT(NEG_ZERO_BIT) #define MSK_INDETERM BIT(INDETERM_BIT) #define MSK_BUG BIT(BUG_BIT) - diff --git a/numpy/numarray/numerictypes.py b/numpy/numarray/numerictypes.py index 21685c34d..739b855a1 100644 --- a/numpy/numarray/numerictypes.py +++ b/numpy/numarray/numerictypes.py @@ -32,17 +32,17 @@ from __future__ import division, absolute_import, print_function import numpy from numpy.compat import long -__all__ = ['NumericType','HasUInt64','typeDict','IsType', +__all__ = ['NumericType', 'HasUInt64', 'typeDict', 'IsType', 'BooleanType', 'SignedType', 'UnsignedType', 'IntegralType', 'SignedIntegralType', 'UnsignedIntegralType', 'FloatingType', 'ComplexType', 'AnyType', 'ObjectType', 'Any', 'Object', 'Bool', 'Int8', 'Int16', 'Int32', 'Int64', 'Float32', 'Float64', 'UInt8', 'UInt16', 'UInt32', 'UInt64', - 'Complex32', 
'Complex64', 'Byte', 'Short', 'Int','Long', + 'Complex32', 'Complex64', 'Byte', 'Short', 'Int', 'Long', 'Float', 'Complex', 'genericTypeRank', 'pythonTypeRank', 'pythonTypeMap', 'scalarTypeMap', 'genericCoercions', - 'typecodes', 'genericPromotionExclusions','MaximumType', - 'getType','scalarTypes', 'typefrom'] + 'typecodes', 'genericPromotionExclusions', 'MaximumType', + 'getType', 'scalarTypes', 'typefrom'] MAX_ALIGN = 8 MAX_INT_SIZE = 8 @@ -305,23 +305,23 @@ _scipy_alias("complex64", "Complex32") # Ranking of types from lowest to highest (sorta) if not HasUInt64: - genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16', + genericTypeRank = ['Bool', 'Int8', 'UInt8', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', - 'Float32','Float64', 'Complex32', 'Complex64', 'Object'] + 'Float32', 'Float64', 'Complex32', 'Complex64', 'Object'] else: - genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16', + genericTypeRank = ['Bool', 'Int8', 'UInt8', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', 'UInt64', - 'Float32','Float64', 'Complex32', 'Complex64', 'Object'] + 'Float32', 'Float64', 'Complex32', 'Complex64', 'Object'] pythonTypeRank = [ bool, int, long, float, complex ] # The next line is not platform independent XXX Needs to be generalized if not LP64: pythonTypeMap = { - int:("Int32","int"), - long:("Int64","int"), - float:("Float64","float"), - complex:("Complex64","complex")} + int:("Int32", "int"), + long:("Int64", "int"), + float:("Float64", "float"), + complex:("Complex64", "complex")} scalarTypeMap = { int:"Int32", @@ -330,10 +330,10 @@ if not LP64: complex:"Complex64"} else: pythonTypeMap = { - int:("Int64","int"), - long:("Int64","int"), - float:("Float64","float"), - complex:("Complex64","complex")} + int:("Int64", "int"), + long:("Int64", "int"), + float:("Float64", "float"), + complex:("Complex64", "complex")} scalarTypeMap = { int:"Int64", @@ -341,7 +341,7 @@ else: float:"Float64", complex:"Complex64"} -pythonTypeMap.update({bool:("Bool","bool") }) +pythonTypeMap.update({bool:("Bool", "bool") }) scalarTypeMap.update({bool:"Bool"}) # Generate coercion matrix @@ -415,16 +415,16 @@ def _initGenericCoercions(): genericCoercions[("Complex32", "UInt64")] = "Complex64" genericCoercions[("UInt64", "Complex32")] = "Complex64" - genericCoercions[("Int64","Float32")] = "Float64" + genericCoercions[("Int64", "Float32")] = "Float64" genericCoercions[("Float32", "Int64")] = "Float64" - genericCoercions[("UInt64","Float32")] = "Float64" + genericCoercions[("UInt64", "Float32")] = "Float64" genericCoercions[("Float32", "UInt64")] = "Float64" genericCoercions[(float, "Bool")] = "Float64" genericCoercions[("Bool", float)] = "Float64" - genericCoercions[(float,float,float)] = "Float64" # for scipy.special - genericCoercions[(int,int,float)] = "Float64" # for scipy.special + genericCoercions[(float, float, float)] = "Float64" # for scipy.special + genericCoercions[(int, int, float)] = "Float64" # for scipy.special _initGenericCoercions() @@ -433,12 +433,12 @@ genericPromotionExclusions = { 'Bool': (), 'Int8': (), 'Int16': (), - 'Int32': ('Float32','Complex32'), + 'Int32': ('Float32', 'Complex32'), 'UInt8': (), 'UInt16': (), - 'UInt32': ('Float32','Complex32'), - 'Int64' : ('Float32','Complex32'), - 'UInt64' : ('Float32','Complex32'), + 'UInt32': ('Float32', 'Complex32'), + 'Int64' : ('Float32', 'Complex32'), + 'UInt64' : ('Float32', 'Complex32'), 'Float32': (), 'Float64': ('Complex32',), 'Complex32':(), @@ -510,7 +510,7 @@ def getType(type): except KeyError: raise TypeError("Not a numeric type") 
-scalarTypes = (bool,int,long,float,complex) +scalarTypes = (bool, int, long, float, complex) _scipy_dtypechar = { Int8 : 'b', @@ -528,7 +528,7 @@ _scipy_dtypechar = { } _scipy_dtypechar_inverse = {} -for key,value in _scipy_dtypechar.items(): +for key, value in _scipy_dtypechar.items(): _scipy_dtypechar_inverse[value] = key _val = numpy.int_(0).itemsize diff --git a/numpy/numarray/session.py b/numpy/numarray/session.py index e40cd4033..7e9c2f1fd 100644 --- a/numpy/numarray/session.py +++ b/numpy/numarray/session.py @@ -119,9 +119,9 @@ def _callers_modules(): global namespace.""" g = _callers_globals() mods = [] - for k,v in g.items(): + for k, v in g.items(): if isinstance(v, type(sys)): - mods.append(getattr(v,"__name__")) + mods.append(getattr(v, "__name__")) return mods def _errout(*args): @@ -154,7 +154,7 @@ class _ModuleProxy(object): try: self = _loadmodule(name) except ImportError: - _errout("warning: module", name,"import failed.") + _errout("warning: module", name, "import failed.") return self def __getnewargs__(self): @@ -192,13 +192,13 @@ class _ObjectProxy(object): except (ImportError, KeyError): _errout("warning: loading object proxy", module + "." + name, "module import failed.") - return _ProxyingFailure(module,name,_type2) + return _ProxyingFailure(module, name, _type2) try: self = getattr(m, name) except AttributeError: _errout("warning: object proxy", module + "." + name, "wouldn't reload from", m) - return _ProxyingFailure(module,name,_type2) + return _ProxyingFailure(module, name, _type2) return self def __getnewargs__(self): @@ -234,7 +234,7 @@ def _locate(modules, object): for mname in modules: m = sys.modules[mname] if m: - for k,v in m.__dict__.items(): + for k, v in m.__dict__.items(): if v is object: return m.__name__, k else: @@ -276,7 +276,7 @@ def save(variables=None, file=SAVEFILE, dictionary=None, verbose=False): p = pickle.Pickler(file, protocol=2) - _verbose("variables:",keys) + _verbose("variables:", keys) for k in keys: v = dictionary[k] _verbose("saving", k, type(v)) @@ -291,7 +291,7 @@ def save(variables=None, file=SAVEFILE, dictionary=None, verbose=False): try: module, name = _locate(source_modules, v) except ObjectNotFound: - _errout("warning: couldn't find object",k, + _errout("warning: couldn't find object", k, "in any module... 
skipping.") continue else: diff --git a/numpy/numarray/setup.py b/numpy/numarray/setup.py index 5c9574917..2c2b804c9 100644 --- a/numpy/numarray/setup.py +++ b/numpy/numarray/setup.py @@ -4,7 +4,7 @@ from os.path import join def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('numarray',parent_package,top_path) + config = Configuration('numarray', parent_package, top_path) config.add_data_files('include/numpy/*') diff --git a/numpy/oldnumeric/alter_code1.py b/numpy/oldnumeric/alter_code1.py index 34a59a7ca..2d4e17106 100644 --- a/numpy/oldnumeric/alter_code1.py +++ b/numpy/oldnumeric/alter_code1.py @@ -67,7 +67,7 @@ for char in _chars.keys(): def fixtypechars(fstr): for name in _func2 + _func4 + _meth1: - fstr = func_re[name].sub('\\1B\\2',fstr) + fstr = func_re[name].sub('\\1B\\2', fstr) for char in _chars.keys(): fstr = meth_re[char].sub('\\1%s\\2'%_chars[char], fstr) return fstr @@ -83,13 +83,13 @@ def changeimports(fstr, name, newname): fstr = re.sub(r'(import\s+[^,\n\r]+,\s*)(%s)' % name, "\\1%s as %s" % (newname, name), fstr) fstr = fstr.replace(importasstr, 'import %s as ' % newname) - fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name)) + fstr = fstr.replace(importstr, 'import %s as %s' % (newname, name)) ind = 0 Nlen = len(fromstr) Nlen2 = len("from %s import " % newname) while True: - found = fstr.find(fromstr,ind) + found = fstr.find(fromstr, ind) if (found < 0): break ind = found + Nlen @@ -111,15 +111,15 @@ def fixistesting(astr): return astr def replaceattr(astr): - astr = astr.replace(".typecode()",".dtype.char") - astr = astr.replace(".iscontiguous()",".flags.contiguous") - astr = astr.replace(".byteswapped()",".byteswap()") + astr = astr.replace(".typecode()", ".dtype.char") + astr = astr.replace(".iscontiguous()", ".flags.contiguous") + astr = astr.replace(".byteswapped()", ".byteswap()") astr = astr.replace(".toscalar()", ".item()") - astr = astr.replace(".itemsize()",".itemsize") + astr = astr.replace(".itemsize()", ".itemsize") # preserve uses of flat that should be o.k. - tmpstr = flatindex_re.sub(r"@@@@\2",astr) + tmpstr = flatindex_re.sub(r"@@@@\2", astr) # replace other uses of flat - tmpstr = tmpstr.replace(".flat",".ravel()") + tmpstr = tmpstr.replace(".flat", ".ravel()") # put back .flat where it was valid astr = tmpstr.replace("@@@@", ".flat") return astr @@ -128,7 +128,7 @@ svspc2 = re.compile(r'([^,(\s]+[.]spacesaver[(][)])') svspc3 = re.compile(r'(\S+[.]savespace[(].*[)])') #shpe = re.compile(r'(\S+\s*)[.]shape\s*=[^=]\s*(.+)') def replaceother(astr): - astr = svspc2.sub('True',astr) + astr = svspc2.sub('True', astr) astr = svspc3.sub(r'pass ## \1', astr) #astr = shpe.sub('\\1=\\1.reshape(\\2)', astr) return astr @@ -139,7 +139,7 @@ def fromstr(filestr): filestr = fixtypechars(filestr) filestr = fixistesting(filestr) filestr, fromall1 = changeimports(filestr, 'Numeric', 'numpy.oldnumeric') - filestr, fromall1 = changeimports(filestr, 'multiarray','numpy.oldnumeric') + filestr, fromall1 = changeimports(filestr, 'multiarray', 'numpy.oldnumeric') filestr, fromall1 = changeimports(filestr, 'umath', 'numpy.oldnumeric') filestr, fromall1 = changeimports(filestr, 'Precision', 'numpy.oldnumeric.precision') filestr, fromall1 = changeimports(filestr, 'UserArray', 'numpy.oldnumeric.user_array') @@ -199,7 +199,7 @@ def convertall(direc=os.path.curdir, orig=1): <usesnumeric>.py.orig. A new file named <usesnumeric>.py is then written with the updated code. 
""" - files = glob.glob(os.path.join(direc,'*.py')) + files = glob.glob(os.path.join(direc, '*.py')) for afile in files: if afile[-8:] == 'setup.py': continue # skip these convertfile(afile, orig) @@ -211,16 +211,16 @@ def convertsrc(direc=os.path.curdir, ext=None, orig=1): directory with extension give by list ext (if ext is None, then all files are replaced).""" if ext is None: - files = glob.glob(os.path.join(direc,'*')) + files = glob.glob(os.path.join(direc, '*')) else: files = [] for aext in ext: - files.extend(glob.glob(os.path.join(direc,"*.%s" % aext))) + files.extend(glob.glob(os.path.join(direc, "*.%s" % aext))) for afile in files: fid = open(afile) fstr = fid.read() fid.close() - fstr, n = header_re.subn(r'numpy/oldnumeric.h',fstr) + fstr, n = header_re.subn(r'numpy/oldnumeric.h', fstr) if n > 0: if orig: base, ext = os.path.splitext(afile) @@ -231,7 +231,7 @@ def convertsrc(direc=os.path.curdir, ext=None, orig=1): def _func(arg, dirname, fnames): convertall(dirname, orig=0) - convertsrc(dirname, ext=['h','c'], orig=0) + convertsrc(dirname, ext=['h', 'c'], orig=0) def converttree(direc=os.path.curdir): """Convert all .py files and source code files in the tree given diff --git a/numpy/oldnumeric/alter_code2.py b/numpy/oldnumeric/alter_code2.py index c163c9565..1ec15c855 100644 --- a/numpy/oldnumeric/alter_code2.py +++ b/numpy/oldnumeric/alter_code2.py @@ -35,11 +35,11 @@ import glob # To convert typecharacters we need to # Not very safe. Disabled for now.. def replacetypechars(astr): - astr = astr.replace("'s'","'h'") - astr = astr.replace("'b'","'B'") - astr = astr.replace("'1'","'b'") - astr = astr.replace("'w'","'H'") - astr = astr.replace("'u'","'I'") + astr = astr.replace("'s'", "'h'") + astr = astr.replace("'b'", "'B'") + astr = astr.replace("'1'", "'b'") + astr = astr.replace("'w'", "'H'") + astr = astr.replace("'u'", "'I'") return astr def changeimports(fstr, name, newname): @@ -49,13 +49,13 @@ def changeimports(fstr, name, newname): fromall=0 fstr = fstr.replace(importasstr, 'import %s as ' % newname) - fstr = fstr.replace(importstr, 'import %s as %s' % (newname,name)) + fstr = fstr.replace(importstr, 'import %s as %s' % (newname, name)) ind = 0 Nlen = len(fromstr) Nlen2 = len("from %s import " % newname) while True: - found = fstr.find(fromstr,ind) + found = fstr.find(fromstr, ind) if (found < 0): break ind = found + Nlen @@ -66,7 +66,7 @@ def changeimports(fstr, name, newname): return fstr, fromall def replaceattr(astr): - astr = astr.replace("matrixmultiply","dot") + astr = astr.replace("matrixmultiply", "dot") return astr def replaceother(astr): @@ -131,7 +131,7 @@ def convertall(direc=os.path.curdir): <usesnumeric>.py.orig. A new file named <usesnumeric>.py is then written with the updated code. """ - files = glob.glob(os.path.join(direc,'*.py')) + files = glob.glob(os.path.join(direc, '*.py')) for afile in files: convertfile(afile) diff --git a/numpy/oldnumeric/arrayfns.py b/numpy/oldnumeric/arrayfns.py index 534ccacf7..0eb97ae9c 100644 --- a/numpy/oldnumeric/arrayfns.py +++ b/numpy/oldnumeric/arrayfns.py @@ -43,21 +43,21 @@ def interp(y, x, z, typ=None): raise error("incompatible typecode") def nz(x): - x = asarray(x,dtype=np.ubyte) + x = asarray(x, dtype=np.ubyte) if x.ndim != 1: raise TypeError("intput must have 1 dimension.") indxs = np.flatnonzero(x != 0) return indxs[-1].item()+1 def reverse(x, n): - x = asarray(x,dtype='d') + x = asarray(x, dtype='d') if x.ndim != 2: raise ValueError("input must be 2-d") y = np.empty_like(x) if n == 0: y[...] 
= x[::-1,:] elif n == 1: - y[...] = x[:,::-1] + y[...] = x[:, ::-1] return y def span(lo, hi, num, d2=0): @@ -65,7 +65,7 @@ def span(lo, hi, num, d2=0): if d2 <= 0: return x else: - ret = np.empty((d2,num),x.dtype) + ret = np.empty((d2, num), x.dtype) ret[...] = x return ret @@ -84,7 +84,7 @@ def zmin_zmax(z, ireg): nix = np.r_[ix, x1m[i1], x1m[i1], ix[i2] ] niy = np.r_[iy, iy[i1], y1m[i3], y1m[i2]] # remove any negative indices - zres = z[nix,niy] + zres = z[nix, niy] return zres.min().item(), zres.max().item() diff --git a/numpy/oldnumeric/fix_default_axis.py b/numpy/oldnumeric/fix_default_axis.py index 5f6128724..d4235a94c 100644 --- a/numpy/oldnumeric/fix_default_axis.py +++ b/numpy/oldnumeric/fix_default_axis.py @@ -82,9 +82,9 @@ def _add_axis(fstr, name, repl): and fstr[start-8:start-1] != 'numerix' \ and fstr[start-8:start-1] != 'Numeric': continue - if fstr[start-1] in ['\t',' ']: + if fstr[start-1] in ['\t', ' ']: k = start-2 - while fstr[k] in ['\t',' ']: + while fstr[k] in ['\t', ' ']: k -= 1 if fstr[k-2:k+1] == 'def' or \ fstr[k-4:k+1] == 'class': @@ -200,8 +200,8 @@ def _import_change(fstr, names): ptr += 1 substr = fstr[ind:ptr] found = 0 - substr = substr.replace('\n',' ') - substr = substr.replace('\\','') + substr = substr.replace('\n', ' ') + substr = substr.replace('\\', '') importnames = [x.strip() for x in substr.split(',')] # determine if any of names are in importnames addnames = [] @@ -277,7 +277,7 @@ def convertall(direc=os.path.curdir, import_change=False): <usesnumeric>.py.orig. A new file named <usesnumeric>.py is then written with the updated code. """ - files = glob.glob(os.path.join(direc,'*.py')) + files = glob.glob(os.path.join(direc, '*.py')) for afile in files: convertfile(afile, import_change) diff --git a/numpy/oldnumeric/functions.py b/numpy/oldnumeric/functions.py index c5941fb67..156a09a43 100644 --- a/numpy/oldnumeric/functions.py +++ b/numpy/oldnumeric/functions.py @@ -12,7 +12,7 @@ __all__ = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue', 'cumsum', 'cumproduct', 'compress', 'fromfunction', 'ones', 'empty', 'identity', 'zeros', 'array', 'asarray', 'nonzero', 'reshape', 'arange', 'fromstring', 'ravel', 'trace', - 'indices', 'where','sarray','cross_product', 'argmax', 'argmin', + 'indices', 'where', 'sarray', 'cross_product', 'argmax', 'argmin', 'average'] def take(a, indicies, axis=0): @@ -55,7 +55,7 @@ def ones(shape, typecode='l', savespace=0, dtype=None): """ones(shape, dtype=int) returns an array of the given dimensions which is initialized to all ones. 
""" - dtype = convtypecode(typecode,dtype) + dtype = convtypecode(typecode, dtype) a = mu.empty(shape, dtype) a.fill(1) return a @@ -64,7 +64,7 @@ def zeros(shape, typecode='l', savespace=0, dtype=None): """zeros(shape, dtype=int) returns an array of the given dimensions which is initialized to all zeros """ - dtype = convtypecode(typecode,dtype) + dtype = convtypecode(typecode, dtype) return mu.zeros(shape, dtype) def identity(n,typecode='l', dtype=None): diff --git a/numpy/oldnumeric/linear_algebra.py b/numpy/oldnumeric/linear_algebra.py index f9938b503..8208637b8 100644 --- a/numpy/oldnumeric/linear_algebra.py +++ b/numpy/oldnumeric/linear_algebra.py @@ -24,7 +24,7 @@ import numpy.linalg as linalg LinAlgError = linalg.LinAlgError def solve_linear_equations(a, b): - return linalg.solve(a,b) + return linalg.solve(a, b) # Matrix inversion @@ -42,7 +42,7 @@ def eigenvalues(a): return linalg.eigvals(a) def Heigenvalues(a, UPLO='L'): - return linalg.eigvalsh(a,UPLO) + return linalg.eigvalsh(a, UPLO) # Eigenvectors @@ -79,7 +79,7 @@ the number of rows, then residuals will be returned as an empty array otherwise resids = sum((b-dot(A,x)**2). Singular values less than s[0]*rcond are treated as zero. """ - return linalg.lstsq(a,b,rcond) + return linalg.lstsq(a, b, rcond) def singular_value_decomposition(A, full_matrices=0): return linalg.svd(A, full_matrices) diff --git a/numpy/oldnumeric/ma.py b/numpy/oldnumeric/ma.py index fbc0aca27..d41c68edc 100644 --- a/numpy/oldnumeric/ma.py +++ b/numpy/oldnumeric/ma.py @@ -1157,22 +1157,22 @@ array(data = %(data)s, return self def __eq__(self, other): - return equal(self,other) + return equal(self, other) def __ne__(self, other): - return not_equal(self,other) + return not_equal(self, other) def __lt__(self, other): - return less(self,other) + return less(self, other) def __le__(self, other): - return less_equal(self,other) + return less_equal(self, other) def __gt__(self, other): - return greater(self,other) + return greater(self, other) def __ge__(self, other): - return greater_equal(self,other) + return greater_equal(self, other) def astype (self, tc): "return self as array of given type." @@ -1772,7 +1772,7 @@ def masked_where(condition, x, copy=1): """Return x as an array masked where condition is true. Also masked where x or condition masked. 
""" - cm = filled(condition,1) + cm = filled(condition, 1) m = mask_or(getmask(x), cm) return array(filled(x), copy=copy, mask=m) @@ -2179,7 +2179,7 @@ def _clip(self,a_min,a_max,out=None): return MaskedArray(data = self.data.clip(asarray(a_min).data, asarray(a_max).data), mask = mask_or(self.mask, - mask_or(getmask(a_min),getmask(a_max)))) + mask_or(getmask(a_min), getmask(a_max)))) array.clip = _m(_clip) def _compress(self, cond, axis=None, out=None): @@ -2237,7 +2237,7 @@ array.nonzero = _m(nonzero) array.prod = _m(product) def _ptp(a,axis=None,out=None): - return a.max(axis,out)-a.min(axis) + return a.max(axis, out)-a.min(axis) array.ptp = _m(_ptp) array.repeat = _m(new_repeat) array.resize = _m(resize) @@ -2257,7 +2257,7 @@ array.squeeze = _m(_squeeze) array.strides = property(_m(not_implemented)) array.sum = _m(sum) -def _swapaxes(self,axis1,axis2): +def _swapaxes(self, axis1, axis2): return MaskedArray(data = self.data.swapaxes(axis1, axis2), mask = self.mask.swapaxes(axis1, axis2)) array.swapaxes = _m(_swapaxes) @@ -2269,13 +2269,13 @@ array.transpose = _m(transpose) def _var(self,axis=None,dtype=None, out=None): if axis is None: return numeric.asarray(self.compressed()).var() - a = self.swapaxes(axis,0) + a = self.swapaxes(axis, 0) a = a - a.mean(axis=0) a *= a a /= a.count(axis=0) - return a.swapaxes(0,axis).sum(axis) + return a.swapaxes(0, axis).sum(axis) def _std(self,axis=None, dtype=None, out=None): - return (self.var(axis,dtype))**0.5 + return (self.var(axis, dtype))**0.5 array.var = _m(_var) array.std = _m(_std) diff --git a/numpy/oldnumeric/matrix.py b/numpy/oldnumeric/matrix.py index 25dfe6302..35c795e9d 100644 --- a/numpy/oldnumeric/matrix.py +++ b/numpy/oldnumeric/matrix.py @@ -28,7 +28,7 @@ _todelete = ''.join(_todelete) def _eval(astr): - return eval(astr.translate(_table,_todelete)) + return eval(astr.translate(_table, _todelete)) def _convert_from_string(data): data.find diff --git a/numpy/oldnumeric/mlab.py b/numpy/oldnumeric/mlab.py index bc2844a87..2b357612c 100644 --- a/numpy/oldnumeric/mlab.py +++ b/numpy/oldnumeric/mlab.py @@ -29,7 +29,7 @@ def eye(N, M=None, k=0, typecode=None, dtype=None): """ dtype = convtypecode(typecode, dtype) if M is None: M = N - m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)),-k) + m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)), -k) if m.dtype != dtype: return m.astype(dtype) @@ -39,7 +39,7 @@ def tri(N, M=None, k=0, typecode=None, dtype=None): """ dtype = convtypecode(typecode, dtype) if M is None: M = N - m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)),-k) + m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)), -k) if m.dtype != dtype: return m.astype(dtype) @@ -84,8 +84,8 @@ def cov(m, y=None, rowvar=0, bias=0): N = m.shape[0] if (y.shape[0] != N): raise ValueError("x and y must have the same number of observations") - m = m - _Nmean(m,axis=0) - y = y - _Nmean(y,axis=0) + m = m - _Nmean(m, axis=0) + y = y - _Nmean(y, axis=0) if bias: fact = N*1.0 else: @@ -96,7 +96,7 @@ from numpy import sqrt, multiply def corrcoef(x, y=None): c = cov(x, y) d = diag(c) - return c/sqrt(multiply.outer(d,d)) + return c/sqrt(multiply.outer(d, d)) from .compat import * from .functions import * diff --git a/numpy/oldnumeric/random_array.py b/numpy/oldnumeric/random_array.py index ecb3d0b23..c43a49cdb 100644 --- a/numpy/oldnumeric/random_array.py +++ b/numpy/oldnumeric/random_array.py @@ -3,7 +3,7 @@ """ from __future__ import division, absolute_import, print_function -__all__ = 
['ArgumentError','F','beta','binomial','chi_square', 'exponential', +__all__ = ['ArgumentError', 'F', 'beta', 'binomial', 'chi_square', 'exponential', 'gamma', 'get_seed', 'mean_var_test', 'multinomial', 'multivariate_normal', 'negative_binomial', 'noncentral_F', 'noncentral_chi_square', 'normal', 'permutation', 'poisson', @@ -19,7 +19,7 @@ def seed(x=0, y=0): if (x == 0 or y == 0): mt.seed() else: - mt.seed((x,y)) + mt.seed((x, y)) def get_seed(): raise NotImplementedError( @@ -189,14 +189,14 @@ def poisson(mean, shape=[]): def mean_var_test(x, type, mean, var, skew=[]): n = len(x) * 1.0 - x_mean = np.sum(x,axis=0)/n + x_mean = np.sum(x, axis=0)/n x_minus_mean = x - x_mean - x_var = np.sum(x_minus_mean*x_minus_mean,axis=0)/(n-1.0) + x_var = np.sum(x_minus_mean*x_minus_mean, axis=0)/(n-1.0) print("\nAverage of ", len(x), type) print("(should be about ", mean, "):", x_mean) print("Variance of those random numbers (should be about ", var, "):", x_var) if skew != []: - x_skew = (np.sum(x_minus_mean*x_minus_mean*x_minus_mean,axis=0)/9998.)/x_var**(3./2.) + x_skew = (np.sum(x_minus_mean*x_minus_mean*x_minus_mean, axis=0)/9998.)/x_var**(3./2.) print("Skewness of those random numbers (should be about ", skew, "):", x_skew) def test(): @@ -206,13 +206,13 @@ def test(): if (obj2[1] - obj[1]).any(): raise SystemExit("Failed seed test.") print("First random number is", random()) - print("Average of 10000 random numbers is", np.sum(random(10000),axis=0)/10000.) - x = random([10,1000]) + print("Average of 10000 random numbers is", np.sum(random(10000), axis=0)/10000.) + x = random([10, 1000]) if len(x.shape) != 2 or x.shape[0] != 10 or x.shape[1] != 1000: raise SystemExit("random returned wrong shape") x.shape = (10000,) - print("Average of 100 by 100 random numbers is", np.sum(x,axis=0)/10000.) - y = uniform(0.5,0.6, (1000,10)) + print("Average of 100 by 100 random numbers is", np.sum(x, axis=0)/10000.) + y = uniform(0.5, 0.6, (1000, 10)) if len(y.shape) !=2 or y.shape[0] != 1000 or y.shape[1] != 10: raise SystemExit("uniform returned wrong shape") y.shape = (10000,) @@ -221,7 +221,7 @@ def test(): print("randint(1, 10, shape=[50])") print(randint(1, 10, shape=[50])) print("permutation(10)", permutation(10)) - print("randint(3,9)", randint(3,9)) + print("randint(3,9)", randint(3, 9)) print("random_integers(10, shape=[20])") print(random_integers(10, shape=[20])) s = 3.0 @@ -232,20 +232,20 @@ def test(): mean_var_test(x, "normally distributed numbers with mean 2 and variance %f"%(s**2,), 2, s**2, 0) x = exponential(3, 10000) mean_var_test(x, "random numbers exponentially distributed with mean %f"%(s,), s, s**2, 2) - x = multivariate_normal(np.array([10,20]), np.array(([1,2],[2,4]))) + x = multivariate_normal(np.array([10, 20]), np.array(([1, 2], [2, 4]))) print("\nA multivariate normal", x) if x.shape != (2,): raise SystemExit("multivariate_normal returned wrong shape") - x = multivariate_normal(np.array([10,20]), np.array([[1,2],[2,4]]), [4,3]) + x = multivariate_normal(np.array([10, 20]), np.array([[1, 2], [2, 4]]), [4, 3]) print("A 4x3x2 array containing multivariate normals") print(x) - if x.shape != (4,3,2): raise SystemExit("multivariate_normal returned wrong shape") - x = multivariate_normal(np.array([-100,0,100]), np.array([[3,2,1],[2,2,1],[1,1,1]]), 10000) - x_mean = np.sum(x,axis=0)/10000. 
+ if x.shape != (4, 3, 2): raise SystemExit("multivariate_normal returned wrong shape") + x = multivariate_normal(np.array([-100, 0, 100]), np.array([[3, 2, 1], [2, 2, 1], [1, 1, 1]]), 10000) + x_mean = np.sum(x, axis=0)/10000. print("Average of 10000 multivariate normals with mean [-100,0,100]") print(x_mean) x_minus_mean = x - x_mean print("Estimated covariance of 10000 multivariate normals with covariance [[3,2,1],[2,2,1],[1,1,1]]") - print(np.dot(np.transpose(x_minus_mean),x_minus_mean)/9999.) + print(np.dot(np.transpose(x_minus_mean), x_minus_mean)/9999.) x = beta(5.0, 10.0, 10000) mean_var_test(x, "beta(5.,10.) random numbers", 0.333, 0.014) x = gamma(.01, 2., 10000) @@ -263,7 +263,7 @@ def test(): print("\nEach row is the result of 16 multinomial trials with probabilities [0.1, 0.5, 0.1 0.3]:") x = multinomial(16, [0.1, 0.5, 0.1], 8) print(x) - print("Mean = ", np.sum(x,axis=0)/8.) + print("Mean = ", np.sum(x, axis=0)/8.) if __name__ == '__main__': test() diff --git a/numpy/oldnumeric/rng.py b/numpy/oldnumeric/rng.py index 1ffd47b27..06120798d 100644 --- a/numpy/oldnumeric/rng.py +++ b/numpy/oldnumeric/rng.py @@ -6,7 +6,7 @@ It is for backwards compatibility only. """ from __future__ import division, absolute_import, print_function -__all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution', +__all__ = ['CreateGenerator', 'ExponentialDistribution', 'LogNormalDistribution', 'NormalDistribution', 'UniformDistribution', 'error', 'ranf', 'default_distribution', 'random_sample', 'standard_generator'] @@ -21,7 +21,7 @@ class Distribution(object): self._meth = meth self._args = args - def density(self,x): + def density(self, x): raise NotImplementedError def __call__(self, x): @@ -61,7 +61,7 @@ class LogNormalDistribution(Distribution): self._fac = 1.0/math.sqrt(2*math.pi)/self._sn def density(x): - m,s = self._args + m, s = self._args y = (math.log(x)-self._mn)/self._sn return self._fac*math.exp(-0.5*y*y)/x @@ -76,7 +76,7 @@ class NormalDistribution(Distribution): self._fac = 1.0/math.sqrt(2*math.pi)/s def density(x): - m,s = self._args + m, s = self._args y = (x-m)/s return self._fac*math.exp(-0.5*y*y) @@ -97,7 +97,7 @@ class UniformDistribution(Distribution): else: return self._fac -default_distribution = UniformDistribution(0.0,1.0) +default_distribution = UniformDistribution(0.0, 1.0) class CreateGenerator(object): def __init__(self, seed, dist=None): diff --git a/numpy/oldnumeric/rng_stats.py b/numpy/oldnumeric/rng_stats.py index ed4781e6c..dd450343d 100644 --- a/numpy/oldnumeric/rng_stats.py +++ b/numpy/oldnumeric/rng_stats.py @@ -10,7 +10,7 @@ def average(data): def variance(data): data = Numeric.array(data) - return Numeric.add.reduce((data-average(data,axis=0))**2)/(len(data)-1) + return Numeric.add.reduce((data-average(data, axis=0))**2)/(len(data)-1) def standardDeviation(data): data = Numeric.array(data) @@ -26,11 +26,11 @@ def histogram(data, nbins, range = None): data = Numeric.repeat(data, Numeric.logical_and(Numeric.less_equal(data, max), Numeric.greater_equal(data, - min)),axis=0) + min)), axis=0) bin_width = (max-min)/nbins data = Numeric.floor((data - min)/bin_width).astype(Numeric.Int) histo = Numeric.add.reduce(Numeric.equal( - Numeric.arange(nbins)[:,Numeric.NewAxis], data), -1) + Numeric.arange(nbins)[:, Numeric.NewAxis], data), -1) histo[-1] = histo[-1] + Numeric.add.reduce(Numeric.equal(nbins, data)) bins = min + bin_width*(Numeric.arange(nbins)+0.5) return Numeric.transpose(Numeric.array([bins, histo])) diff --git a/numpy/oldnumeric/setup.py 
b/numpy/oldnumeric/setup.py index 5dd8d3c5c..13c3e0d8d 100644 --- a/numpy/oldnumeric/setup.py +++ b/numpy/oldnumeric/setup.py @@ -2,7 +2,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('oldnumeric',parent_package,top_path) + config = Configuration('oldnumeric', parent_package, top_path) config.add_data_dir('tests') return config diff --git a/numpy/oldnumeric/tests/test_oldnumeric.py b/numpy/oldnumeric/tests/test_oldnumeric.py index 5ff8e6bd3..2c1a806ac 100644 --- a/numpy/oldnumeric/tests/test_oldnumeric.py +++ b/numpy/oldnumeric/tests/test_oldnumeric.py @@ -11,79 +11,79 @@ from numpy.core.numeric import float32, float64, complex64, complex128, int8, \ class test_oldtypes(unittest.TestCase): def test_oldtypes(self, level=1): - a1 = array([0,1,0], Float) - a2 = array([0,1,0], float) + a1 = array([0, 1, 0], Float) + a2 = array([0, 1, 0], float) assert_array_equal(a1, a2) - a1 = array([0,1,0], Float8) - a2 = array([0,1,0], float) + a1 = array([0, 1, 0], Float8) + a2 = array([0, 1, 0], float) assert_array_equal(a1, a2) - a1 = array([0,1,0], Float16) - a2 = array([0,1,0], float) + a1 = array([0, 1, 0], Float16) + a2 = array([0, 1, 0], float) assert_array_equal(a1, a2) - a1 = array([0,1,0], Float32) - a2 = array([0,1,0], float32) + a1 = array([0, 1, 0], Float32) + a2 = array([0, 1, 0], float32) assert_array_equal(a1, a2) - a1 = array([0,1,0], Float64) - a2 = array([0,1,0], float64) + a1 = array([0, 1, 0], Float64) + a2 = array([0, 1, 0], float64) assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex) - a2 = array([0,1,0], complex) + a1 = array([0, 1, 0], Complex) + a2 = array([0, 1, 0], complex) assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex8) - a2 = array([0,1,0], complex) + a1 = array([0, 1, 0], Complex8) + a2 = array([0, 1, 0], complex) assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex16) - a2 = array([0,1,0], complex) + a1 = array([0, 1, 0], Complex16) + a2 = array([0, 1, 0], complex) assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex32) - a2 = array([0,1,0], complex64) + a1 = array([0, 1, 0], Complex32) + a2 = array([0, 1, 0], complex64) assert_array_equal(a1, a2) - a1 = array([0,1,0], Complex64) - a2 = array([0,1,0], complex128) + a1 = array([0, 1, 0], Complex64) + a2 = array([0, 1, 0], complex128) assert_array_equal(a1, a2) - a1 = array([0,1,0], Int) - a2 = array([0,1,0], int) + a1 = array([0, 1, 0], Int) + a2 = array([0, 1, 0], int) assert_array_equal(a1, a2) - a1 = array([0,1,0], Int8) - a2 = array([0,1,0], int8) + a1 = array([0, 1, 0], Int8) + a2 = array([0, 1, 0], int8) assert_array_equal(a1, a2) - a1 = array([0,1,0], Int16) - a2 = array([0,1,0], int16) + a1 = array([0, 1, 0], Int16) + a2 = array([0, 1, 0], int16) assert_array_equal(a1, a2) - a1 = array([0,1,0], Int32) - a2 = array([0,1,0], int32) + a1 = array([0, 1, 0], Int32) + a2 = array([0, 1, 0], int32) assert_array_equal(a1, a2) try: - a1 = array([0,1,0], Int64) - a2 = array([0,1,0], int64) + a1 = array([0, 1, 0], Int64) + a2 = array([0, 1, 0], int64) assert_array_equal(a1, a2) except NameError: # Not all systems have 64-bit integers. 
pass - a1 = array([0,1,0], UnsignedInt) - a2 = array([0,1,0], UnsignedInteger) - a3 = array([0,1,0], uint) + a1 = array([0, 1, 0], UnsignedInt) + a2 = array([0, 1, 0], UnsignedInteger) + a3 = array([0, 1, 0], uint) assert_array_equal(a1, a3) assert_array_equal(a2, a3) - a1 = array([0,1,0], UInt8) - a2 = array([0,1,0], UnsignedInt8) - a3 = array([0,1,0], uint8) + a1 = array([0, 1, 0], UInt8) + a2 = array([0, 1, 0], UnsignedInt8) + a3 = array([0, 1, 0], uint8) assert_array_equal(a1, a3) assert_array_equal(a2, a3) - a1 = array([0,1,0], UInt16) - a2 = array([0,1,0], UnsignedInt16) - a3 = array([0,1,0], uint16) + a1 = array([0, 1, 0], UInt16) + a2 = array([0, 1, 0], UnsignedInt16) + a3 = array([0, 1, 0], uint16) assert_array_equal(a1, a3) assert_array_equal(a2, a3) - a1 = array([0,1,0], UInt32) - a2 = array([0,1,0], UnsignedInt32) - a3 = array([0,1,0], uint32) + a1 = array([0, 1, 0], UInt32) + a2 = array([0, 1, 0], UnsignedInt32) + a3 = array([0, 1, 0], uint32) assert_array_equal(a1, a3) assert_array_equal(a2, a3) try: - a1 = array([0,1,0], UInt64) - a2 = array([0,1,0], UnsignedInt64) - a3 = array([0,1,0], uint64) + a1 = array([0, 1, 0], UInt64) + a2 = array([0, 1, 0], UnsignedInt64) + a3 = array([0, 1, 0], uint64) assert_array_equal(a1, a3) assert_array_equal(a2, a3) except NameError: diff --git a/numpy/oldnumeric/tests/test_regression.py b/numpy/oldnumeric/tests/test_regression.py index b62a384e4..272323b81 100644 --- a/numpy/oldnumeric/tests/test_regression.py +++ b/numpy/oldnumeric/tests/test_regression.py @@ -8,5 +8,4 @@ class TestRegression(TestCase): def test_numeric_random(self, level=rlevel): """Ticket #552""" from numpy.oldnumeric.random_array import randint - randint(0,50,[2,3]) - + randint(0, 50, [2, 3]) diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index db1b637fd..6a2394382 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -98,7 +98,7 @@ __all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebval', 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', - 'chebgrid2d', 'chebgrid3d', 'chebvander2d','chebvander3d', + 'chebgrid2d', 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', 'chebgauss', 'chebweight'] chebtrim = pu.trimcoef @@ -439,7 +439,7 @@ def cheb2poly(c) : # # Chebyshev default domain. -chebdomain = np.array([-1,1]) +chebdomain = np.array([-1, 1]) # Chebyshev coefficients representing zero. chebzero = np.array([0]) @@ -448,7 +448,7 @@ chebzero = np.array([0]) chebone = np.array([1]) # Chebyshev coefficients representing the identity x. -chebx = np.array([0,1]) +chebx = np.array([0, 1]) def chebline(off, scl) : @@ -482,7 +482,7 @@ def chebline(off, scl) : """ if scl != 0 : - return np.array([off,scl]) + return np.array([off, scl]) else : return np.array([off]) @@ -1523,7 +1523,7 @@ def chebvander2d(x, y, deg) : vx = chebvander(x, degx) vy = chebvander(y, degy) - v = vx[..., None]*vy[..., None, :] + v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) @@ -1588,7 +1588,7 @@ def chebvander3d(x, y, z, deg) : vx = chebvander(x, degx) vy = chebvander(y, degy) vz = chebvander(z, degz) - v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :] + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) @@ -1805,7 +1805,7 @@ def chebcompanion(c): top[0] = np.sqrt(.5) top[1:] = 1/2 bot[...] 
= top - mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 return mat diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 13b9e6845..4140acfb7 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -185,7 +185,7 @@ def herm2poly(c) : # # Hermite -hermdomain = np.array([-1,1]) +hermdomain = np.array([-1, 1]) # Hermite coefficients representing zero. hermzero = np.array([0]) @@ -228,7 +228,7 @@ def hermline(off, scl) : """ if scl != 0 : - return np.array([off,scl/2]) + return np.array([off, scl/2]) else : return np.array([off]) @@ -1295,7 +1295,7 @@ def hermvander2d(x, y, deg) : vx = hermvander(x, degx) vy = hermvander(y, degy) - v = vx[..., None]*vy[..., None, :] + v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) @@ -1360,7 +1360,7 @@ def hermvander3d(x, y, z, deg) : vx = hermvander(x, degx) vy = hermvander(y, degy) vz = hermvander(z, degz) - v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :] + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) @@ -1577,13 +1577,13 @@ def hermcompanion(c): n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., np.sqrt(2.*np.arange(1,n)))) + scl = np.hstack((1., np.sqrt(2.*np.arange(1, n)))) scl = np.multiply.accumulate(scl) top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(.5*np.arange(1,n)) + top[...] = np.sqrt(.5*np.arange(1, n)) bot[...] = top - mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 return mat diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 9f10403dd..735ca9470 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -184,7 +184,7 @@ def herme2poly(c) : # # Hermite -hermedomain = np.array([-1,1]) +hermedomain = np.array([-1, 1]) # Hermite coefficients representing zero. hermezero = np.array([0]) @@ -228,7 +228,7 @@ def hermeline(off, scl) : """ if scl != 0 : - return np.array([off,scl]) + return np.array([off, scl]) else : return np.array([off]) @@ -1291,7 +1291,7 @@ def hermevander2d(x, y, deg) : vx = hermevander(x, degx) vy = hermevander(y, degy) - v = vx[..., None]*vy[..., None, :] + v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) @@ -1356,7 +1356,7 @@ def hermevander3d(x, y, z, deg) : vx = hermevander(x, degx) vy = hermevander(y, degy) vz = hermevander(z, degz) - v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :] + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) @@ -1574,13 +1574,13 @@ def hermecompanion(c): n = len(c) - 1 mat = np.zeros((n, n), dtype=c.dtype) - scl = np.hstack((1., np.sqrt(np.arange(1,n)))) + scl = np.hstack((1., np.sqrt(np.arange(1, n)))) scl = np.multiply.accumulate(scl) top = mat.reshape(-1)[1::n+1] bot = mat.reshape(-1)[n::n+1] - top[...] = np.sqrt(np.arange(1,n)) + top[...] = np.sqrt(np.arange(1, n)) bot[...] = top - mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1]) + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1]) return mat diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index ea805e146..b7ffe9b0c 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -181,7 +181,7 @@ def lag2poly(c) : # # Laguerre -lagdomain = np.array([0,1]) +lagdomain = np.array([0, 1]) # Laguerre coefficients representing zero. 
lagzero = np.array([0]) @@ -1294,7 +1294,7 @@ def lagvander2d(x, y, deg) : vx = lagvander(x, degx) vy = lagvander(y, degy) - v = vx[..., None]*vy[..., None, :] + v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) @@ -1359,7 +1359,7 @@ def lagvander3d(x, y, z, deg) : vx = lagvander(x, degx) vy = lagvander(y, degy) vz = lagvander(z, degz) - v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :] + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) @@ -1578,10 +1578,10 @@ def lagcompanion(c): top = mat.reshape(-1)[1::n+1] mid = mat.reshape(-1)[0::n+1] bot = mat.reshape(-1)[n::n+1] - top[...] = -np.arange(1,n) + top[...] = -np.arange(1, n) mid[...] = 2.*np.arange(n) + 1. bot[...] = top - mat[:,-1] += (c[:-1]/c[-1])*n + mat[:, -1] += (c[:-1]/c[-1])*n return mat diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index c7a1f2dd2..8d89c8412 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -92,7 +92,7 @@ from .polytemplate import polytemplate __all__ = ['legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', 'legint', 'leg2poly', 'poly2leg', 'legfromroots', - 'legvander', 'legfit', 'legtrim', 'legroots', 'Legendre','legval2d', + 'legvander', 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', 'leggauss', 'legweight'] @@ -213,7 +213,7 @@ def leg2poly(c) : # # Legendre -legdomain = np.array([-1,1]) +legdomain = np.array([-1, 1]) # Legendre coefficients representing zero. legzero = np.array([0]) @@ -222,7 +222,7 @@ legzero = np.array([0]) legone = np.array([1]) # Legendre coefficients representing the identity x. -legx = np.array([0,1]) +legx = np.array([0, 1]) def legline(off, scl) : @@ -256,7 +256,7 @@ def legline(off, scl) : """ if scl != 0 : - return np.array([off,scl]) + return np.array([off, scl]) else : return np.array([off]) @@ -1324,7 +1324,7 @@ def legvander2d(x, y, deg) : vx = legvander(x, degx) vy = legvander(y, degy) - v = vx[..., None]*vy[..., None, :] + v = vx[..., None]*vy[..., None,:] return v.reshape(v.shape[:-2] + (-1,)) @@ -1389,7 +1389,7 @@ def legvander3d(x, y, z, deg) : vx = legvander(x, degx) vy = legvander(y, degy) vz = legvander(z, degz) - v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :] + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] return v.reshape(v.shape[:-3] + (-1,)) @@ -1605,7 +1605,7 @@ def legcompanion(c): bot = mat.reshape(-1)[n::n+1] top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] bot[...] 
= top - mat[:,-1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) return mat diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 0b044e8e8..9acdcbe52 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -58,8 +58,8 @@ from __future__ import division, absolute_import, print_function __all__ = ['polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', 'polyder', 'polyint', 'polyfromroots', 'polyvander', - 'polyfit', 'polytrim', 'polyroots', 'Polynomial','polyval2d', - 'polyval3d', 'polygrid2d', 'polygrid3d', 'polyvander2d','polyvander3d'] + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', + 'polyval3d', 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] import numpy as np import numpy.linalg as la @@ -75,7 +75,7 @@ polytrim = pu.trimcoef # # Polynomial default domain. -polydomain = np.array([-1,1]) +polydomain = np.array([-1, 1]) # Polynomial coefficients representing zero. polyzero = np.array([0]) @@ -84,7 +84,7 @@ polyzero = np.array([0]) polyone = np.array([1]) # Polynomial coefficients representing the identity x. -polyx = np.array([0,1]) +polyx = np.array([0, 1]) # # Polynomial series functions @@ -120,7 +120,7 @@ def polyline(off, scl) : """ if scl != 0 : - return np.array([off,scl]) + return np.array([off, scl]) else : return np.array([off]) @@ -1119,7 +1119,7 @@ def polyvander2d(x, y, deg) : vx = polyvander(x, degx) vy = polyvander(y, degy) - v = vx[..., None]*vy[..., None, :] + v = vx[..., None]*vy[..., None,:] # einsum bug #v = np.einsum("...i,...j->...ij", vx, vy) return v.reshape(v.shape[:-2] + (-1,)) @@ -1186,7 +1186,7 @@ def polyvander3d(x, y, z, deg) : vx = polyvander(x, degx) vy = polyvander(y, degy) vz = polyvander(z, degz) - v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :] + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] # einsum bug #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz) return v.reshape(v.shape[:-3] + (-1,)) @@ -1424,7 +1424,7 @@ def polycompanion(c): mat = np.zeros((n, n), dtype=c.dtype) bot = mat.reshape(-1)[n::n+1] bot[...] 
= 1 - mat[:,-1] -= c[:-1]/c[-1] + mat[:, -1] -= c[:-1]/c[-1] return mat diff --git a/numpy/polynomial/setup.py b/numpy/polynomial/setup.py index ab2ff2be8..cb59ee1e5 100644 --- a/numpy/polynomial/setup.py +++ b/numpy/polynomial/setup.py @@ -2,7 +2,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('polynomial',parent_package,top_path) + config = Configuration('polynomial', parent_package, top_path) config.add_data_dir('tests') return config diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 367d81f58..70eccdd0a 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -64,8 +64,8 @@ class TestArithmetic(TestCase) : def test_chebadd(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = cheb.chebadd([0]*i + [1], [0]*j + [1]) @@ -74,8 +74,8 @@ class TestArithmetic(TestCase) : def test_chebsub(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = cheb.chebsub([0]*i + [1], [0]*j + [1]) @@ -83,7 +83,7 @@ class TestArithmetic(TestCase) : def test_chebmulx(self): assert_equal(cheb.chebmulx([0]), [0]) - assert_equal(cheb.chebmulx([1]), [0,1]) + assert_equal(cheb.chebmulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [.5, 0, .5] @@ -92,7 +92,7 @@ class TestArithmetic(TestCase) : def test_chebmul(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(i + j + 1) tgt[i + j] += .5 tgt[abs(i - j)] += .5 @@ -102,7 +102,7 @@ class TestArithmetic(TestCase) : def test_chebdiv(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = cheb.chebadd(ci, cj) @@ -127,7 +127,7 @@ class TestEvaluation(TestCase): assert_equal(cheb.chebval([], [1]).size, 0) #check normal input) - x = np.linspace(-1,1) + x = np.linspace(-1, 1) y = [polyval(x, c) for c in Tlist] for i in range(10) : msg = "At i=%d" % i @@ -140,8 +140,8 @@ class TestEvaluation(TestCase): dims = [2]*i x = np.zeros(dims) assert_equal(cheb.chebval(x, [1]).shape, dims) - assert_equal(cheb.chebval(x, [1,0]).shape, dims) - assert_equal(cheb.chebval(x, [1,0,0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) def test_chebval2d(self): x1, x2, x3 = self.x @@ -156,9 +156,9 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = cheb.chebval2d(z, z, self.c2d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_chebval3d(self): x1, x2, x3 = self.x @@ -173,9 +173,9 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = cheb.chebval3d(z, z, z, self.c3d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_chebgrid2d(self): x1, x2, x3 = self.x @@ -187,7 +187,7 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = cheb.chebgrid2d(z, z, 
self.c2d) assert_(res.shape == (2, 3)*2) @@ -201,7 +201,7 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = cheb.chebgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) @@ -212,7 +212,7 @@ class TestIntegral(TestCase) : # check exceptions assert_raises(ValueError, cheb.chebint, [0], .5) assert_raises(ValueError, cheb.chebint, [0], -1) - assert_raises(ValueError, cheb.chebint, [0], 1, [0,0]) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) # test integration of zero polynomial for i in range(2, 5): @@ -250,7 +250,7 @@ class TestIntegral(TestCase) : # check multiple integrations with default k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -260,7 +260,7 @@ class TestIntegral(TestCase) : # check multiple integrations with defined k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -270,7 +270,7 @@ class TestIntegral(TestCase) : # check multiple integrations with lbnd for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -280,7 +280,7 @@ class TestIntegral(TestCase) : # check multiple integrations with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -320,14 +320,14 @@ class TestDerivative(TestCase) : # check that derivation is the inverse of integration for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -409,10 +409,10 @@ class TestFitting(TestCase): assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1,1]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) # Test fit - x = np.linspace(0,2) + x = np.linspace(0, 2) y = f(x) # coef3 = cheb.chebfit(x, y, 3) @@ -423,8 +423,8 @@ class TestFitting(TestCase): assert_equal(len(coef4), 5) assert_almost_equal(cheb.chebval(x, coef4), y) # - coef2d = cheb.chebfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -433,8 +433,8 @@ class TestFitting(TestCase): wcoef3 = cheb.chebfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # - wcoef2d = cheb.chebfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. 
x = [1, 1j, -1, -1j] @@ -467,7 +467,7 @@ class TestGauss(TestCase): v = cheb.chebvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:,None] * vv * vd + vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct @@ -480,16 +480,16 @@ class TestMisc(TestCase) : def test_chebfromroots(self) : res = cheb.chebfromroots([]) assert_almost_equal(trim(res), [1]) - for i in range(1,5) : + for i in range(1, 5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = [0]*i + [1] res = cheb.chebfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res),trim(tgt)) + assert_almost_equal(trim(res), trim(tgt)) def test_chebroots(self) : assert_almost_equal(cheb.chebroots([1]), []) assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) - for i in range(2,5) : + for i in range(2, 5) : tgt = np.linspace(-1, 1, i) res = cheb.chebroots(cheb.chebfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) @@ -506,7 +506,7 @@ class TestMisc(TestCase) : assert_equal(cheb.chebtrim(coef, 2), [0]) def test_chebline(self) : - assert_equal(cheb.chebline(3,4), [3, 4]) + assert_equal(cheb.chebline(3, 4), [3, 4]) def test_cheb2poly(self) : for i in range(10) : diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index f8910a473..5708d936f 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -155,12 +155,12 @@ def check_fit(Poly) : def f(x) : return x*(x - 1)*(x - 2) - x = np.linspace(0,3) + x = np.linspace(0, 3) y = f(x) # check default value of domain and window p = Poly.fit(x, y, 3) - assert_almost_equal(p.domain, [0,3]) + assert_almost_equal(p.domain, [0, 3]) assert_almost_equal(p(x), y) assert_equal(p.degree(), 3) @@ -441,7 +441,7 @@ def check_deriv(Poly): def check_linspace(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 - p = Poly([1,2,3], domain=d, window=w) + p = Poly([1, 2, 3], domain=d, window=w) # check default domain xtgt = np.linspace(d[0], d[1], 20) ytgt = p(xtgt) diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 327283d0e..978c9c79b 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -49,8 +49,8 @@ class TestArithmetic(TestCase) : def test_hermadd(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = herm.hermadd([0]*i + [1], [0]*j + [1]) @@ -59,8 +59,8 @@ class TestArithmetic(TestCase) : def test_hermsub(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = herm.hermsub([0]*i + [1], [0]*j + [1]) @@ -68,7 +68,7 @@ class TestArithmetic(TestCase) : def test_hermmulx(self): assert_equal(herm.hermmulx([0]), [0]) - assert_equal(herm.hermmulx([1]), [0,.5]) + assert_equal(herm.hermmulx([1]), [0, .5]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [i, 0, .5] @@ -80,7 +80,7 @@ class TestArithmetic(TestCase) : pol1 = [0]*i + [1] val1 = herm.hermval(self.x, pol1) for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = herm.hermval(self.x, pol2) pol3 = herm.hermmul(pol1, pol2) @@ -91,7 +91,7 @@ class TestArithmetic(TestCase) : def test_hermdiv(self) : for i in range(5) : for 
j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = herm.hermadd(ci, cj) @@ -116,7 +116,7 @@ class TestEvaluation(TestCase) : assert_equal(herm.hermval([], [1]).size, 0) #check normal input) - x = np.linspace(-1,1) + x = np.linspace(-1, 1) y = [polyval(x, c) for c in Hlist] for i in range(10) : msg = "At i=%d" % i @@ -130,8 +130,8 @@ class TestEvaluation(TestCase) : dims = [2]*i x = np.zeros(dims) assert_equal(herm.hermval(x, [1]).shape, dims) - assert_equal(herm.hermval(x, [1,0]).shape, dims) - assert_equal(herm.hermval(x, [1,0,0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) def test_hermval2d(self): x1, x2, x3 = self.x @@ -146,9 +146,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herm.hermval2d(z, z, self.c2d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_hermval3d(self): x1, x2, x3 = self.x @@ -163,9 +163,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herm.hermval3d(z, z, z, self.c3d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_hermgrid2d(self): x1, x2, x3 = self.x @@ -177,7 +177,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herm.hermgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) @@ -191,7 +191,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herm.hermgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) @@ -202,7 +202,7 @@ class TestIntegral(TestCase) : # check exceptions assert_raises(ValueError, herm.hermint, [0], .5) assert_raises(ValueError, herm.hermint, [0], -1) - assert_raises(ValueError, herm.hermint, [0], 1, [0,0]) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) # test integration of zero polynomial for i in range(2, 5): @@ -240,7 +240,7 @@ class TestIntegral(TestCase) : # check multiple integrations with default k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -250,7 +250,7 @@ class TestIntegral(TestCase) : # check multiple integrations with defined k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -260,7 +260,7 @@ class TestIntegral(TestCase) : # check multiple integrations with lbnd for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -270,7 +270,7 @@ class TestIntegral(TestCase) : # check multiple integrations with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -310,14 +310,14 @@ class TestDerivative(TestCase) : # check that derivation is the inverse of integration for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = herm.hermder(herm.hermint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -399,10 +399,10 @@ class TestFitting(TestCase): assert_raises(TypeError, 
herm.hermfit, [1, 2], [1], 0) assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1,1]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) # Test fit - x = np.linspace(0,2) + x = np.linspace(0, 2) y = f(x) # coef3 = herm.hermfit(x, y, 3) @@ -413,8 +413,8 @@ class TestFitting(TestCase): assert_equal(len(coef4), 5) assert_almost_equal(herm.hermval(x, coef4), y) # - coef2d = herm.hermfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -423,8 +423,8 @@ class TestFitting(TestCase): wcoef3 = herm.hermfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # - wcoef2d = herm.hermfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] @@ -457,7 +457,7 @@ class TestGauss(TestCase): v = herm.hermvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:,None] * vv * vd + vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct @@ -470,7 +470,7 @@ class TestMisc(TestCase) : def test_hermfromroots(self) : res = herm.hermfromroots([]) assert_almost_equal(trim(res), [1]) - for i in range(1,5) : + for i in range(1, 5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = herm.hermfromroots(roots) res = herm.hermval(roots, pol) @@ -482,7 +482,7 @@ class TestMisc(TestCase) : def test_hermroots(self) : assert_almost_equal(herm.hermroots([1]), []) assert_almost_equal(herm.hermroots([1, 1]), [-.5]) - for i in range(2,5) : + for i in range(2, 5) : tgt = np.linspace(-1, 1, i) res = herm.hermroots(herm.hermfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) @@ -499,7 +499,7 @@ class TestMisc(TestCase) : assert_equal(herm.hermtrim(coef, 2), [0]) def test_hermline(self) : - assert_equal(herm.hermline(3,4), [3, 2]) + assert_equal(herm.hermline(3, 4), [3, 2]) def test_herm2poly(self) : for i in range(10) : diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index 404a46fc7..b27e8576b 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -9,15 +9,15 @@ from numpy.polynomial.polynomial import polyval from numpy.testing import * He0 = np.array([ 1 ]) -He1 = np.array([ 0 , 1 ]) -He2 = np.array([ -1 ,0 , 1 ]) -He3 = np.array([ 0 , -3 ,0 , 1 ]) -He4 = np.array([ 3 ,0 , -6 ,0 , 1 ]) -He5 = np.array([ 0 , 15 ,0 , -10 ,0 , 1 ]) -He6 = np.array([ -15 ,0 , 45 ,0 , -15 ,0 , 1 ]) -He7 = np.array([ 0 , -105 ,0 , 105 ,0 , -21 ,0 , 1 ]) -He8 = np.array([ 105 ,0 , -420 ,0 , 210 ,0 , -28 ,0 , 1 ]) -He9 = np.array([ 0 , 945 ,0 , -1260 ,0 , 378 ,0 , -36 ,0 , 1 ]) +He1 = np.array([ 0, 1 ]) +He2 = np.array([ -1, 0, 1 ]) +He3 = np.array([ 0, -3, 0, 1 ]) +He4 = np.array([ 3, 0, -6, 0, 1 ]) +He5 = np.array([ 0, 15, 0, -10, 0, 1 ]) +He6 = np.array([ -15, 0, 45, 0, -15, 0, 1 ]) +He7 = np.array([ 0, -105, 0, 105, 0, -21, 0, 1 ]) +He8 = np.array([ 105, 0, -420, 0, 210, 0, -28, 0, 1 ]) +He9 = np.array([ 0, 945, 0, -1260, 0, 378, 0, -36, 0, 1 ]) Helist = [He0, He1, He2, He3, He4, He5, He6, He7, 
He8, He9] @@ -46,8 +46,8 @@ class TestArithmetic(TestCase) : def test_hermeadd(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = herme.hermeadd([0]*i + [1], [0]*j + [1]) @@ -56,8 +56,8 @@ class TestArithmetic(TestCase) : def test_hermesub(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = herme.hermesub([0]*i + [1], [0]*j + [1]) @@ -65,7 +65,7 @@ class TestArithmetic(TestCase) : def test_hermemulx(self): assert_equal(herme.hermemulx([0]), [0]) - assert_equal(herme.hermemulx([1]), [0,1]) + assert_equal(herme.hermemulx([1]), [0, 1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [i, 0, 1] @@ -77,7 +77,7 @@ class TestArithmetic(TestCase) : pol1 = [0]*i + [1] val1 = herme.hermeval(self.x, pol1) for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = herme.hermeval(self.x, pol2) pol3 = herme.hermemul(pol1, pol2) @@ -88,7 +88,7 @@ class TestArithmetic(TestCase) : def test_hermediv(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = herme.hermeadd(ci, cj) @@ -113,7 +113,7 @@ class TestEvaluation(TestCase) : assert_equal(herme.hermeval([], [1]).size, 0) #check normal input) - x = np.linspace(-1,1) + x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10) : msg = "At i=%d" % i @@ -127,8 +127,8 @@ class TestEvaluation(TestCase) : dims = [2]*i x = np.zeros(dims) assert_equal(herme.hermeval(x, [1]).shape, dims) - assert_equal(herme.hermeval(x, [1,0]).shape, dims) - assert_equal(herme.hermeval(x, [1,0,0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) def test_hermeval2d(self): x1, x2, x3 = self.x @@ -143,9 +143,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herme.hermeval2d(z, z, self.c2d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_hermeval3d(self): x1, x2, x3 = self.x @@ -160,9 +160,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herme.hermeval3d(z, z, z, self.c3d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_hermegrid2d(self): x1, x2, x3 = self.x @@ -174,7 +174,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herme.hermegrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) @@ -188,7 +188,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = herme.hermegrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) @@ -199,7 +199,7 @@ class TestIntegral(TestCase): # check exceptions assert_raises(ValueError, herme.hermeint, [0], .5) assert_raises(ValueError, herme.hermeint, [0], -1) - assert_raises(ValueError, herme.hermeint, [0], 1, [0,0]) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) # test integration of zero polynomial for i in range(2, 5): @@ -237,7 +237,7 @@ class TestIntegral(TestCase): # check multiple integrations with default k for i in range(5) : - for 
j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -247,7 +247,7 @@ class TestIntegral(TestCase): # check multiple integrations with defined k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -257,7 +257,7 @@ class TestIntegral(TestCase): # check multiple integrations with lbnd for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -267,7 +267,7 @@ class TestIntegral(TestCase): # check multiple integrations with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -307,14 +307,14 @@ class TestDerivative(TestCase) : # check that derivation is the inverse of integration for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = herme.hermeder(herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -396,10 +396,10 @@ class TestFitting(TestCase): assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1,1]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) # Test fit - x = np.linspace(0,2) + x = np.linspace(0, 2) y = f(x) # coef3 = herme.hermefit(x, y, 3) @@ -410,8 +410,8 @@ class TestFitting(TestCase): assert_equal(len(coef4), 5) assert_almost_equal(herme.hermeval(x, coef4), y) # - coef2d = herme.hermefit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -420,8 +420,8 @@ class TestFitting(TestCase): wcoef3 = herme.hermefit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # - wcoef2d = herme.hermefit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. 
x = [1, 1j, -1, -1j] @@ -454,7 +454,7 @@ class TestGauss(TestCase): v = herme.hermevander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:,None] * vv * vd + vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct @@ -467,7 +467,7 @@ class TestMisc(TestCase) : def test_hermefromroots(self) : res = herme.hermefromroots([]) assert_almost_equal(trim(res), [1]) - for i in range(1,5) : + for i in range(1, 5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = herme.hermefromroots(roots) res = herme.hermeval(roots, pol) @@ -479,7 +479,7 @@ class TestMisc(TestCase) : def test_hermeroots(self) : assert_almost_equal(herme.hermeroots([1]), []) assert_almost_equal(herme.hermeroots([1, 1]), [-1]) - for i in range(2,5) : + for i in range(2, 5) : tgt = np.linspace(-1, 1, i) res = herme.hermeroots(herme.hermefromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) @@ -496,7 +496,7 @@ class TestMisc(TestCase) : assert_equal(herme.hermetrim(coef, 2), [0]) def test_hermeline(self) : - assert_equal(herme.hermeline(3,4), [3, 4]) + assert_equal(herme.hermeline(3, 4), [3, 4]) def test_herme2poly(self) : for i in range(10) : diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 38fcce299..d42bac67c 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -11,12 +11,12 @@ from numpy.testing import ( assert_equal, assert_, run_module_suite) L0 = np.array([1 ])/1 -L1 = np.array([1 , -1 ])/1 -L2 = np.array([2 , -4 , 1 ])/2 -L3 = np.array([6 , -18 , 9 , -1 ])/6 -L4 = np.array([24 , -96 , 72 , -16 , 1 ])/24 -L5 = np.array([120 , -600 , 600 , -200 , 25 , -1 ])/120 -L6 = np.array([720 , -4320 , 5400 , -2400 , 450 , -36 , 1 ])/720 +L1 = np.array([1, -1 ])/1 +L2 = np.array([2, -4, 1 ])/2 +L3 = np.array([6, -18, 9, -1 ])/6 +L4 = np.array([24, -96, 72, -16, 1 ])/24 +L5 = np.array([120, -600, 600, -200, 25, -1 ])/120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1 ])/720 Llist = [L0, L1, L2, L3, L4, L5, L6] @@ -45,8 +45,8 @@ class TestArithmetic(TestCase) : def test_lagadd(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = lag.lagadd([0]*i + [1], [0]*j + [1]) @@ -55,8 +55,8 @@ class TestArithmetic(TestCase) : def test_lagsub(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = lag.lagsub([0]*i + [1], [0]*j + [1]) @@ -64,7 +64,7 @@ class TestArithmetic(TestCase) : def test_lagmulx(self): assert_equal(lag.lagmulx([0]), [0]) - assert_equal(lag.lagmulx([1]), [1,-1]) + assert_equal(lag.lagmulx([1]), [1, -1]) for i in range(1, 5): ser = [0]*i + [1] tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] @@ -76,7 +76,7 @@ class TestArithmetic(TestCase) : pol1 = [0]*i + [1] val1 = lag.lagval(self.x, pol1) for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = lag.lagval(self.x, pol2) pol3 = lag.lagmul(pol1, pol2) @@ -87,7 +87,7 @@ class TestArithmetic(TestCase) : def test_lagdiv(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = lag.lagadd(ci, cj) @@ -111,7 +111,7 @@ class TestEvaluation(TestCase) : 
assert_equal(lag.lagval([], [1]).size, 0) #check normal input) - x = np.linspace(-1,1) + x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(7) : msg = "At i=%d" % i @@ -125,8 +125,8 @@ class TestEvaluation(TestCase) : dims = [2]*i x = np.zeros(dims) assert_equal(lag.lagval(x, [1]).shape, dims) - assert_equal(lag.lagval(x, [1,0]).shape, dims) - assert_equal(lag.lagval(x, [1,0,0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) def test_lagval2d(self): x1, x2, x3 = self.x @@ -141,9 +141,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = lag.lagval2d(z, z, self.c2d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_lagval3d(self): x1, x2, x3 = self.x @@ -158,9 +158,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = lag.lagval3d(z, z, z, self.c3d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_laggrid2d(self): x1, x2, x3 = self.x @@ -172,7 +172,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = lag.laggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) @@ -186,7 +186,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = lag.laggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) @@ -197,7 +197,7 @@ class TestIntegral(TestCase) : # check exceptions assert_raises(ValueError, lag.lagint, [0], .5) assert_raises(ValueError, lag.lagint, [0], -1) - assert_raises(ValueError, lag.lagint, [0], 1, [0,0]) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) # test integration of zero polynomial for i in range(2, 5): @@ -235,7 +235,7 @@ class TestIntegral(TestCase) : # check multiple integrations with default k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -245,7 +245,7 @@ class TestIntegral(TestCase) : # check multiple integrations with defined k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -255,7 +255,7 @@ class TestIntegral(TestCase) : # check multiple integrations with lbnd for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -265,7 +265,7 @@ class TestIntegral(TestCase) : # check multiple integrations with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -305,14 +305,14 @@ class TestDerivative(TestCase) : # check that derivation is the inverse of integration for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = lag.lagder(lag.lagint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -394,10 +394,10 @@ class TestFitting(TestCase): assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1,1]) + assert_raises(TypeError, lag.lagfit, [1], 
[1], 0, w=[1, 1]) # Test fit - x = np.linspace(0,2) + x = np.linspace(0, 2) y = f(x) # coef3 = lag.lagfit(x, y, 3) @@ -408,8 +408,8 @@ class TestFitting(TestCase): assert_equal(len(coef4), 5) assert_almost_equal(lag.lagval(x, coef4), y) # - coef2d = lag.lagfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -418,8 +418,8 @@ class TestFitting(TestCase): wcoef3 = lag.lagfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # - wcoef2d = lag.lagfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] @@ -452,7 +452,7 @@ class TestGauss(TestCase): v = lag.lagvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:,None] * vv * vd + vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct @@ -465,7 +465,7 @@ class TestMisc(TestCase) : def test_lagfromroots(self) : res = lag.lagfromroots([]) assert_almost_equal(trim(res), [1]) - for i in range(1,5) : + for i in range(1, 5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = lag.lagfromroots(roots) res = lag.lagval(roots, pol) @@ -477,7 +477,7 @@ class TestMisc(TestCase) : def test_lagroots(self) : assert_almost_equal(lag.lagroots([1]), []) assert_almost_equal(lag.lagroots([0, 1]), [1]) - for i in range(2,5) : + for i in range(2, 5) : tgt = np.linspace(0, 3, i) res = lag.lagroots(lag.lagfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) @@ -494,7 +494,7 @@ class TestMisc(TestCase) : assert_equal(lag.lagtrim(coef, 2), [0]) def test_lagline(self) : - assert_equal(lag.lagline(3,4), [7, -4]) + assert_equal(lag.lagline(3, 4), [7, -4]) def test_lag2poly(self) : for i in range(7) : diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 379bdee31..ae7a85851 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -16,10 +16,10 @@ L2 = np.array([-1, 0, 3])/2 L3 = np.array([ 0, -3, 0, 5])/2 L4 = np.array([ 3, 0, -30, 0, 35])/8 L5 = np.array([ 0, 15, 0, -70, 0, 63])/8 -L6 = np.array([-5, 0, 105, 0,-315, 0, 231])/16 -L7 = np.array([ 0,-35, 0, 315, 0, -693, 0, 429])/16 -L8 = np.array([35, 0,-1260, 0,6930, 0,-12012, 0,6435])/128 -L9 = np.array([ 0,315, 0,-4620, 0,18018, 0,-25740, 0,12155])/128 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 +L7 = np.array([ 0, -35, 0, 315, 0, -693, 0, 429])/16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 +L9 = np.array([ 0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] @@ -48,8 +48,8 @@ class TestArithmetic(TestCase) : def test_legadd(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = leg.legadd([0]*i + [1], [0]*j + [1]) @@ -58,8 +58,8 @@ class TestArithmetic(TestCase) : def test_legsub(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res 
= leg.legsub([0]*i + [1], [0]*j + [1]) @@ -67,7 +67,7 @@ class TestArithmetic(TestCase) : def test_legmulx(self): assert_equal(leg.legmulx([0]), [0]) - assert_equal(leg.legmulx([1]), [0,1]) + assert_equal(leg.legmulx([1]), [0, 1]) for i in range(1, 5): tmp = 2*i + 1 ser = [0]*i + [1] @@ -80,7 +80,7 @@ class TestArithmetic(TestCase) : pol1 = [0]*i + [1] val1 = leg.legval(self.x, pol1) for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) pol2 = [0]*j + [1] val2 = leg.legval(self.x, pol2) pol3 = leg.legmul(pol1, pol2) @@ -91,7 +91,7 @@ class TestArithmetic(TestCase) : def test_legdiv(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) ci = [0]*i + [1] cj = [0]*j + [1] tgt = leg.legadd(ci, cj) @@ -115,7 +115,7 @@ class TestEvaluation(TestCase) : assert_equal(leg.legval([], [1]).size, 0) #check normal input) - x = np.linspace(-1,1) + x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10) : msg = "At i=%d" % i @@ -129,8 +129,8 @@ class TestEvaluation(TestCase) : dims = [2]*i x = np.zeros(dims) assert_equal(leg.legval(x, [1]).shape, dims) - assert_equal(leg.legval(x, [1,0]).shape, dims) - assert_equal(leg.legval(x, [1,0,0]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) def test_legval2d(self): x1, x2, x3 = self.x @@ -145,9 +145,9 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = leg.legval2d(z, z, self.c2d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_legval3d(self): x1, x2, x3 = self.x @@ -162,7 +162,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -176,7 +176,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) @@ -190,7 +190,7 @@ class TestEvaluation(TestCase) : assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = leg.leggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) @@ -201,7 +201,7 @@ class TestIntegral(TestCase) : # check exceptions assert_raises(ValueError, leg.legint, [0], .5) assert_raises(ValueError, leg.legint, [0], -1) - assert_raises(ValueError, leg.legint, [0], 1, [0,0]) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) # test integration of zero polynomial for i in range(2, 5): @@ -239,7 +239,7 @@ class TestIntegral(TestCase) : # check multiple integrations with default k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -249,7 +249,7 @@ class TestIntegral(TestCase) : # check multiple integrations with defined k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -259,7 +259,7 @@ class TestIntegral(TestCase) : # check multiple integrations with lbnd for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -269,7 +269,7 @@ class TestIntegral(TestCase) : # check multiple integrations with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -309,14 +309,14 @@ class TestDerivative(TestCase) : 
# check that derivation is the inverse of integration for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = leg.legder(leg.legint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -397,10 +397,10 @@ class TestFitting(TestCase): assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1,1]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) # Test fit - x = np.linspace(0,2) + x = np.linspace(0, 2) y = f(x) # coef3 = leg.legfit(x, y, 3) @@ -411,8 +411,8 @@ class TestFitting(TestCase): assert_equal(len(coef4), 5) assert_almost_equal(leg.legval(x, coef4), y) # - coef2d = leg.legfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -421,8 +421,8 @@ class TestFitting(TestCase): wcoef3 = leg.legfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # - wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. x = [1, 1j, -1, -1j] @@ -455,7 +455,7 @@ class TestGauss(TestCase): v = leg.legvander(x, 99) vv = np.dot(v.T * w, v) vd = 1/np.sqrt(vv.diagonal()) - vv = vd[:,None] * vv * vd + vv = vd[:, None] * vv * vd assert_almost_equal(vv, np.eye(100)) # check that the integral of 1 is correct @@ -468,7 +468,7 @@ class TestMisc(TestCase) : def test_legfromroots(self) : res = leg.legfromroots([]) assert_almost_equal(trim(res), [1]) - for i in range(1,5) : + for i in range(1, 5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) pol = leg.legfromroots(roots) res = leg.legval(roots, pol) @@ -480,7 +480,7 @@ class TestMisc(TestCase) : def test_legroots(self) : assert_almost_equal(leg.legroots([1]), []) assert_almost_equal(leg.legroots([1, 2]), [-.5]) - for i in range(2,5) : + for i in range(2, 5) : tgt = np.linspace(-1, 1, i) res = leg.legroots(leg.legfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) @@ -497,7 +497,7 @@ class TestMisc(TestCase) : assert_equal(leg.legtrim(coef, 2), [0]) def test_legline(self) : - assert_equal(leg.legline(3,4), [3, 4]) + assert_equal(leg.legline(3, 4), [3, 4]) def test_leg2poly(self) : for i in range(10) : diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 583872978..373045aae 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -46,8 +46,8 @@ class TestArithmetic(TestCase) : def test_polyadd(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] += 1 res = poly.polyadd([0]*i + [1], [0]*j + [1]) @@ -56,8 +56,8 @@ class TestArithmetic(TestCase) : def test_polysub(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - tgt = 
np.zeros(max(i,j) + 1) + msg = "At i=%d, j=%d" % (i, j) + tgt = np.zeros(max(i, j) + 1) tgt[i] += 1 tgt[j] -= 1 res = poly.polysub([0]*i + [1], [0]*j + [1]) @@ -74,7 +74,7 @@ class TestArithmetic(TestCase) : def test_polymul(self) : for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) + msg = "At i=%d, j=%d" % (i, j) tgt = np.zeros(i + j + 1) tgt[i + j] += 1 res = poly.polymul([0]*i + [1], [0]*j + [1]) @@ -85,17 +85,17 @@ class TestArithmetic(TestCase) : assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) # check scalar division - quo, rem = poly.polydiv([2],[2]) + quo, rem = poly.polydiv([2], [2]) assert_equal((quo, rem), (1, 0)) - quo, rem = poly.polydiv([2,2],[2]) - assert_equal((quo, rem), ((1,1), 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) # check rest. for i in range(5) : for j in range(5) : - msg = "At i=%d, j=%d" % (i,j) - ci = [0]*i + [1,2] - cj = [0]*j + [1,2] + msg = "At i=%d, j=%d" % (i, j) + ci = [0]*i + [1, 2] + cj = [0]*j + [1, 2] tgt = poly.polyadd(ci, cj) quo, rem = poly.polydiv(tgt, ci) res = poly.polyadd(poly.polymul(quo, ci), rem) @@ -118,7 +118,7 @@ class TestEvaluation(TestCase): assert_equal(poly.polyval([], [1]).size, 0) #check normal input) - x = np.linspace(-1,1) + x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5) : tgt = y[i] @@ -133,8 +133,8 @@ class TestEvaluation(TestCase): dims = [2]*i x = np.zeros(dims) assert_equal(poly.polyval(x, [1]).shape, dims) - assert_equal(poly.polyval(x, [1,0]).shape, dims) - assert_equal(poly.polyval(x, [1,0,0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) def test_polyval2d(self): x1, x2, x3 = self.x @@ -149,9 +149,9 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) - assert_(res.shape == (2,3)) + assert_(res.shape == (2, 3)) def test_polyval3d(self): x1, x2, x3 = self.x @@ -166,7 +166,7 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -180,7 +180,7 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) assert_(res.shape == (2, 3)*2) @@ -194,7 +194,7 @@ class TestEvaluation(TestCase): assert_almost_equal(res, tgt) #test shape - z = np.ones((2,3)) + z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)*3) @@ -205,7 +205,7 @@ class TestIntegral(TestCase): # check exceptions assert_raises(ValueError, poly.polyint, [0], .5) assert_raises(ValueError, poly.polyint, [0], -1) - assert_raises(ValueError, poly.polyint, [0], 1, [0,0]) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) # test integration of zero polynomial for i in range(2, 5): @@ -238,7 +238,7 @@ class TestIntegral(TestCase): # check multiple integrations with default k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -248,7 +248,7 @@ class TestIntegral(TestCase): # check multiple integrations with defined k for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -258,7 +258,7 @@ class TestIntegral(TestCase): # check multiple integrations with lbnd for i in range(5) : - for j in range(2,5) : + 
for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -268,7 +268,7 @@ class TestIntegral(TestCase): # check multiple integrations with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : pol = [0]*i + [1] tgt = pol[:] for k in range(j) : @@ -308,14 +308,14 @@ class TestDerivative(TestCase) : # check that derivation is the inverse of integration for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = poly.polyder(poly.polyint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) # check derivation with scaling for i in range(5) : - for j in range(2,5) : + for j in range(2, 5) : tgt = [0]*i + [1] res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) assert_almost_equal(trim(res), trim(tgt)) @@ -403,16 +403,16 @@ class TestMisc(TestCase) : def test_polyfromroots(self) : res = poly.polyfromroots([]) assert_almost_equal(trim(res), [1]) - for i in range(1,5) : + for i in range(1, 5) : roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) tgt = Tlist[i] res = poly.polyfromroots(roots)*2**(i-1) - assert_almost_equal(trim(res),trim(tgt)) + assert_almost_equal(trim(res), trim(tgt)) def test_polyroots(self) : assert_almost_equal(poly.polyroots([1]), []) assert_almost_equal(poly.polyroots([1, 2]), [-.5]) - for i in range(2,5) : + for i in range(2, 5) : tgt = np.linspace(-1, 1, i) res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) @@ -429,10 +429,10 @@ class TestMisc(TestCase) : assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) - assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1,1]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) # Test fit - x = np.linspace(0,2) + x = np.linspace(0, 2) y = f(x) # coef3 = poly.polyfit(x, y, 3) @@ -443,8 +443,8 @@ class TestMisc(TestCase) : assert_equal(len(coef4), 5) assert_almost_equal(poly.polyval(x, coef4), y) # - coef2d = poly.polyfit(x, np.array([y,y]).T, 3) - assert_almost_equal(coef2d, np.array([coef3,coef3]).T) + coef2d = poly.polyfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) # test weighting w = np.zeros_like(x) yw = y.copy() @@ -453,8 +453,8 @@ class TestMisc(TestCase) : wcoef3 = poly.polyfit(x, yw, 3, w=w) assert_almost_equal(wcoef3, coef3) # - wcoef2d = poly.polyfit(x, np.array([yw,yw]).T, 3, w=w) - assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T) + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) # test scaling with complex values x points whose square # is zero when summed. 
x = [1, 1j, -1, -1j] @@ -473,7 +473,7 @@ class TestMisc(TestCase) : assert_equal(poly.polytrim(coef, 2), [0]) def test_polyline(self) : - assert_equal(poly.polyline(3,4), [3, 4]) + assert_equal(poly.polyline(3, 4), [3, 4]) if __name__ == "__main__": diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index 93c742abd..11b37aa1a 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -18,8 +18,8 @@ class TestMisc(TestCase) : def test_as_series(self) : # check exceptions assert_raises(ValueError, pu.as_series, [[]]) - assert_raises(ValueError, pu.as_series, [[[1,2]]]) - assert_raises(ValueError, pu.as_series, [[1],['a']]) + assert_raises(ValueError, pu.as_series, [[[1, 2]]]) + assert_raises(ValueError, pu.as_series, [[1], ['a']]) # check common types types = ['i', 'd', 'O'] for i in range(len(types)) : @@ -45,7 +45,7 @@ class TestDomain(TestCase) : def test_getdomain(self) : # test for real values x = [1, 10, 3, -1] - tgt = [-1,10] + tgt = [-1, 10] res = pu.getdomain(x) assert_almost_equal(res, tgt) @@ -57,8 +57,8 @@ class TestDomain(TestCase) : def test_mapdomain(self) : # test for real values - dom1 = [0,4] - dom2 = [1,3] + dom1 = [0, 4] + dom2 = [1, 3] tgt = dom2 res = pu. mapdomain(dom1, dom1, dom2) assert_almost_equal(res, tgt) @@ -72,24 +72,24 @@ class TestDomain(TestCase) : assert_almost_equal(res, tgt) # test for multidimensional arrays - dom1 = [0,4] - dom2 = [1,3] + dom1 = [0, 4] + dom2 = [1, 3] tgt = np.array([dom2, dom2]) x = np.array([dom1, dom1]) res = pu.mapdomain(x, dom1, dom2) assert_almost_equal(res, tgt) # test that subtypes are preserved. - dom1 = [0,4] - dom2 = [1,3] + dom1 = [0, 4] + dom2 = [1, 3] x = np.matrix([dom1, dom1]) res = pu.mapdomain(x, dom1, dom2) assert_(isinstance(res, np.matrix)) def test_mapparms(self) : # test for real values - dom1 = [0,4] - dom2 = [1,3] + dom1 = [0, 4] + dom2 = [1, 3] tgt = [1, .5] res = pu. 
mapparms(dom1, dom2) assert_almost_equal(res, tgt) diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index 96a2ea7d4..883597cb9 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -5,74 +5,74 @@ from numpy.testing import TestCase, run_module_suite, assert_ class test_str(TestCase): def test_polynomial_str(self): - res = str(poly.Polynomial([0,1])) + res = str(poly.Polynomial([0, 1])) tgt = 'poly([0., 1.])' assert_(res, tgt) def test_chebyshev_str(self): - res = str(poly.Chebyshev([0,1])) + res = str(poly.Chebyshev([0, 1])) tgt = 'leg([0., 1.])' assert_(res, tgt) def test_legendre_str(self): - res = str(poly.Legendre([0,1])) + res = str(poly.Legendre([0, 1])) tgt = 'leg([0., 1.])' assert_(res, tgt) def test_hermite_str(self): - res = str(poly.Hermite([0,1])) + res = str(poly.Hermite([0, 1])) tgt = 'herm([0., 1.])' assert_(res, tgt) def test_hermiteE_str(self): - res = str(poly.HermiteE([0,1])) + res = str(poly.HermiteE([0, 1])) tgt = 'herme([0., 1.])' assert_(res, tgt) def test_laguerre_str(self): - res = str(poly.Laguerre([0,1])) + res = str(poly.Laguerre([0, 1])) tgt = 'lag([0., 1.])' assert_(res, tgt) class test_repr(TestCase): def test_polynomial_str(self): - res = repr(poly.Polynomial([0,1])) + res = repr(poly.Polynomial([0, 1])) tgt = 'Polynomial([0., 1.])' assert_(res, tgt) def test_chebyshev_str(self): - res = repr(poly.Chebyshev([0,1])) + res = repr(poly.Chebyshev([0, 1])) tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])' assert_(res, tgt) def test_legendre_repr(self): - res = repr(poly.Legendre([0,1])) + res = repr(poly.Legendre([0, 1])) tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])' assert_(res, tgt) def test_hermite_repr(self): - res = repr(poly.Hermite([0,1])) + res = repr(poly.Hermite([0, 1])) tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])' assert_(res, tgt) def test_hermiteE_repr(self): - res = repr(poly.HermiteE([0,1])) + res = repr(poly.HermiteE([0, 1])) tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])' assert_(res, tgt) def test_laguerre_repr(self): - res = repr(poly.Laguerre([0,1])) + res = repr(poly.Laguerre([0, 1])) tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])' assert_(res, tgt) diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index 80723cec6..614b25a1c 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -100,7 +100,7 @@ with warnings.catch_warnings(): # Some aliases: ranf = random = sample = random_sample -__all__.extend(['ranf','random','sample']) +__all__.extend(['ranf', 'random', 'sample']) def __RandomState_ctor(): """Return a RandomState instance. diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c index d792cf86d..05f62f0cf 100644 --- a/numpy/random/mtrand/distributions.c +++ b/numpy/random/mtrand/distributions.c @@ -7,10 +7,10 @@ * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: - * + * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
@@ -21,9 +21,9 @@ */ /* The implementations of rk_hypergeometric_hyp(), rk_hypergeometric_hrua(), - * and rk_triangular() were adapted from Ivan Frohne's rv.py which has this + * and rk_triangular() were adapted from Ivan Frohne's rv.py which has this * license: - * + * * Copyright 1998 by Ivan Frohne; Wasilla, Alaska, U.S.A. * All Rights Reserved * @@ -52,8 +52,8 @@ #ifndef M_PI #define M_PI 3.14159265358979323846264338328 -#endif -/* log-gamma function to support some of these distributions. The +#endif +/* log-gamma function to support some of these distributions. The * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc. */ @@ -62,7 +62,7 @@ double loggam(double x) { double x0, x2, xp, gl, gl0; long k, n; - + static double a[10] = {8.333333333333333e-02,-2.777777777777778e-03, 7.936507936507937e-04,-5.952380952380952e-04, 8.417508417508418e-04,-1.917526917526918e-03, @@ -162,7 +162,7 @@ double rk_standard_gamma(rk_state *state, double shape) { do { - X = rk_gauss(state); + X = rk_gauss(state); V = 1.0 + c*X; } while (V <= 0.0); @@ -225,7 +225,7 @@ double rk_noncentral_chisquare(rk_state *state, double df, double nonc) double rk_f(rk_state *state, double dfnum, double dfden) { - return ((rk_chisquare(state, dfnum) * dfden) / + return ((rk_chisquare(state, dfnum) * dfden) / (rk_chisquare(state, dfden) * dfnum)); } @@ -241,7 +241,7 @@ long rk_binomial_btpe(rk_state *state, long n, double p) double a,u,v,s,F,rho,t,A,nrq,x1,x2,f1,f2,z,z2,w,w2,x; long m,y,k,i; - if (!(state->has_binomial) || + if (!(state->has_binomial) || (state->nsave != n) || (state->psave != p)) { @@ -380,7 +380,7 @@ long rk_binomial_inversion(rk_state *state, long n, double p) double q, qn, np, px, U; long X, bound; - if (!(state->has_binomial) || + if (!(state->has_binomial) || (state->nsave != n) || (state->psave != p)) { @@ -404,12 +404,12 @@ long rk_binomial_inversion(rk_state *state, long n, double p) while (U > px) { X++; - if (X > bound) + if (X > bound) { X = 0; px = qn; U = rk_double(state); - } else + } else { U -= px; px = ((n-X+1) * p * px)/(X*q); @@ -514,7 +514,7 @@ long rk_poisson_ptrs(rk_state *state, double lam) return k; } - + } } @@ -525,11 +525,11 @@ long rk_poisson(rk_state *state, double lam) { return rk_poisson_ptrs(state, lam); } - else if (lam == 0) + else if (lam == 0) { return 0; } - else + else { return rk_poisson_mult(state, lam); } @@ -551,7 +551,7 @@ double rk_standard_t(rk_state *state, double df) } /* Uses the rejection algorithm compared against the wrapped Cauchy - distribution suggested by Best and Fisher and documented in + distribution suggested by Best and Fisher and documented in Chapter 9 of Luc's Non-Uniform Random Variate Generation. 
http://cg.scs.carleton.ca/~luc/rnbookindex.html (but corrected to match the algorithm in R and Python) @@ -600,7 +600,7 @@ double rk_vonmises(rk_state *state, double mu, double kappa) if (neg) { mod *= -1; - } + } return mod; } @@ -624,12 +624,12 @@ double rk_power(rk_state *state, double a) double rk_laplace(rk_state *state, double loc, double scale) { double U; - + U = rk_double(state); if (U < 0.5) { U = loc + scale * log(U + U); - } else + } else { U = loc - scale * log(2.0 - U - U); } @@ -639,7 +639,7 @@ double rk_laplace(rk_state *state, double loc, double scale) double rk_gumbel(rk_state *state, double loc, double scale) { double U; - + U = 1.0 - rk_double(state); return loc - scale * log(-log(U)); } @@ -647,7 +647,7 @@ double rk_gumbel(rk_state *state, double loc, double scale) double rk_logistic(rk_state *state, double loc, double scale) { double U; - + U = rk_double(state); return loc + scale * log(U/(1.0 - U)); } @@ -666,7 +666,7 @@ double rk_wald(rk_state *state, double mean, double scale) { double U, X, Y; double mu_2l; - + mu_2l = mean / (2*scale); Y = rk_gauss(state); Y = mean*Y*Y; @@ -710,7 +710,7 @@ long rk_geometric_search(rk_state *state, double p) double U; long X; double sum, prod, q; - + X = 1; sum = prod = p; q = 1.0 - p; @@ -744,10 +744,10 @@ long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample) { long d1, K, Z; double d2, U, Y; - + d1 = bad + good - sample; d2 = (double)min(bad, good); - + Y = d2; K = sample; while (Y > 0.0) @@ -772,7 +772,7 @@ long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample) double d4, d5, d6, d7, d8, d10, d11; long Z; double T, W, X, Y; - + mingoodbad = min(good, bad); popsize = good + bad; maxgoodbad = max(good, bad); @@ -783,39 +783,39 @@ long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample) d7 = sqrt((popsize - m) * sample * d4 *d5 / (popsize-1) + 0.5); d8 = D1*d7 + D2; d9 = (long)floor((double)((m+1)*(mingoodbad+1))/(popsize+2)); - d10 = (loggam(d9+1) + loggam(mingoodbad-d9+1) + loggam(m-d9+1) + + d10 = (loggam(d9+1) + loggam(mingoodbad-d9+1) + loggam(m-d9+1) + loggam(maxgoodbad-m+d9+1)); d11 = min(min(m, mingoodbad)+1.0, floor(d6+16*d7)); /* 16 for 16-decimal-digit precision in D1 and D2 */ - + while (1) { X = rk_double(state); Y = rk_double(state); W = d6 + d8*(Y- 0.5)/X; - + /* fast rejection: */ if ((W < 0.0) || (W >= d11)) continue; - + Z = (long)floor(W); T = d10 - (loggam(Z+1) + loggam(mingoodbad-Z+1) + loggam(m-Z+1) + loggam(maxgoodbad-m+Z+1)); - + /* fast acceptance: */ if ((X*(4.0-X)-3.0) <= T) break; - + /* fast rejection: */ if (X*(X-T) >= 1) continue; if (2.0*log(X) <= T) break; /* acceptance */ } - + /* this is a correction to HRUA* by Ivan Frohne in rv.py */ if (good > bad) Z = m - Z; - + /* another fix from rv.py to allow sample to exceed popsize/2 */ if (m < sample) Z = good - Z; - + return Z; } #undef D1 @@ -836,20 +836,20 @@ double rk_triangular(rk_state *state, double left, double mode, double right) { double base, leftbase, ratio, leftprod, rightprod; double U; - + base = right - left; leftbase = mode - left; ratio = leftbase / base; leftprod = leftbase*base; rightprod = (right - mode)*base; - + U = rk_double(state); if (U <= ratio) { return left + sqrt(U*leftprod); - } else + } else { - return right - sqrt((1.0 - U) * rightprod); + return right - sqrt((1.0 - U) * rightprod); } } @@ -857,7 +857,7 @@ long rk_logseries(rk_state *state, double p) { double q, r, U, V; long result; - + r = log(1.0 - p); while (1) { diff --git 
a/numpy/random/mtrand/distributions.h b/numpy/random/mtrand/distributions.h index 6f60a4ff3..0b42bc794 100644 --- a/numpy/random/mtrand/distributions.h +++ b/numpy/random/mtrand/distributions.h @@ -7,10 +7,10 @@ * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: - * + * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. @@ -30,11 +30,11 @@ extern "C" { #endif /* References: - * + * * Devroye, Luc. _Non-Uniform Random Variate Generation_. * Springer-Verlag, New York, 1986. * http://cgm.cs.mcgill.ca/~luc/rnbookindex.html - * + * * Kachitvichyanukul, V. and Schmeiser, B. W. Binomial Random Variate * Generation. Communications of the ACM, 31, 2 (February, 1988) 216. * @@ -42,15 +42,15 @@ extern "C" { * Variables. Insurance: Mathematics and Economics, (to appear) * http://citeseer.csail.mit.edu/151115.html * - * Marsaglia, G. and Tsang, W. W. A Simple Method for Generating Gamma + * Marsaglia, G. and Tsang, W. W. A Simple Method for Generating Gamma * Variables. ACM Transactions on Mathematical Software, Vol. 26, No. 3, * September 2000, Pages 363–372. */ - + /* Normal distribution with mean=loc and standard deviation=scale. */ extern double rk_normal(rk_state *state, double loc, double scale); -/* Standard exponential distribution (mean=1) computed by inversion of the +/* Standard exponential distribution (mean=1) computed by inversion of the * CDF. */ extern double rk_standard_exponential(rk_state *state); @@ -60,10 +60,10 @@ extern double rk_exponential(rk_state *state, double scale); /* Uniform distribution on interval [loc, loc+scale). */ extern double rk_uniform(rk_state *state, double loc, double scale); -/* Standard gamma distribution with shape parameter. +/* Standard gamma distribution with shape parameter. * When shape < 1, the algorithm given by (Devroye p. 304) is used. * When shape == 1, a Exponential variate is generated. - * When shape > 1, the small and fast method of (Marsaglia and Tsang 2000) + * When shape > 1, the small and fast method of (Marsaglia and Tsang 2000) * is used. */ extern double rk_standard_gamma(rk_state *state, double shape); @@ -119,7 +119,7 @@ extern long rk_poisson_mult(rk_state *state, double lam); /* Poisson distribution computer by the PTRS algorithm. */ extern long rk_poisson_ptrs(rk_state *state, double lam); -/* Standard Cauchy distribution computed by dividing standard gaussians +/* Standard Cauchy distribution computed by dividing standard gaussians * (Devroye p. 451). 
*/ extern double rk_standard_cauchy(rk_state *state); diff --git a/numpy/random/mtrand/mtrand.c b/numpy/random/mtrand/mtrand.c index b1c92e570..a508efba1 100644 --- a/numpy/random/mtrand/mtrand.c +++ b/numpy/random/mtrand/mtrand.c @@ -486,7 +486,7 @@ struct __pyx_obj_6mtrand_RandomState; /* "mtrand.pyx":107 * long rk_logseries(rk_state *state, double p) - * + * * ctypedef double (* rk_cont0)(rk_state *state) # <<<<<<<<<<<<<< * ctypedef double (* rk_cont1)(rk_state *state, double a) * ctypedef double (* rk_cont2)(rk_state *state, double a, double b) @@ -494,7 +494,7 @@ struct __pyx_obj_6mtrand_RandomState; typedef double (*__pyx_t_6mtrand_rk_cont0)(rk_state *); /* "mtrand.pyx":108 - * + * * ctypedef double (* rk_cont0)(rk_state *state) * ctypedef double (* rk_cont1)(rk_state *state, double a) # <<<<<<<<<<<<<< * ctypedef double (* rk_cont2)(rk_state *state, double a, double b) @@ -507,7 +507,7 @@ typedef double (*__pyx_t_6mtrand_rk_cont1)(rk_state *, double); * ctypedef double (* rk_cont1)(rk_state *state, double a) * ctypedef double (* rk_cont2)(rk_state *state, double a, double b) # <<<<<<<<<<<<<< * ctypedef double (* rk_cont3)(rk_state *state, double a, double b, double c) - * + * */ typedef double (*__pyx_t_6mtrand_rk_cont2)(rk_state *, double, double); @@ -515,14 +515,14 @@ typedef double (*__pyx_t_6mtrand_rk_cont2)(rk_state *, double, double); * ctypedef double (* rk_cont1)(rk_state *state, double a) * ctypedef double (* rk_cont2)(rk_state *state, double a, double b) * ctypedef double (* rk_cont3)(rk_state *state, double a, double b, double c) # <<<<<<<<<<<<<< - * + * * ctypedef long (* rk_disc0)(rk_state *state) */ typedef double (*__pyx_t_6mtrand_rk_cont3)(rk_state *, double, double, double); /* "mtrand.pyx":112 * ctypedef double (* rk_cont3)(rk_state *state, double a, double b, double c) - * + * * ctypedef long (* rk_disc0)(rk_state *state) # <<<<<<<<<<<<<< * ctypedef long (* rk_discnp)(rk_state *state, long n, double p) * ctypedef long (* rk_discdd)(rk_state *state, double n, double p) @@ -530,7 +530,7 @@ typedef double (*__pyx_t_6mtrand_rk_cont3)(rk_state *, double, double, double); typedef long (*__pyx_t_6mtrand_rk_disc0)(rk_state *); /* "mtrand.pyx":113 - * + * * ctypedef long (* rk_disc0)(rk_state *state) * ctypedef long (* rk_discnp)(rk_state *state, long n, double p) # <<<<<<<<<<<<<< * ctypedef long (* rk_discdd)(rk_state *state, double n, double p) @@ -552,7 +552,7 @@ typedef long (*__pyx_t_6mtrand_rk_discdd)(rk_state *, double, double); * ctypedef long (* rk_discdd)(rk_state *state, double n, double p) * ctypedef long (* rk_discnmN)(rk_state *state, long n, long m, long N) # <<<<<<<<<<<<<< * ctypedef long (* rk_discd)(rk_state *state, double a) - * + * */ typedef long (*__pyx_t_6mtrand_rk_discnmN)(rk_state *, long, long, long); @@ -560,14 +560,14 @@ typedef long (*__pyx_t_6mtrand_rk_discnmN)(rk_state *, long, long, long); * ctypedef long (* rk_discdd)(rk_state *state, double n, double p) * ctypedef long (* rk_discnmN)(rk_state *state, long n, long m, long N) * ctypedef long (* rk_discd)(rk_state *state, double a) # <<<<<<<<<<<<<< - * - * + * + * */ typedef long (*__pyx_t_6mtrand_rk_discd)(rk_state *, double); /* "mtrand.pyx":525 * return sum - * + * * cdef class RandomState: # <<<<<<<<<<<<<< * """ * RandomState(seed=None) @@ -1629,7 +1629,7 @@ static PyObject *__pyx_k_tuple_198; /* "mtrand.pyx":129 * import operator - * + * * cdef object cont0_array(rk_state *state, rk_cont0 func, object size): # <<<<<<<<<<<<<< * cdef double *array_data * cdef ndarray array "arrayObject" @@ 
-1654,7 +1654,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":135 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state) * else: @@ -1663,7 +1663,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m if (__pyx_t_1) { /* "mtrand.pyx":136 - * + * * if size is None: * return func(state) # <<<<<<<<<<<<<< * else: @@ -1747,7 +1747,7 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m * for i from 0 <= i < length: * array_data[i] = func(state) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state); } @@ -1756,8 +1756,8 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m * for i from 0 <= i < length: * array_data[i] = func(state) * return array # <<<<<<<<<<<<<< - * - * + * + * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)arrayObject)); @@ -1782,8 +1782,8 @@ static PyObject *__pyx_f_6mtrand_cont0_array(rk_state *__pyx_v_state, __pyx_t_6m } /* "mtrand.pyx":146 - * - * + * + * * cdef object cont1_array_sc(rk_state *state, rk_cont1 func, object size, double a): # <<<<<<<<<<<<<< * cdef double *array_data * cdef ndarray array "arrayObject" @@ -1808,7 +1808,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":152 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, a) * else: @@ -1817,7 +1817,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t if (__pyx_t_1) { /* "mtrand.pyx":153 - * + * * if size is None: * return func(state, a) # <<<<<<<<<<<<<< * else: @@ -1901,7 +1901,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a); } @@ -1910,7 +1910,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a) * return array # <<<<<<<<<<<<<< - * + * * cdef object cont1_array(rk_state *state, rk_cont1 func, object size, ndarray oa): */ __Pyx_XDECREF(__pyx_r); @@ -1937,7 +1937,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":162 * return array - * + * * cdef object cont1_array(rk_state *state, rk_cont1 func, object size, ndarray oa): # <<<<<<<<<<<<<< * cdef double *array_data * cdef double *oa_data @@ -1965,7 +1965,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":171 * cdef broadcast multi - * + * * if size is None: # <<<<<<<<<<<<<< * array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa), * PyArray_DIMS(oa) , NPY_DOUBLE) @@ -2171,7 +2171,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m * array_data[i] = func(state, oa_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<< * return array - * + * */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 1); } @@ -2182,7 +2182,7 @@ static PyObject *__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m * array_data[i] = func(state, oa_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) * return array # <<<<<<<<<<<<<< - * + * * cdef object cont2_array_sc(rk_state *state, rk_cont2 func, object size, double a, */ __Pyx_XDECREF(__pyx_r); @@ -2209,7 +2209,7 @@ static PyObject 
*__pyx_f_6mtrand_cont1_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":193 * return array - * + * * cdef object cont2_array_sc(rk_state *state, rk_cont2 func, object size, double a, # <<<<<<<<<<<<<< * double b): * cdef double *array_data @@ -2234,7 +2234,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":200 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, a, b) * else: @@ -2243,7 +2243,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t if (__pyx_t_1) { /* "mtrand.pyx":201 - * + * * if size is None: * return func(state, a, b) # <<<<<<<<<<<<<< * else: @@ -2327,7 +2327,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a, b) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b); } @@ -2336,8 +2336,8 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a, b) * return array # <<<<<<<<<<<<<< - * - * + * + * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)arrayObject)); @@ -2362,8 +2362,8 @@ static PyObject *__pyx_f_6mtrand_cont2_array_sc(rk_state *__pyx_v_state, __pyx_t } /* "mtrand.pyx":211 - * - * + * + * * cdef object cont2_array(rk_state *state, rk_cont2 func, object size, # <<<<<<<<<<<<<< * ndarray oa, ndarray ob): * cdef double *array_data @@ -2390,7 +2390,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":221 * cdef broadcast multi - * + * * if size is None: # <<<<<<<<<<<<<< * multi = <broadcast> PyArray_MultiIterNew(2, <void *>oa, <void *>ob) * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) @@ -2399,7 +2399,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m if (__pyx_t_1) { /* "mtrand.pyx":222 - * + * * if size is None: * multi = <broadcast> PyArray_MultiIterNew(2, <void *>oa, <void *>ob) # <<<<<<<<<<<<<< * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) @@ -2623,7 +2623,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m * PyArray_MultiIter_NEXTi(multi, 1) * PyArray_MultiIter_NEXTi(multi, 2) # <<<<<<<<<<<<<< * return array - * + * */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 2); } @@ -2634,7 +2634,7 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m * PyArray_MultiIter_NEXTi(multi, 1) * PyArray_MultiIter_NEXTi(multi, 2) * return array # <<<<<<<<<<<<<< - * + * * cdef object cont3_array_sc(rk_state *state, rk_cont3 func, object size, double a, */ __Pyx_XDECREF(__pyx_r); @@ -2660,10 +2660,10 @@ static PyObject *__pyx_f_6mtrand_cont2_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":244 * return array - * + * * cdef object cont3_array_sc(rk_state *state, rk_cont3 func, object size, double a, # <<<<<<<<<<<<<< * double b, double c): - * + * */ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t_6mtrand_rk_cont3 __pyx_v_func, PyObject *__pyx_v_size, double __pyx_v_a, double __pyx_v_b, double __pyx_v_c) { @@ -2685,7 +2685,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":252 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, a, b, c) * else: @@ -2694,7 +2694,7 @@ static PyObject 
*__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t if (__pyx_t_1) { /* "mtrand.pyx":253 - * + * * if size is None: * return func(state, a, b, c) # <<<<<<<<<<<<<< * else: @@ -2778,7 +2778,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a, b, c) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a, __pyx_v_b, __pyx_v_c); } @@ -2787,7 +2787,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a, b, c) * return array # <<<<<<<<<<<<<< - * + * * cdef object cont3_array(rk_state *state, rk_cont3 func, object size, ndarray oa, */ __Pyx_XDECREF(__pyx_r); @@ -2814,10 +2814,10 @@ static PyObject *__pyx_f_6mtrand_cont3_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":262 * return array - * + * * cdef object cont3_array(rk_state *state, rk_cont3 func, object size, ndarray oa, # <<<<<<<<<<<<<< * ndarray ob, ndarray oc): - * + * */ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6mtrand_rk_cont3 __pyx_v_func, PyObject *__pyx_v_size, PyArrayObject *__pyx_v_oa, PyArrayObject *__pyx_v_ob, PyArrayObject *__pyx_v_oc) { @@ -2842,7 +2842,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":274 * cdef broadcast multi - * + * * if size is None: # <<<<<<<<<<<<<< * multi = <broadcast> PyArray_MultiIterNew(3, <void *>oa, <void *>ob, <void *>oc) * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) @@ -2851,7 +2851,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m if (__pyx_t_1) { /* "mtrand.pyx":275 - * + * * if size is None: * multi = <broadcast> PyArray_MultiIterNew(3, <void *>oa, <void *>ob, <void *>oc) # <<<<<<<<<<<<<< * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_DOUBLE) @@ -3084,7 +3084,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m * array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) * PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<< * return array - * + * */ PyArray_MultiIter_NEXT(__pyx_v_multi); } @@ -3095,7 +3095,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m * array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0]) * PyArray_MultiIter_NEXT(multi) * return array # <<<<<<<<<<<<<< - * + * * cdef object disc0_array(rk_state *state, rk_disc0 func, object size): */ __Pyx_XDECREF(__pyx_r); @@ -3121,7 +3121,7 @@ static PyObject *__pyx_f_6mtrand_cont3_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":299 * return array - * + * * cdef object disc0_array(rk_state *state, rk_disc0 func, object size): # <<<<<<<<<<<<<< * cdef long *array_data * cdef ndarray array "arrayObject" @@ -3146,7 +3146,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":305 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state) * else: @@ -3155,7 +3155,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m if (__pyx_t_1) { /* "mtrand.pyx":306 - * + * * if size is None: * return func(state) # <<<<<<<<<<<<<< * else: @@ -3234,7 +3234,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m * for i from 0 <= i < length: * array_data[i] = func(state) # <<<<<<<<<<<<<< * return 
array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state); } @@ -3243,7 +3243,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m * for i from 0 <= i < length: * array_data[i] = func(state) * return array # <<<<<<<<<<<<<< - * + * * cdef object discnp_array_sc(rk_state *state, rk_discnp func, object size, long n, double p): */ __Pyx_XDECREF(__pyx_r); @@ -3270,7 +3270,7 @@ static PyObject *__pyx_f_6mtrand_disc0_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":315 * return array - * + * * cdef object discnp_array_sc(rk_state *state, rk_discnp func, object size, long n, double p): # <<<<<<<<<<<<<< * cdef long *array_data * cdef ndarray array "arrayObject" @@ -3295,7 +3295,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_ /* "mtrand.pyx":321 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, n, p) * else: @@ -3304,7 +3304,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_ if (__pyx_t_1) { /* "mtrand.pyx":322 - * + * * if size is None: * return func(state, n, p) # <<<<<<<<<<<<<< * else: @@ -3383,7 +3383,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_ * for i from 0 <= i < length: * array_data[i] = func(state, n, p) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p); } @@ -3392,7 +3392,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_ * for i from 0 <= i < length: * array_data[i] = func(state, n, p) * return array # <<<<<<<<<<<<<< - * + * * cdef object discnp_array(rk_state *state, rk_discnp func, object size, ndarray on, ndarray op): */ __Pyx_XDECREF(__pyx_r); @@ -3419,7 +3419,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array_sc(rk_state *__pyx_v_state, __pyx_ /* "mtrand.pyx":331 * return array - * + * * cdef object discnp_array(rk_state *state, rk_discnp func, object size, ndarray on, ndarray op): # <<<<<<<<<<<<<< * cdef long *array_data * cdef ndarray array "arrayObject" @@ -3446,7 +3446,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6 /* "mtrand.pyx":340 * cdef broadcast multi - * + * * if size is None: # <<<<<<<<<<<<<< * multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op) * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) @@ -3455,7 +3455,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6 if (__pyx_t_1) { /* "mtrand.pyx":341 - * + * * if size is None: * multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op) # <<<<<<<<<<<<<< * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) @@ -3665,7 +3665,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6 * array_data[i] = func(state, on_data[0], op_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXTi(multi, 2) - * + * */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 1); @@ -3673,7 +3673,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6 * array_data[i] = func(state, on_data[0], op_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) * PyArray_MultiIter_NEXTi(multi, 2) # <<<<<<<<<<<<<< - * + * * return array */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 2); @@ -3683,9 +3683,9 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6 /* "mtrand.pyx":362 * PyArray_MultiIter_NEXTi(multi, 
2) - * + * * return array # <<<<<<<<<<<<<< - * + * * cdef object discdd_array_sc(rk_state *state, rk_discdd func, object size, double n, double p): */ __Pyx_XDECREF(__pyx_r); @@ -3711,7 +3711,7 @@ static PyObject *__pyx_f_6mtrand_discnp_array(rk_state *__pyx_v_state, __pyx_t_6 /* "mtrand.pyx":364 * return array - * + * * cdef object discdd_array_sc(rk_state *state, rk_discdd func, object size, double n, double p): # <<<<<<<<<<<<<< * cdef long *array_data * cdef ndarray array "arrayObject" @@ -3736,7 +3736,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_ /* "mtrand.pyx":370 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, n, p) * else: @@ -3745,7 +3745,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_ if (__pyx_t_1) { /* "mtrand.pyx":371 - * + * * if size is None: * return func(state, n, p) # <<<<<<<<<<<<<< * else: @@ -3824,7 +3824,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_ * for i from 0 <= i < length: * array_data[i] = func(state, n, p) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_p); } @@ -3833,7 +3833,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_ * for i from 0 <= i < length: * array_data[i] = func(state, n, p) * return array # <<<<<<<<<<<<<< - * + * * cdef object discdd_array(rk_state *state, rk_discdd func, object size, ndarray on, ndarray op): */ __Pyx_XDECREF(__pyx_r); @@ -3860,7 +3860,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array_sc(rk_state *__pyx_v_state, __pyx_ /* "mtrand.pyx":380 * return array - * + * * cdef object discdd_array(rk_state *state, rk_discdd func, object size, ndarray on, ndarray op): # <<<<<<<<<<<<<< * cdef long *array_data * cdef ndarray array "arrayObject" @@ -3887,7 +3887,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6 /* "mtrand.pyx":389 * cdef broadcast multi - * + * * if size is None: # <<<<<<<<<<<<<< * multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op) * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) @@ -3896,7 +3896,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6 if (__pyx_t_1) { /* "mtrand.pyx":390 - * + * * if size is None: * multi = <broadcast> PyArray_MultiIterNew(2, <void *>on, <void *>op) # <<<<<<<<<<<<<< * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) @@ -4106,7 +4106,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6 * array_data[i] = func(state, on_data[0], op_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXTi(multi, 2) - * + * */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 1); @@ -4114,7 +4114,7 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6 * array_data[i] = func(state, on_data[0], op_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) * PyArray_MultiIter_NEXTi(multi, 2) # <<<<<<<<<<<<<< - * + * * return array */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 2); @@ -4124,9 +4124,9 @@ static PyObject *__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6 /* "mtrand.pyx":411 * PyArray_MultiIter_NEXTi(multi, 2) - * + * * return array # <<<<<<<<<<<<<< - * + * * cdef object discnmN_array_sc(rk_state *state, rk_discnmN func, object size, */ __Pyx_XDECREF(__pyx_r); @@ -4152,7 +4152,7 @@ static PyObject 
*__pyx_f_6mtrand_discdd_array(rk_state *__pyx_v_state, __pyx_t_6 /* "mtrand.pyx":413 * return array - * + * * cdef object discnmN_array_sc(rk_state *state, rk_discnmN func, object size, # <<<<<<<<<<<<<< * long n, long m, long N): * cdef long *array_data @@ -4177,7 +4177,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx /* "mtrand.pyx":420 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, n, m, N) * else: @@ -4186,7 +4186,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx if (__pyx_t_1) { /* "mtrand.pyx":421 - * + * * if size is None: * return func(state, n, m, N) # <<<<<<<<<<<<<< * else: @@ -4265,7 +4265,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx * for i from 0 <= i < length: * array_data[i] = func(state, n, m, N) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_n, __pyx_v_m, __pyx_v_N); } @@ -4274,7 +4274,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx * for i from 0 <= i < length: * array_data[i] = func(state, n, m, N) * return array # <<<<<<<<<<<<<< - * + * * cdef object discnmN_array(rk_state *state, rk_discnmN func, object size, */ __Pyx_XDECREF(__pyx_r); @@ -4301,7 +4301,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array_sc(rk_state *__pyx_v_state, __pyx /* "mtrand.pyx":430 * return array - * + * * cdef object discnmN_array(rk_state *state, rk_discnmN func, object size, # <<<<<<<<<<<<<< * ndarray on, ndarray om, ndarray oN): * cdef long *array_data @@ -4329,7 +4329,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_ /* "mtrand.pyx":441 * cdef broadcast multi - * + * * if size is None: # <<<<<<<<<<<<<< * multi = <broadcast> PyArray_MultiIterNew(3, <void *>on, <void *>om, <void *>oN) * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) @@ -4338,7 +4338,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_ if (__pyx_t_1) { /* "mtrand.pyx":442 - * + * * if size is None: * multi = <broadcast> PyArray_MultiIterNew(3, <void *>on, <void *>om, <void *>oN) # <<<<<<<<<<<<<< * array = <ndarray> PyArray_SimpleNew(multi.nd, multi.dimensions, NPY_LONG) @@ -4557,7 +4557,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_ * oN_data = <long *>PyArray_MultiIter_DATA(multi, 3) * array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) # <<<<<<<<<<<<<< * PyArray_MultiIter_NEXT(multi) - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (__pyx_v_on_data[0]), (__pyx_v_om_data[0]), (__pyx_v_oN_data[0])); @@ -4565,7 +4565,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_ * oN_data = <long *>PyArray_MultiIter_DATA(multi, 3) * array_data[i] = func(state, on_data[0], om_data[0], oN_data[0]) * PyArray_MultiIter_NEXT(multi) # <<<<<<<<<<<<<< - * + * * return array */ PyArray_MultiIter_NEXT(__pyx_v_multi); @@ -4575,9 +4575,9 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_ /* "mtrand.pyx":465 * PyArray_MultiIter_NEXT(multi) - * + * * return array # <<<<<<<<<<<<<< - * + * * cdef object discd_array_sc(rk_state *state, rk_discd func, object size, double a): */ __Pyx_XDECREF(__pyx_r); @@ -4603,7 +4603,7 @@ static PyObject *__pyx_f_6mtrand_discnmN_array(rk_state *__pyx_v_state, __pyx_t_ /* "mtrand.pyx":467 * return array - * + * * cdef object 
discd_array_sc(rk_state *state, rk_discd func, object size, double a): # <<<<<<<<<<<<<< * cdef long *array_data * cdef ndarray array "arrayObject" @@ -4628,7 +4628,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":473 * cdef npy_intp i - * + * * if size is None: # <<<<<<<<<<<<<< * return func(state, a) * else: @@ -4637,7 +4637,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t if (__pyx_t_1) { /* "mtrand.pyx":474 - * + * * if size is None: * return func(state, a) # <<<<<<<<<<<<<< * else: @@ -4716,7 +4716,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a) # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, __pyx_v_a); } @@ -4725,7 +4725,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t * for i from 0 <= i < length: * array_data[i] = func(state, a) * return array # <<<<<<<<<<<<<< - * + * * cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa): */ __Pyx_XDECREF(__pyx_r); @@ -4752,7 +4752,7 @@ static PyObject *__pyx_f_6mtrand_discd_array_sc(rk_state *__pyx_v_state, __pyx_t /* "mtrand.pyx":483 * return array - * + * * cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa): # <<<<<<<<<<<<<< * cdef long *array_data * cdef double *oa_data @@ -4780,7 +4780,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":492 * cdef flatiter itera - * + * * if size is None: # <<<<<<<<<<<<<< * array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa), * PyArray_DIMS(oa), NPY_LONG) @@ -4981,7 +4981,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m * array_data[i] = func(state, oa_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) # <<<<<<<<<<<<<< * return array - * + * */ PyArray_MultiIter_NEXTi(__pyx_v_multi, 1); } @@ -4992,7 +4992,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m * array_data[i] = func(state, oa_data[0]) * PyArray_MultiIter_NEXTi(multi, 1) * return array # <<<<<<<<<<<<<< - * + * * cdef double kahan_sum(double *darr, npy_intp n): */ __Pyx_XDECREF(__pyx_r); @@ -5019,7 +5019,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *__pyx_v_state, __pyx_t_6m /* "mtrand.pyx":513 * return array - * + * * cdef double kahan_sum(double *darr, npy_intp n): # <<<<<<<<<<<<<< * cdef double c, y, t, sum * cdef npy_intp i @@ -5096,7 +5096,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n * c = (t-sum) - y * sum = t # <<<<<<<<<<<<<< * return sum - * + * */ __pyx_v_sum = __pyx_v_t; } @@ -5105,7 +5105,7 @@ static double __pyx_f_6mtrand_kahan_sum(double *__pyx_v_darr, npy_intp __pyx_v_n * c = (t-sum) - y * sum = t * return sum # <<<<<<<<<<<<<< - * + * * cdef class RandomState: */ __pyx_r = __pyx_v_sum; @@ -5133,10 +5133,10 @@ static int __pyx_pw_6mtrand_11RandomState_1__init__(PyObject *__pyx_v_self, PyOb /* "mtrand.pyx":561 * poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 - * + * * def __init__(self, seed=None): # <<<<<<<<<<<<<< * self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state)) - * + * */ values[0] = ((PyObject *)Py_None); if (unlikely(__pyx_kwds)) { @@ -5192,19 +5192,19 @@ static int __pyx_pf_6mtrand_11RandomState___init__(struct __pyx_obj_6mtrand_Rand __Pyx_RefNannySetupContext("__init__", 0); /* 
"mtrand.pyx":562 - * + * * def __init__(self, seed=None): * self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state)) # <<<<<<<<<<<<<< - * + * * self.seed(seed) */ __pyx_v_self->internal_state = ((rk_state *)PyMem_Malloc((sizeof(rk_state)))); /* "mtrand.pyx":564 * self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state)) - * + * * self.seed(seed) # <<<<<<<<<<<<<< - * + * * def __dealloc__(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__seed); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 564; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -5244,7 +5244,7 @@ static void __pyx_pw_6mtrand_11RandomState_3__dealloc__(PyObject *__pyx_v_self) /* "mtrand.pyx":566 * self.seed(seed) - * + * * def __dealloc__(self): # <<<<<<<<<<<<<< * if self.internal_state != NULL: * PyMem_Free(self.internal_state) @@ -5256,7 +5256,7 @@ static void __pyx_pf_6mtrand_11RandomState_2__dealloc__(struct __pyx_obj_6mtrand __Pyx_RefNannySetupContext("__dealloc__", 0); /* "mtrand.pyx":567 - * + * * def __dealloc__(self): * if self.internal_state != NULL: # <<<<<<<<<<<<<< * PyMem_Free(self.internal_state) @@ -5270,7 +5270,7 @@ static void __pyx_pf_6mtrand_11RandomState_2__dealloc__(struct __pyx_obj_6mtrand * if self.internal_state != NULL: * PyMem_Free(self.internal_state) # <<<<<<<<<<<<<< * self.internal_state = NULL - * + * */ PyMem_Free(__pyx_v_self->internal_state); @@ -5278,7 +5278,7 @@ static void __pyx_pf_6mtrand_11RandomState_2__dealloc__(struct __pyx_obj_6mtrand * if self.internal_state != NULL: * PyMem_Free(self.internal_state) * self.internal_state = NULL # <<<<<<<<<<<<<< - * + * * def seed(self, seed=None): */ __pyx_v_self->internal_state = NULL; @@ -5306,7 +5306,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, Py /* "mtrand.pyx":571 * self.internal_state = NULL - * + * * def seed(self, seed=None): # <<<<<<<<<<<<<< * """ * seed(seed=None) @@ -5477,7 +5477,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_4seed(struct __pyx_obj_6mtrand_R * obj = <ndarray>PyArray_ContiguousFromObject(seed, NPY_LONG, 1, 1) * init_by_array(self.internal_state, <unsigned long *>PyArray_DATA(obj), * PyArray_DIM(obj, 0)) # <<<<<<<<<<<<<< - * + * * def get_state(self): */ init_by_array(__pyx_v_self->internal_state, ((unsigned long *)PyArray_DATA(arrayObject_obj)), PyArray_DIM(arrayObject_obj, 0)); @@ -5513,7 +5513,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_7get_state(PyObject *__pyx_v_sel /* "mtrand.pyx":604 * PyArray_DIM(obj, 0)) - * + * * def get_state(self): # <<<<<<<<<<<<<< * """ * get_state() @@ -5617,7 +5617,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr * state = <ndarray>np.asarray(state, np.uint32) * return ('MT19937', state, self.internal_state.pos, # <<<<<<<<<<<<<< * self.internal_state.has_gauss, self.internal_state.gauss) - * + * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromLong(__pyx_v_self->internal_state->pos); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -5627,7 +5627,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_6get_state(struct __pyx_obj_6mtr * state = <ndarray>np.asarray(state, np.uint32) * return ('MT19937', state, self.internal_state.pos, * self.internal_state.has_gauss, self.internal_state.gauss) # <<<<<<<<<<<<<< - * + * * def set_state(self, state): */ __pyx_t_2 = PyInt_FromLong(__pyx_v_self->internal_state->has_gauss); if (unlikely(!__pyx_t_2)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 639; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -5685,7 +5685,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_9set_state(PyObject *__pyx_v_sel /* "mtrand.pyx":641 * self.internal_state.has_gauss, self.internal_state.gauss) - * + * * def set_state(self, state): # <<<<<<<<<<<<<< * """ * set_state(state) @@ -5781,11 +5781,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); } else { - __pyx_t_3 = PyList_GET_ITEM(sequence, 0); - __pyx_t_4 = PyList_GET_ITEM(sequence, 1); + __pyx_t_3 = PyList_GET_ITEM(sequence, 0); + __pyx_t_4 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); @@ -5883,11 +5883,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); } else { - __pyx_t_4 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); + __pyx_t_4 = PyList_GET_ITEM(sequence, 0); + __pyx_t_3 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); @@ -6064,7 +6064,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr * self.internal_state.pos = pos * self.internal_state.has_gauss = has_gauss # <<<<<<<<<<<<<< * self.internal_state.gauss = cached_gaussian - * + * */ __pyx_t_7 = __Pyx_PyInt_AsInt(__pyx_v_has_gauss); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->internal_state->has_gauss = __pyx_t_7; @@ -6073,7 +6073,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_8set_state(struct __pyx_obj_6mtr * self.internal_state.pos = pos * self.internal_state.has_gauss = has_gauss * self.internal_state.gauss = cached_gaussian # <<<<<<<<<<<<<< - * + * * # Pickling support: */ __pyx_t_13 = __pyx_PyFloat_AsDouble(__pyx_v_cached_gaussian); if (unlikely((__pyx_t_13 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 709; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -6112,11 +6112,11 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_11__getstate__(PyObject *__pyx_v } /* "mtrand.pyx":712 - * + * * # Pickling support: * def __getstate__(self): # <<<<<<<<<<<<<< * return self.get_state() - * + * */ static PyObject *__pyx_pf_6mtrand_11RandomState_10__getstate__(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self) { @@ -6133,7 +6133,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_10__getstate__(struct __pyx_obj_ * # Pickling support: * def __getstate__(self): * return self.get_state() # <<<<<<<<<<<<<< - * + * * def __setstate__(self, state): */ __Pyx_XDECREF(__pyx_r); @@ -6172,10 +6172,10 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_13__setstate__(PyObject *__pyx_v /* "mtrand.pyx":715 * return self.get_state() - * + * * def __setstate__(self, state): # <<<<<<<<<<<<<< * self.set_state(state) - * + * */ static PyObject *__pyx_pf_6mtrand_11RandomState_12__setstate__(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self, PyObject 
*__pyx_v_state) { @@ -6190,10 +6190,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_12__setstate__(struct __pyx_obj_ __Pyx_RefNannySetupContext("__setstate__", 0); /* "mtrand.pyx":716 - * + * * def __setstate__(self, state): * self.set_state(state) # <<<<<<<<<<<<<< - * + * * def __reduce__(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__set_state); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -6236,10 +6236,10 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_15__reduce__(PyObject *__pyx_v_s /* "mtrand.pyx":718 * self.set_state(state) - * + * * def __reduce__(self): # <<<<<<<<<<<<<< * return (np.random.__RandomState_ctor, (), self.get_state()) - * + * */ static PyObject *__pyx_pf_6mtrand_11RandomState_14__reduce__(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self) { @@ -6254,10 +6254,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_14__reduce__(struct __pyx_obj_6m __Pyx_RefNannySetupContext("__reduce__", 0); /* "mtrand.pyx":719 - * + * * def __reduce__(self): * return (np.random.__RandomState_ctor, (), self.get_state()) # <<<<<<<<<<<<<< - * + * * # Basic distributions: */ __Pyx_XDECREF(__pyx_r); @@ -6319,7 +6319,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_17random_sample(PyObject *__pyx_ PyObject* values[1] = {0}; /* "mtrand.pyx":722 - * + * * # Basic distributions: * def random_sample(self, size=None): # <<<<<<<<<<<<<< * """ @@ -6377,10 +6377,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_16random_sample(struct __pyx_obj __Pyx_RefNannySetupContext("random_sample", 0); /* "mtrand.pyx":763 - * + * * """ * return cont0_array(self.internal_state, rk_double, size) # <<<<<<<<<<<<<< - * + * * def tomaxint(self, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -6419,7 +6419,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_sel /* "mtrand.pyx":765 * return cont0_array(self.internal_state, rk_double, size) - * + * * def tomaxint(self, size=None): # <<<<<<<<<<<<<< * """ * tomaxint(size=None) @@ -6476,10 +6476,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_18tomaxint(struct __pyx_obj_6mtr __Pyx_RefNannySetupContext("tomaxint", 0); /* "mtrand.pyx":810 - * + * * """ * return disc0_array(self.internal_state, rk_long, size) # <<<<<<<<<<<<<< - * + * * def randint(self, low, high=None, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -6520,7 +6520,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_21randint(PyObject *__pyx_v_self /* "mtrand.pyx":812 * return disc0_array(self.internal_state, rk_long, size) - * + * * def randint(self, low, high=None, size=None): # <<<<<<<<<<<<<< * """ * randint(low, high=None, size=None) @@ -6606,7 +6606,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra /* "mtrand.pyx":869 * cdef npy_intp i - * + * * if high is None: # <<<<<<<<<<<<<< * lo = 0 * hi = low @@ -6615,7 +6615,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra if (__pyx_t_1) { /* "mtrand.pyx":870 - * + * * if high is None: * lo = 0 # <<<<<<<<<<<<<< * hi = low @@ -6641,7 +6641,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra * else: * lo = low # <<<<<<<<<<<<<< * hi = high - * + * */ __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_low); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_lo = __pyx_t_2; @@ -6650,7 +6650,7 @@ static 
PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra * else: * lo = low * hi = high # <<<<<<<<<<<<<< - * + * * if lo >= hi : */ __pyx_t_2 = __Pyx_PyInt_AsLong(__pyx_v_high); if (unlikely((__pyx_t_2 == (long)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 874; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -6660,19 +6660,19 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra /* "mtrand.pyx":876 * hi = high - * + * * if lo >= hi : # <<<<<<<<<<<<<< * raise ValueError("low >= high") - * + * */ __pyx_t_1 = (__pyx_v_lo >= __pyx_v_hi); if (__pyx_t_1) { /* "mtrand.pyx":877 - * + * * if lo >= hi : * raise ValueError("low >= high") # <<<<<<<<<<<<<< - * + * * diff = <unsigned long>hi - <unsigned long>lo - 1UL */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_16), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -6686,7 +6686,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra /* "mtrand.pyx":879 * raise ValueError("low >= high") - * + * * diff = <unsigned long>hi - <unsigned long>lo - 1UL # <<<<<<<<<<<<<< * if size is None: * rv = lo + <long>rk_interval(diff, self. internal_state) @@ -6694,7 +6694,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra __pyx_v_diff = ((((unsigned long)__pyx_v_hi) - ((unsigned long)__pyx_v_lo)) - 1UL); /* "mtrand.pyx":880 - * + * * diff = <unsigned long>hi - <unsigned long>lo - 1UL * if size is None: # <<<<<<<<<<<<<< * rv = lo + <long>rk_interval(diff, self. internal_state) @@ -6801,7 +6801,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra * rv = lo + <long>rk_interval(diff, self. internal_state) * array_data[i] = rv # <<<<<<<<<<<<<< * return array - * + * */ (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_rv; } @@ -6810,7 +6810,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_20randint(struct __pyx_obj_6mtra * rv = lo + <long>rk_interval(diff, self. 
internal_state) * array_data[i] = rv * return array # <<<<<<<<<<<<<< - * + * * def bytes(self, npy_intp length): */ __Pyx_XDECREF(__pyx_r); @@ -6862,7 +6862,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_23bytes(PyObject *__pyx_v_self, /* "mtrand.pyx":892 * return array - * + * * def bytes(self, npy_intp length): # <<<<<<<<<<<<<< * """ * bytes(length) @@ -6896,7 +6896,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_22bytes(struct __pyx_obj_6mtrand * bytestring = empty_py_bytes(length, &bytes) * rk_fill(bytes, length, self.internal_state) # <<<<<<<<<<<<<< * return bytestring - * + * */ rk_fill(__pyx_v_bytes, __pyx_v_length, __pyx_v_self->internal_state); @@ -6904,8 +6904,8 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_22bytes(struct __pyx_obj_6mtrand * bytestring = empty_py_bytes(length, &bytes) * rk_fill(bytes, length, self.internal_state) * return bytestring # <<<<<<<<<<<<<< - * - * + * + * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_bytestring); @@ -6944,8 +6944,8 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_25choice(PyObject *__pyx_v_self, PyObject* values[4] = {0,0,0,0}; /* "mtrand.pyx":920 - * - * + * + * * def choice(self, a, size=None, replace=True, p=None): # <<<<<<<<<<<<<< * """ * choice(a, size=None, replace=True, p=None) @@ -7054,7 +7054,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran __Pyx_INCREF(__pyx_v_p); /* "mtrand.pyx":998 - * + * * # Format and Verify input * a = np.array(a, copy=False) # <<<<<<<<<<<<<< * if a.ndim == 0: @@ -7280,7 +7280,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * pop_size = a.shape[0] * if pop_size is 0: # <<<<<<<<<<<<<< * raise ValueError("a must be non-empty") - * + * */ __pyx_t_5 = (__pyx_v_pop_size == __pyx_int_0); if (__pyx_t_5) { @@ -7289,7 +7289,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * pop_size = a.shape[0] * if pop_size is 0: * raise ValueError("a must be non-empty") # <<<<<<<<<<<<<< - * + * * if None != p: */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_25), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -7305,7 +7305,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran /* "mtrand.pyx":1014 * raise ValueError("a must be non-empty") - * + * * if None != p: # <<<<<<<<<<<<<< * p = np.array(p, dtype=np.double, ndmin=1, copy=False) * if p.ndim != 1: @@ -7316,7 +7316,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran if (__pyx_t_5) { /* "mtrand.pyx":1015 - * + * * if None != p: * p = np.array(p, dtype=np.double, ndmin=1, copy=False) # <<<<<<<<<<<<<< * if p.ndim != 1: @@ -7464,7 +7464,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * raise ValueError("probabilities are not non-negative") * if not np.allclose(p.sum(), 1): # <<<<<<<<<<<<<< * raise ValueError("probabilities do not sum to 1") - * + * */ __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1022; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); @@ -7497,7 +7497,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * raise ValueError("probabilities are not non-negative") * if not np.allclose(p.sum(), 1): * raise ValueError("probabilities do not sum to 1") # <<<<<<<<<<<<<< - * + * * shape 
= size */ __pyx_t_1 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_33), NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -7514,7 +7514,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran /* "mtrand.pyx":1025 * raise ValueError("probabilities do not sum to 1") - * + * * shape = size # <<<<<<<<<<<<<< * if shape is not None: * size = np.prod(shape, dtype=np.intp) @@ -7523,7 +7523,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran __pyx_v_shape = __pyx_v_size; /* "mtrand.pyx":1026 - * + * * shape = size * if shape is not None: # <<<<<<<<<<<<<< * size = np.prod(shape, dtype=np.intp) @@ -7574,7 +7574,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * size = np.prod(shape, dtype=np.intp) * else: * size = 1 # <<<<<<<<<<<<<< - * + * * # Actual sampling */ __Pyx_INCREF(__pyx_int_1); @@ -7584,7 +7584,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran __pyx_L21:; /* "mtrand.pyx":1032 - * + * * # Actual sampling * if replace: # <<<<<<<<<<<<<< * if None != p: @@ -7767,7 +7767,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * if size > pop_size: * raise ValueError("Cannot take a larger sample than " # <<<<<<<<<<<<<< * "population when 'replace=False'") - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_35), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -7780,7 +7780,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran /* "mtrand.pyx":1046 * "population when 'replace=False'") - * + * * if None != p: # <<<<<<<<<<<<<< * if np.sum(p > 0) < size: * raise ValueError("Fewer non-zero entries in p than size") @@ -7791,7 +7791,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran if (__pyx_t_11) { /* "mtrand.pyx":1047 - * + * * if None != p: * if np.sum(p > 0) < size: # <<<<<<<<<<<<<< * raise ValueError("Fewer non-zero entries in p than size") @@ -8082,11 +8082,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_10 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_10 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); } else { - __pyx_t_10 = PyList_GET_ITEM(sequence, 0); - __pyx_t_4 = PyList_GET_ITEM(sequence, 1); + __pyx_t_10 = PyList_GET_ITEM(sequence, 0); + __pyx_t_4 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(__pyx_t_4); @@ -8236,7 +8236,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * idx = self.permutation(pop_size)[:size] * if shape is not None: # <<<<<<<<<<<<<< * idx.shape = shape - * + * */ __pyx_t_11 = (__pyx_v_shape != Py_None); if (__pyx_t_11) { @@ -8245,7 +8245,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * idx = self.permutation(pop_size)[:size] * if shape is not None: * idx.shape = shape # <<<<<<<<<<<<<< - * + * * if shape is None and isinstance(idx, np.ndarray): */ if (__Pyx_PyObject_SetAttrStr(__pyx_v_idx, __pyx_n_s__shape, __pyx_v_shape) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1069; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} @@ -8259,7 +8259,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran /* "mtrand.pyx":1071 * idx.shape = shape - * + * * if shape is None and isinstance(idx, np.ndarray): # <<<<<<<<<<<<<< * # In most cases a scalar will have been made an array * idx = idx.item(0) @@ -8283,7 +8283,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * if shape is None and isinstance(idx, np.ndarray): * # In most cases a scalar will have been made an array * idx = idx.item(0) # <<<<<<<<<<<<<< - * + * * #Use samples as indices for a if a is array-like */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s__item); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1073; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -8299,11 +8299,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran __pyx_L33:; /* "mtrand.pyx":1076 - * + * * #Use samples as indices for a if a is array-like * if a.ndim == 0: # <<<<<<<<<<<<<< * return idx - * + * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s__ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -8317,7 +8317,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * #Use samples as indices for a if a is array-like * if a.ndim == 0: * return idx # <<<<<<<<<<<<<< - * + * * if shape is not None and idx.ndim == 0: */ __Pyx_XDECREF(__pyx_r); @@ -8330,7 +8330,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran /* "mtrand.pyx":1079 * return idx - * + * * if shape is not None and idx.ndim == 0: # <<<<<<<<<<<<<< * # If size == () then the user requested a 0-d array as opposed to * # a scalar object when size is None. 
However a[idx] is always a @@ -8379,7 +8379,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * res = np.empty((), dtype=a.dtype) * res[()] = a[idx] # <<<<<<<<<<<<<< * return res - * + * */ __pyx_t_10 = PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1086; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); @@ -8390,7 +8390,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran * res = np.empty((), dtype=a.dtype) * res[()] = a[idx] * return res # <<<<<<<<<<<<<< - * + * * return a[idx] */ __Pyx_XDECREF(__pyx_r); @@ -8403,10 +8403,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtran /* "mtrand.pyx":1089 * return res - * + * * return a[idx] # <<<<<<<<<<<<<< - * - * + * + * */ __Pyx_XDECREF(__pyx_r); __pyx_t_10 = PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (!__pyx_t_10) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1089; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -8467,8 +8467,8 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_27uniform(PyObject *__pyx_v_self values[1] = __pyx_k_41; /* "mtrand.pyx":1092 - * - * + * + * * def uniform(self, low=0.0, high=1.0, size=None): # <<<<<<<<<<<<<< * """ * uniform(low=0.0, high=1.0, size=1) @@ -8551,7 +8551,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra /* "mtrand.pyx":1166 * cdef object temp - * + * * flow = PyFloat_AsDouble(low) # <<<<<<<<<<<<<< * fhigh = PyFloat_AsDouble(high) * if not PyErr_Occurred(): @@ -8559,7 +8559,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra __pyx_v_flow = PyFloat_AsDouble(__pyx_v_low); /* "mtrand.pyx":1167 - * + * * flow = PyFloat_AsDouble(low) * fhigh = PyFloat_AsDouble(high) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -8674,7 +8674,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra * # rules because EnsureArray steals a reference * odiff = <ndarray>PyArray_EnsureArray(temp) # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff) - * + * */ __pyx_t_4 = PyArray_EnsureArray(__pyx_v_temp); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1176; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); @@ -8688,7 +8688,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_26uniform(struct __pyx_obj_6mtra * # rules because EnsureArray steals a reference * odiff = <ndarray>PyArray_EnsureArray(temp) * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff) # <<<<<<<<<<<<<< - * + * * def rand(self, *args): */ __Pyx_XDECREF(__pyx_r); @@ -8735,7 +8735,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_29rand(PyObject *__pyx_v_self, P /* "mtrand.pyx":1179 * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff) - * + * * def rand(self, *args): # <<<<<<<<<<<<<< * """ * rand(d0, d1, ..., dn) @@ -8755,7 +8755,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_28rand(struct __pyx_obj_6mtrand_ __Pyx_RefNannySetupContext("rand", 0); /* "mtrand.pyx":1218 - * + * * """ * if len(args) == 0: # <<<<<<<<<<<<<< * return self.random_sample() @@ -8789,7 +8789,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_28rand(struct __pyx_obj_6mtrand_ * return self.random_sample() * else: * return self.random_sample(size=args) # <<<<<<<<<<<<<< - * + * * def randn(self, *args): */ __Pyx_XDECREF(__pyx_r); @@ -8841,7 +8841,7 @@ static PyObject 
*__pyx_pw_6mtrand_11RandomState_31randn(PyObject *__pyx_v_self, /* "mtrand.pyx":1223 * return self.random_sample(size=args) - * + * * def randn(self, *args): # <<<<<<<<<<<<<< * """ * randn(d0, d1, ..., dn) @@ -8861,7 +8861,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_30randn(struct __pyx_obj_6mtrand __Pyx_RefNannySetupContext("randn", 0); /* "mtrand.pyx":1275 - * + * * """ * if len(args) == 0: # <<<<<<<<<<<<<< * return self.standard_normal() @@ -8895,7 +8895,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_30randn(struct __pyx_obj_6mtrand * return self.standard_normal() * else: * return self.standard_normal(args) # <<<<<<<<<<<<<< - * + * * def random_integers(self, low, high=None, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -8949,7 +8949,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_33random_integers(PyObject *__py /* "mtrand.pyx":1280 * return self.standard_normal(args) - * + * * def random_integers(self, low, high=None, size=None): # <<<<<<<<<<<<<< * """ * random_integers(low, high=None, size=None) @@ -9026,7 +9026,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o __Pyx_INCREF(__pyx_v_high); /* "mtrand.pyx":1352 - * + * * """ * if high is None: # <<<<<<<<<<<<<< * high = low @@ -9051,7 +9051,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o * high = low * low = 1 # <<<<<<<<<<<<<< * return self.randint(low, high+1, size) - * + * */ __Pyx_INCREF(__pyx_int_1); __Pyx_DECREF(__pyx_v_low); @@ -9064,7 +9064,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_32random_integers(struct __pyx_o * high = low * low = 1 * return self.randint(low, high+1, size) # <<<<<<<<<<<<<< - * + * * # Complicated, continuous distributions: */ __Pyx_XDECREF(__pyx_r); @@ -9123,7 +9123,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_35standard_normal(PyObject *__py PyObject* values[1] = {0}; /* "mtrand.pyx":1358 - * + * * # Complicated, continuous distributions: * def standard_normal(self, size=None): # <<<<<<<<<<<<<< * """ @@ -9181,10 +9181,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_34standard_normal(struct __pyx_o __Pyx_RefNannySetupContext("standard_normal", 0); /* "mtrand.pyx":1388 - * + * * """ * return cont0_array(self.internal_state, rk_gauss, size) # <<<<<<<<<<<<<< - * + * * def normal(self, loc=0.0, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -9227,7 +9227,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_37normal(PyObject *__pyx_v_self, /* "mtrand.pyx":1390 * return cont0_array(self.internal_state, rk_gauss, size) - * + * * def normal(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * normal(loc=0.0, scale=1.0, size=None) @@ -9309,7 +9309,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran /* "mtrand.pyx":1475 * cdef double floc, fscale - * + * * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): @@ -9317,7 +9317,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran __pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc); /* "mtrand.pyx":1476 - * + * * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -9350,7 +9350,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, 
((PyObject *)__pyx_k_tuple_45), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1479; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -9365,7 +9365,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -9380,16 +9380,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran /* "mtrand.pyx":1482 * return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":1484 * PyErr_Clear() - * + * * oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oscale, 0)): @@ -9403,7 +9403,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran __pyx_t_3 = 0; /* "mtrand.pyx":1485 - * + * * oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oscale, 0)): @@ -9464,7 +9464,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran * if np.any(np.less_equal(oscale, 0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale) - * + * */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_46), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1487; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); @@ -9479,7 +9479,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_36normal(struct __pyx_obj_6mtran * if np.any(np.less_equal(oscale, 0)): * raise ValueError("scale <= 0") * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale) # <<<<<<<<<<<<<< - * + * * def beta(self, a, b, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -9525,7 +9525,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_39beta(PyObject *__pyx_v_self, P /* "mtrand.pyx":1490 * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale) - * + * * def beta(self, a, b, size=None): # <<<<<<<<<<<<<< * """ * beta(a, b, size=None) @@ -9605,7 +9605,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ /* "mtrand.pyx":1530 * cdef double fa, fb - * + * * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * fb = PyFloat_AsDouble(b) * if not PyErr_Occurred(): @@ -9613,7 +9613,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ __pyx_v_fa = PyFloat_AsDouble(__pyx_v_a); /* "mtrand.pyx":1531 - * + * * fa = PyFloat_AsDouble(a) * fb = PyFloat_AsDouble(b) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -9672,7 +9672,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ * if fb <= 0: * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_50), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1536; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_2); @@ -9687,7 +9687,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ * if fb <= 0: * raise ValueError("b <= 0") * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -9702,16 +9702,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ /* "mtrand.pyx":1539 * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":1541 * PyErr_Clear() - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oa, 0)): @@ -9725,7 +9725,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ __pyx_t_3 = 0; /* "mtrand.pyx":1542 - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oa, 0)): @@ -9844,7 +9844,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_beta, size, oa, ob) - * + * */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_52), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); @@ -9859,7 +9859,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_38beta(struct __pyx_obj_6mtrand_ * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") * return cont2_array(self.internal_state, rk_beta, size, oa, ob) # <<<<<<<<<<<<<< - * + * * def exponential(self, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -9905,7 +9905,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_41exponential(PyObject *__pyx_v_ /* "mtrand.pyx":1549 * return cont2_array(self.internal_state, rk_beta, size, oa, ob) - * + * * def exponential(self, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * exponential(scale=1.0, size=None) @@ -9977,7 +9977,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 /* "mtrand.pyx":1590 * cdef double fscale - * + * * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fscale <= 0: @@ -9985,7 +9985,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 __pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale); /* "mtrand.pyx":1591 - * + * * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fscale <= 0: @@ -10009,7 +10009,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_exponential, size, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_54), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -10024,7 +10024,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 * if fscale <= 0: * raise ValueError("scale <= 0") * return 
cont1_array_sc(self.internal_state, rk_exponential, size, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -10039,16 +10039,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 /* "mtrand.pyx":1596 * return cont1_array_sc(self.internal_state, rk_exponential, size, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":1598 * PyErr_Clear() - * + * * oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") @@ -10062,7 +10062,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 __pyx_t_3 = 0; /* "mtrand.pyx":1599 - * + * * oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("scale <= 0") @@ -10110,7 +10110,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_exponential, size, oscale) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_55), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -10125,7 +10125,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_40exponential(struct __pyx_obj_6 * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") * return cont1_array(self.internal_state, rk_exponential, size, oscale) # <<<<<<<<<<<<<< - * + * * def standard_exponential(self, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -10168,7 +10168,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_43standard_exponential(PyObject /* "mtrand.pyx":1603 * return cont1_array(self.internal_state, rk_exponential, size, oscale) - * + * * def standard_exponential(self, size=None): # <<<<<<<<<<<<<< * """ * standard_exponential(size=None) @@ -10225,10 +10225,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_42standard_exponential(struct __ __Pyx_RefNannySetupContext("standard_exponential", 0); /* "mtrand.pyx":1629 - * + * * """ * return cont0_array(self.internal_state, rk_standard_exponential, size) # <<<<<<<<<<<<<< - * + * * def standard_gamma(self, shape, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -10268,7 +10268,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_45standard_gamma(PyObject *__pyx /* "mtrand.pyx":1631 * return cont0_array(self.internal_state, rk_standard_exponential, size) - * + * * def standard_gamma(self, shape, size=None): # <<<<<<<<<<<<<< * """ * standard_gamma(shape, size=None) @@ -10338,7 +10338,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob /* "mtrand.pyx":1701 * cdef double fshape - * + * * fshape = PyFloat_AsDouble(shape) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fshape <= 0: @@ -10346,7 +10346,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob __pyx_v_fshape = PyFloat_AsDouble(__pyx_v_shape); /* "mtrand.pyx":1702 - * + * * fshape = PyFloat_AsDouble(shape) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fshape <= 0: @@ -10370,7 +10370,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob * if fshape <= 0: * raise ValueError("shape <= 0") # 
<<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_57), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -10385,7 +10385,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob * if fshape <= 0: * raise ValueError("shape <= 0") * return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -10400,7 +10400,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob /* "mtrand.pyx":1707 * return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oshape, 0.0)): @@ -10408,7 +10408,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob PyErr_Clear(); /* "mtrand.pyx":1708 - * + * * PyErr_Clear() * oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oshape, 0.0)): @@ -10471,7 +10471,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob * if np.any(np.less_equal(oshape, 0.0)): * raise ValueError("shape <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_58), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1710; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -10486,7 +10486,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_44standard_gamma(struct __pyx_ob * if np.any(np.less_equal(oshape, 0.0)): * raise ValueError("shape <= 0") * return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) # <<<<<<<<<<<<<< - * + * * def gamma(self, shape, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -10532,7 +10532,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_47gamma(PyObject *__pyx_v_self, /* "mtrand.pyx":1713 * return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) - * + * * def gamma(self, shape, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * gamma(shape, scale=1.0, size=None) @@ -10612,7 +10612,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand /* "mtrand.pyx":1786 * cdef double fshape, fscale - * + * * fshape = PyFloat_AsDouble(shape) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): @@ -10620,7 +10620,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand __pyx_v_fshape = PyFloat_AsDouble(__pyx_v_shape); /* "mtrand.pyx":1787 - * + * * fshape = PyFloat_AsDouble(shape) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -10679,7 +10679,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_61), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_2); @@ -10694,7 +10694,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -10709,7 +10709,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand /* "mtrand.pyx":1795 * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -10717,7 +10717,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand PyErr_Clear(); /* "mtrand.pyx":1796 - * + * * PyErr_Clear() * oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -10855,7 +10855,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_63), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -10870,7 +10870,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_46gamma(struct __pyx_obj_6mtrand * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale) # <<<<<<<<<<<<<< - * + * * def f(self, dfnum, dfden, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -10916,7 +10916,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyOb /* "mtrand.pyx":1804 * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale) - * + * * def f(self, dfnum, dfden, size=None): # <<<<<<<<<<<<<< * """ * f(dfnum, dfden, size=None) @@ -10996,7 +10996,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran /* "mtrand.pyx":1887 * cdef double fdfnum, fdfden - * + * * fdfnum = PyFloat_AsDouble(dfnum) # <<<<<<<<<<<<<< * fdfden = PyFloat_AsDouble(dfden) * if not PyErr_Occurred(): @@ -11004,7 +11004,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran __pyx_v_fdfnum = PyFloat_AsDouble(__pyx_v_dfnum); /* "mtrand.pyx":1888 - * + * * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -11063,7 +11063,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran * if fdfden <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_65), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1893; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -11078,7 +11078,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran * if fdfden <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden) # <<<<<<<<<<<<<< - * + * * 
PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -11093,16 +11093,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran /* "mtrand.pyx":1896 * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":1898 * PyErr_Clear() - * + * * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(odfnum, 0.0)): @@ -11116,7 +11116,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran __pyx_t_3 = 0; /* "mtrand.pyx":1899 - * + * * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfnum, 0.0)): @@ -11239,7 +11239,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_69), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -11254,7 +11254,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_48f(struct __pyx_obj_6mtrand_Ran * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden) # <<<<<<<<<<<<<< - * + * * def noncentral_f(self, dfnum, dfden, nonc, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -11301,7 +11301,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v /* "mtrand.pyx":1906 * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden) - * + * * def noncentral_f(self, dfnum, dfden, nonc, size=None): # <<<<<<<<<<<<<< * """ * noncentral_f(dfnum, dfden, nonc, size=None) @@ -11391,7 +11391,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ /* "mtrand.pyx":1973 * cdef double fdfnum, fdfden, fnonc - * + * * fdfnum = PyFloat_AsDouble(dfnum) # <<<<<<<<<<<<<< * fdfden = PyFloat_AsDouble(dfden) * fnonc = PyFloat_AsDouble(nonc) @@ -11399,7 +11399,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ __pyx_v_fdfnum = PyFloat_AsDouble(__pyx_v_dfnum); /* "mtrand.pyx":1974 - * + * * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) # <<<<<<<<<<<<<< * fnonc = PyFloat_AsDouble(nonc) @@ -11509,7 +11509,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ * raise ValueError("nonc < 0") * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, # <<<<<<<<<<<<<< * fdfnum, fdfden, fnonc) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -11517,7 +11517,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ * raise ValueError("nonc < 0") * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, * fdfnum, fdfden, fnonc) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __pyx_t_2 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_v_fnonc); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 1983; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -11531,16 +11531,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ /* "mtrand.pyx":1986 * fdfnum, fdfden, fnonc) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":1988 * PyErr_Clear() - * + * * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -11554,11 +11554,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ __pyx_t_3 = 0; /* "mtrand.pyx":1989 - * + * * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - * + * */ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1989; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -11572,7 +11572,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ * odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< - * + * * if np.any(np.less_equal(odfnum, 1.0)): */ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1990; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -11585,7 +11585,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ /* "mtrand.pyx":1992 * ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - * + * * if np.any(np.less_equal(odfnum, 1.0)): # <<<<<<<<<<<<<< * raise ValueError("dfnum <= 1") * if np.any(np.less_equal(odfden, 0.0)): @@ -11628,7 +11628,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ if (__pyx_t_1) { /* "mtrand.pyx":1993 - * + * * if np.any(np.less_equal(odfnum, 1.0)): * raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfden, 0.0)): @@ -11768,7 +11768,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ * raise ValueError("nonc < 0") * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, # <<<<<<<<<<<<<< * odfden, ononc) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -11776,7 +11776,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_50noncentral_f(struct __pyx_obj_ * raise ValueError("nonc < 0") * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, * odfden, ononc) # <<<<<<<<<<<<<< - * + * * def chisquare(self, df, size=None): */ __pyx_t_3 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden, __pyx_v_ononc); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1998; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -11821,7 +11821,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_se /* "mtrand.pyx":2001 * odfden, ononc) - * + * * def chisquare(self, df, size=None): # 
<<<<<<<<<<<<<< * """ * chisquare(df, size=None) @@ -11891,7 +11891,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt /* "mtrand.pyx":2066 * cdef double fdf - * + * * fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fdf <= 0: @@ -11899,7 +11899,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt __pyx_v_fdf = PyFloat_AsDouble(__pyx_v_df); /* "mtrand.pyx":2067 - * + * * fdf = PyFloat_AsDouble(df) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fdf <= 0: @@ -11923,7 +11923,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_79), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2069; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -11938,7 +11938,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt * if fdf <= 0: * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -11953,16 +11953,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt /* "mtrand.pyx":2072 * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2074 * PyErr_Clear() - * + * * odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") @@ -11976,7 +11976,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt __pyx_t_3 = 0; /* "mtrand.pyx":2075 - * + * * odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("df <= 0") @@ -12024,7 +12024,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_chisquare, size, odf) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_80), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -12039,7 +12039,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_52chisquare(struct __pyx_obj_6mt * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_chisquare, size, odf) # <<<<<<<<<<<<<< - * + * * def noncentral_chisquare(self, df, nonc, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -12084,7 +12084,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject /* "mtrand.pyx":2079 * return cont1_array(self.internal_state, rk_chisquare, size, odf) - * + * * def noncentral_chisquare(self, df, nonc, size=None): # <<<<<<<<<<<<<< * """ * noncentral_chisquare(df, nonc, size=None) @@ -12247,7 +12247,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __ * raise ValueError("nonc <= 0") * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, # 
<<<<<<<<<<<<<< * size, fdf, fnonc) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -12255,7 +12255,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __ * raise ValueError("nonc <= 0") * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, * size, fdf, fnonc) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_v_fnonc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -12269,16 +12269,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __ /* "mtrand.pyx":2160 * size, fdf, fnonc) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2162 * PyErr_Clear() - * + * * odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(odf, 0.0)): @@ -12292,7 +12292,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __ __pyx_t_3 = 0; /* "mtrand.pyx":2163 - * + * * odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(odf, 0.0)): @@ -12431,7 +12431,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __ * raise ValueError("nonc < 0") * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, # <<<<<<<<<<<<<< * odf, ononc) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -12439,7 +12439,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_54noncentral_chisquare(struct __ * raise ValueError("nonc < 0") * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, * odf, ononc) # <<<<<<<<<<<<<< - * + * * def standard_cauchy(self, size=None): */ __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_v_ononc); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2168; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -12482,7 +12482,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_57standard_cauchy(PyObject *__py /* "mtrand.pyx":2171 * odf, ononc) - * + * * def standard_cauchy(self, size=None): # <<<<<<<<<<<<<< * """ * standard_cauchy(size=None) @@ -12539,10 +12539,10 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_56standard_cauchy(struct __pyx_o __Pyx_RefNannySetupContext("standard_cauchy", 0); /* "mtrand.pyx":2230 - * + * * """ * return cont0_array(self.internal_state, rk_standard_cauchy, size) # <<<<<<<<<<<<<< - * + * * def standard_t(self, df, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -12582,7 +12582,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_59standard_t(PyObject *__pyx_v_s /* "mtrand.pyx":2232 * return cont0_array(self.internal_state, rk_standard_cauchy, size) - * + * * def standard_t(self, df, size=None): # <<<<<<<<<<<<<< * """ * standard_t(df, size=None) @@ -12652,7 +12652,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m /* "mtrand.pyx":2320 * cdef double fdf - * + * * fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fdf <= 0: @@ -12660,7 +12660,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m __pyx_v_fdf = 
PyFloat_AsDouble(__pyx_v_df); /* "mtrand.pyx":2321 - * + * * fdf = PyFloat_AsDouble(df) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fdf <= 0: @@ -12684,7 +12684,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_87), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2323; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -12699,7 +12699,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m * if fdf <= 0: * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -12714,16 +12714,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m /* "mtrand.pyx":2326 * return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2328 * PyErr_Clear() - * + * * odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") @@ -12737,7 +12737,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m __pyx_t_3 = 0; /* "mtrand.pyx":2329 - * + * * odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("df <= 0") @@ -12785,7 +12785,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_t, size, odf) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_88), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2330; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -12800,7 +12800,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_58standard_t(struct __pyx_obj_6m * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_standard_t, size, odf) # <<<<<<<<<<<<<< - * + * * def vonmises(self, mu, kappa, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -12845,7 +12845,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_61vonmises(PyObject *__pyx_v_sel /* "mtrand.pyx":2333 * return cont1_array(self.internal_state, rk_standard_t, size, odf) - * + * * def vonmises(self, mu, kappa, size=None): # <<<<<<<<<<<<<< * """ * vonmises(mu, kappa, size=None) @@ -12925,7 +12925,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr /* "mtrand.pyx":2412 * cdef double fmu, fkappa - * + * * fmu = PyFloat_AsDouble(mu) # <<<<<<<<<<<<<< * fkappa = PyFloat_AsDouble(kappa) * if not PyErr_Occurred(): @@ -12933,7 +12933,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr __pyx_v_fmu = PyFloat_AsDouble(__pyx_v_mu); /* "mtrand.pyx":2413 - * + * * fmu = PyFloat_AsDouble(mu) * fkappa = PyFloat_AsDouble(kappa) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -12966,7 +12966,7 @@ static PyObject 
*__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr * if fkappa < 0: * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_90), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -12981,7 +12981,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr * if fkappa < 0: * raise ValueError("kappa < 0") * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -12996,16 +12996,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr /* "mtrand.pyx":2419 * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2421 * PyErr_Clear() - * + * * omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less(okappa, 0.0)): @@ -13019,7 +13019,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr __pyx_t_3 = 0; /* "mtrand.pyx":2422 - * + * * omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less(okappa, 0.0)): @@ -13082,7 +13082,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_91), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -13097,7 +13097,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_60vonmises(struct __pyx_obj_6mtr * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa) # <<<<<<<<<<<<<< - * + * * def pareto(self, a, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -13142,7 +13142,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_63pareto(PyObject *__pyx_v_self, /* "mtrand.pyx":2427 * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa) - * + * * def pareto(self, a, size=None): # <<<<<<<<<<<<<< * """ * pareto(a, size=None) @@ -13212,7 +13212,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran /* "mtrand.pyx":2510 * cdef double fa - * + * * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fa <= 0: @@ -13220,7 +13220,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran __pyx_v_fa = PyFloat_AsDouble(__pyx_v_a); /* "mtrand.pyx":2511 - * + * * fa = PyFloat_AsDouble(a) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fa <= 0: @@ -13244,7 +13244,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_pareto, size, fa) 
- * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_92), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -13259,7 +13259,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran * if fa <= 0: * raise ValueError("a <= 0") * return cont1_array_sc(self.internal_state, rk_pareto, size, fa) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -13274,16 +13274,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran /* "mtrand.pyx":2516 * return cont1_array_sc(self.internal_state, rk_pareto, size, fa) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2518 * PyErr_Clear() - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") @@ -13297,7 +13297,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran __pyx_t_3 = 0; /* "mtrand.pyx":2519 - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("a <= 0") @@ -13345,7 +13345,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_pareto, size, oa) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_93), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2520; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -13360,7 +13360,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_62pareto(struct __pyx_obj_6mtran * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") * return cont1_array(self.internal_state, rk_pareto, size, oa) # <<<<<<<<<<<<<< - * + * * def weibull(self, a, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -13404,7 +13404,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_65weibull(PyObject *__pyx_v_self /* "mtrand.pyx":2523 * return cont1_array(self.internal_state, rk_pareto, size, oa) - * + * * def weibull(self, a, size=None): # <<<<<<<<<<<<<< * """ * weibull(a, size=None) @@ -13474,7 +13474,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra /* "mtrand.pyx":2610 * cdef double fa - * + * * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fa <= 0: @@ -13482,7 +13482,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra __pyx_v_fa = PyFloat_AsDouble(__pyx_v_a); /* "mtrand.pyx":2611 - * + * * fa = PyFloat_AsDouble(a) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fa <= 0: @@ -13506,7 +13506,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_weibull, size, fa) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_94), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2613; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -13521,7 +13521,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra * if fa 
<= 0: * raise ValueError("a <= 0") * return cont1_array_sc(self.internal_state, rk_weibull, size, fa) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -13536,16 +13536,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra /* "mtrand.pyx":2616 * return cont1_array_sc(self.internal_state, rk_weibull, size, fa) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2618 * PyErr_Clear() - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") @@ -13559,7 +13559,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra __pyx_t_3 = 0; /* "mtrand.pyx":2619 - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("a <= 0") @@ -13607,7 +13607,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_weibull, size, oa) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_95), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -13622,7 +13622,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_64weibull(struct __pyx_obj_6mtra * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") * return cont1_array(self.internal_state, rk_weibull, size, oa) # <<<<<<<<<<<<<< - * + * * def power(self, a, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -13666,7 +13666,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_67power(PyObject *__pyx_v_self, /* "mtrand.pyx":2623 * return cont1_array(self.internal_state, rk_weibull, size, oa) - * + * * def power(self, a, size=None): # <<<<<<<<<<<<<< * """ * power(a, size=None) @@ -13736,7 +13736,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand /* "mtrand.pyx":2719 * cdef double fa - * + * * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fa <= 0: @@ -13744,7 +13744,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand __pyx_v_fa = PyFloat_AsDouble(__pyx_v_a); /* "mtrand.pyx":2720 - * + * * fa = PyFloat_AsDouble(a) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fa <= 0: @@ -13768,7 +13768,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_power, size, fa) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_96), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2722; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -13783,7 +13783,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand * if fa <= 0: * raise ValueError("a <= 0") * return cont1_array_sc(self.internal_state, rk_power, size, fa) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -13798,16 +13798,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand /* "mtrand.pyx":2725 * return cont1_array_sc(self.internal_state, rk_power, size, fa) - * + * * 
PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":2727 * PyErr_Clear() - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") @@ -13821,7 +13821,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand __pyx_t_3 = 0; /* "mtrand.pyx":2728 - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("a <= 0") @@ -13869,7 +13869,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_power, size, oa) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_97), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2729; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -13884,7 +13884,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_66power(struct __pyx_obj_6mtrand * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") * return cont1_array(self.internal_state, rk_power, size, oa) # <<<<<<<<<<<<<< - * + * * def laplace(self, loc=0.0, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -13931,7 +13931,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_69laplace(PyObject *__pyx_v_self /* "mtrand.pyx":2732 * return cont1_array(self.internal_state, rk_power, size, oa) - * + * * def laplace(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * laplace(loc=0.0, scale=1.0, size=None) @@ -14013,7 +14013,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra /* "mtrand.pyx":2808 * cdef double floc, fscale - * + * * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): @@ -14021,7 +14021,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra __pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc); /* "mtrand.pyx":2809 - * + * * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -14054,7 +14054,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_100), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14069,7 +14069,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -14084,7 +14084,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra /* "mtrand.pyx":2815 * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -14092,7 +14092,7 @@ static PyObject 
*__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra PyErr_Clear(); /* "mtrand.pyx":2816 - * + * * PyErr_Clear() * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -14166,7 +14166,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_101), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14181,7 +14181,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_68laplace(struct __pyx_obj_6mtra * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) # <<<<<<<<<<<<<< - * + * * def gumbel(self, loc=0.0, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -14229,7 +14229,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_71gumbel(PyObject *__pyx_v_self, /* "mtrand.pyx":2822 * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) - * + * * def gumbel(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * gumbel(loc=0.0, scale=1.0, size=None) @@ -14311,7 +14311,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran /* "mtrand.pyx":2939 * cdef double floc, fscale - * + * * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): @@ -14319,7 +14319,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran __pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc); /* "mtrand.pyx":2940 - * + * * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -14352,7 +14352,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_104), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2943; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14367,7 +14367,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -14382,7 +14382,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran /* "mtrand.pyx":2946 * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -14390,7 +14390,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran PyErr_Clear(); /* "mtrand.pyx":2947 - * + * * PyErr_Clear() * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -14464,7 +14464,7 @@ static PyObject 
*__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_105), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2950; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14479,7 +14479,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_70gumbel(struct __pyx_obj_6mtran * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) # <<<<<<<<<<<<<< - * + * * def logistic(self, loc=0.0, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -14527,7 +14527,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_73logistic(PyObject *__pyx_v_sel /* "mtrand.pyx":2953 * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) - * + * * def logistic(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * logistic(loc=0.0, scale=1.0, size=None) @@ -14609,7 +14609,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr /* "mtrand.pyx":3027 * cdef double floc, fscale - * + * * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): @@ -14617,7 +14617,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr __pyx_v_floc = PyFloat_AsDouble(__pyx_v_loc); /* "mtrand.pyx":3028 - * + * * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -14650,7 +14650,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_108), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3031; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14665,7 +14665,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -14680,7 +14680,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr /* "mtrand.pyx":3034 * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -14688,7 +14688,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr PyErr_Clear(); /* "mtrand.pyx":3035 - * + * * PyErr_Clear() * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -14762,7 +14762,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, 
((PyObject *)__pyx_k_tuple_109), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3038; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14777,7 +14777,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_72logistic(struct __pyx_obj_6mtr * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") * return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) # <<<<<<<<<<<<<< - * + * * def lognormal(self, mean=0.0, sigma=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -14825,7 +14825,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_75lognormal(PyObject *__pyx_v_se /* "mtrand.pyx":3041 * return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) - * + * * def lognormal(self, mean=0.0, sigma=1.0, size=None): # <<<<<<<<<<<<<< * """ * lognormal(mean=0.0, sigma=1.0, size=None) @@ -14907,25 +14907,25 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt /* "mtrand.pyx":3146 * cdef double fmean, fsigma - * + * * fmean = PyFloat_AsDouble(mean) # <<<<<<<<<<<<<< * fsigma = PyFloat_AsDouble(sigma) - * + * */ __pyx_v_fmean = PyFloat_AsDouble(__pyx_v_mean); /* "mtrand.pyx":3147 - * + * * fmean = PyFloat_AsDouble(mean) * fsigma = PyFloat_AsDouble(sigma) # <<<<<<<<<<<<<< - * + * * if not PyErr_Occurred(): */ __pyx_v_fsigma = PyFloat_AsDouble(__pyx_v_sigma); /* "mtrand.pyx":3149 * fsigma = PyFloat_AsDouble(sigma) - * + * * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fsigma <= 0: * raise ValueError("sigma <= 0") @@ -14934,7 +14934,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt if (__pyx_t_1) { /* "mtrand.pyx":3150 - * + * * if not PyErr_Occurred(): * if fsigma <= 0: # <<<<<<<<<<<<<< * raise ValueError("sigma <= 0") @@ -14948,7 +14948,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt * if fsigma <= 0: * raise ValueError("sigma <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_113), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -14963,7 +14963,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt * if fsigma <= 0: * raise ValueError("sigma <= 0") * return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -14978,16 +14978,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt /* "mtrand.pyx":3154 * return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3156 * PyErr_Clear() - * + * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(osigma, 0.0)): @@ -14999,7 +14999,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt __pyx_t_2 = 0; /* "mtrand.pyx":3157 - * + * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(osigma, 0.0)): @@ -15060,7 +15060,7 @@ static 
PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt * if np.any(np.less_equal(osigma, 0.0)): * raise ValueError("sigma <= 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_115), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -15075,7 +15075,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_74lognormal(struct __pyx_obj_6mt * if np.any(np.less_equal(osigma, 0.0)): * raise ValueError("sigma <= 0.0") * return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) # <<<<<<<<<<<<<< - * + * * def rayleigh(self, scale=1.0, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -15121,7 +15121,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_77rayleigh(PyObject *__pyx_v_sel /* "mtrand.pyx":3162 * return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) - * + * * def rayleigh(self, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * rayleigh(scale=1.0, size=None) @@ -15193,16 +15193,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr /* "mtrand.pyx":3220 * cdef double fscale - * + * * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< - * + * * if not PyErr_Occurred(): */ __pyx_v_fscale = PyFloat_AsDouble(__pyx_v_scale); /* "mtrand.pyx":3222 * fscale = PyFloat_AsDouble(scale) - * + * * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fscale <= 0: * raise ValueError("scale <= 0") @@ -15211,7 +15211,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr if (__pyx_t_1) { /* "mtrand.pyx":3223 - * + * * if not PyErr_Occurred(): * if fscale <= 0: # <<<<<<<<<<<<<< * raise ValueError("scale <= 0") @@ -15225,7 +15225,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_117), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -15240,7 +15240,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr * if fscale <= 0: * raise ValueError("scale <= 0") * return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -15255,16 +15255,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr /* "mtrand.pyx":3227 * return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3229 * PyErr_Clear() - * + * * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0.0") @@ -15278,7 +15278,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr __pyx_t_3 = 0; /* "mtrand.pyx":3230 - * + * * oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oscale, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("scale <= 0.0") @@ -15326,7 +15326,7 @@ 
static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_rayleigh, size, oscale) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_119), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3231; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -15341,7 +15341,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_76rayleigh(struct __pyx_obj_6mtr * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0.0") * return cont1_array(self.internal_state, rk_rayleigh, size, oscale) # <<<<<<<<<<<<<< - * + * * def wald(self, mean, scale, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -15386,7 +15386,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_79wald(PyObject *__pyx_v_self, P /* "mtrand.pyx":3234 * return cont1_array(self.internal_state, rk_rayleigh, size, oscale) - * + * * def wald(self, mean, scale, size=None): # <<<<<<<<<<<<<< * """ * wald(mean, scale, size=None) @@ -15466,7 +15466,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ /* "mtrand.pyx":3300 * cdef double fmean, fscale - * + * * fmean = PyFloat_AsDouble(mean) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if not PyErr_Occurred(): @@ -15474,7 +15474,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ __pyx_v_fmean = PyFloat_AsDouble(__pyx_v_mean); /* "mtrand.pyx":3301 - * + * * fmean = PyFloat_AsDouble(mean) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -15533,7 +15533,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_122), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3306; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -15548,7 +15548,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -15563,7 +15563,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ /* "mtrand.pyx":3309 * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -15571,7 +15571,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ PyErr_Clear(); /* "mtrand.pyx":3310 - * + * * PyErr_Clear() * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -15704,7 +15704,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_wald, size, omean, oscale) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject 
*)__pyx_k_tuple_125), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3315; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -15719,8 +15719,8 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_78wald(struct __pyx_obj_6mtrand_ * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") * return cont2_array(self.internal_state, rk_wald, size, omean, oscale) # <<<<<<<<<<<<<< - * - * + * + * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_omean, __pyx_v_oscale); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3316; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -15765,8 +15765,8 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_81triangular(PyObject *__pyx_v_s PyObject* values[4] = {0,0,0,0}; /* "mtrand.pyx":3320 - * - * + * + * * def triangular(self, left, mode, right, size=None): # <<<<<<<<<<<<<< * """ * triangular(left, mode, right, size=None) @@ -15856,7 +15856,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m /* "mtrand.pyx":3380 * cdef double fleft, fmode, fright - * + * * fleft = PyFloat_AsDouble(left) # <<<<<<<<<<<<<< * fright = PyFloat_AsDouble(right) * fmode = PyFloat_AsDouble(mode) @@ -15864,7 +15864,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m __pyx_v_fleft = PyFloat_AsDouble(__pyx_v_left); /* "mtrand.pyx":3381 - * + * * fleft = PyFloat_AsDouble(left) * fright = PyFloat_AsDouble(right) # <<<<<<<<<<<<<< * fmode = PyFloat_AsDouble(mode) @@ -15974,7 +15974,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m * raise ValueError("left == right") * return cont3_array_sc(self.internal_state, rk_triangular, size, fleft, # <<<<<<<<<<<<<< * fmode, fright) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -15982,7 +15982,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m * raise ValueError("left == right") * return cont3_array_sc(self.internal_state, rk_triangular, size, fleft, * fmode, fright) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __pyx_t_2 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_fleft, __pyx_v_fmode, __pyx_v_fright); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3390; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -15996,7 +15996,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m /* "mtrand.pyx":3393 * fmode, fright) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< * oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -16004,7 +16004,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m PyErr_Clear(); /* "mtrand.pyx":3394 - * + * * PyErr_Clear() * oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) @@ -16023,7 +16023,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m * oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - * + * */ __pyx_t_3 = PyArray_FROM_OTF(__pyx_v_mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -16037,7 +16037,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m * oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< - * + * * if np.any(np.greater(oleft, omode)): */ __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_right, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3396; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -16050,7 +16050,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m /* "mtrand.pyx":3398 * oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) - * + * * if np.any(np.greater(oleft, omode)): # <<<<<<<<<<<<<< * raise ValueError("left > mode") * if np.any(np.greater(omode, oright)): @@ -16091,7 +16091,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m if (__pyx_t_1) { /* "mtrand.pyx":3399 - * + * * if np.any(np.greater(oleft, omode)): * raise ValueError("left > mode") # <<<<<<<<<<<<<< * if np.any(np.greater(omode, oright)): @@ -16227,7 +16227,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m * raise ValueError("left == right") * return cont3_array(self.internal_state, rk_triangular, size, oleft, # <<<<<<<<<<<<<< * omode, oright) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -16235,7 +16235,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_80triangular(struct __pyx_obj_6m * raise ValueError("left == right") * return cont3_array(self.internal_state, rk_triangular, size, oleft, * omode, oright) # <<<<<<<<<<<<<< - * + * * # Complicated, discrete distributions: */ __pyx_t_2 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_oleft, __pyx_v_omode, __pyx_v_oright); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3404; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -16280,7 +16280,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_83binomial(PyObject *__pyx_v_sel PyObject* values[3] = {0,0,0}; /* "mtrand.pyx":3408 - * + * * # Complicated, discrete distributions: * def binomial(self, n, p, size=None): # <<<<<<<<<<<<<< * """ @@ -16361,7 +16361,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr /* "mtrand.pyx":3493 * cdef double fp - * + * * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * ln = PyInt_AsLong(n) * if not PyErr_Occurred(): @@ -16369,7 +16369,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr __pyx_v_fp = PyFloat_AsDouble(__pyx_v_p); /* "mtrand.pyx":3494 - * + * * fp = PyFloat_AsDouble(p) * ln = PyInt_AsLong(n) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -16453,7 +16453,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr * elif fp > 1: * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_140), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -16468,7 +16468,7 @@ static PyObject 
*__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr * elif fp > 1: * raise ValueError("p > 1") * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -16483,16 +16483,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr /* "mtrand.pyx":3504 * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3506 * PyErr_Clear() - * + * * on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less(n, 0)): @@ -16506,7 +16506,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr __pyx_t_3 = 0; /* "mtrand.pyx":3507 - * + * * on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less(n, 0)): @@ -16683,7 +16683,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discnp_array(self.internal_state, rk_binomial, size, on, op) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_143), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -16698,7 +16698,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_82binomial(struct __pyx_obj_6mtr * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") * return discnp_array(self.internal_state, rk_binomial, size, on, op) # <<<<<<<<<<<<<< - * + * * def negative_binomial(self, n, p, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -16744,7 +16744,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_85negative_binomial(PyObject *__ /* "mtrand.pyx":3516 * return discnp_array(self.internal_state, rk_binomial, size, on, op) - * + * * def negative_binomial(self, n, p, size=None): # <<<<<<<<<<<<<< * """ * negative_binomial(n, p, size=None) @@ -16824,7 +16824,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx /* "mtrand.pyx":3586 * cdef double fp - * + * * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * fn = PyFloat_AsDouble(n) * if not PyErr_Occurred(): @@ -16832,7 +16832,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx __pyx_v_fp = PyFloat_AsDouble(__pyx_v_p); /* "mtrand.pyx":3587 - * + * * fp = PyFloat_AsDouble(p) * fn = PyFloat_AsDouble(n) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): @@ -16932,7 +16932,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx * raise ValueError("p > 1") * return discdd_array_sc(self.internal_state, rk_negative_binomial, # <<<<<<<<<<<<<< * size, fn, fp) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -16940,7 +16940,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx * raise ValueError("p > 1") * return discdd_array_sc(self.internal_state, rk_negative_binomial, * size, fn, fp) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __pyx_t_2 = __pyx_f_6mtrand_discdd_array_sc(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_fn, __pyx_v_fp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3595; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -16954,16 +16954,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx /* "mtrand.pyx":3598 * size, fn, fp) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3600 * PyErr_Clear() - * + * * on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(n, 0)): @@ -16977,7 +16977,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx __pyx_t_3 = 0; /* "mtrand.pyx":3601 - * + * * on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(n, 0)): @@ -17170,7 +17170,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx * raise ValueError("p > 1") * return discdd_array(self.internal_state, rk_negative_binomial, size, # <<<<<<<<<<<<<< * on, op) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -17178,7 +17178,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_84negative_binomial(struct __pyx * raise ValueError("p > 1") * return discdd_array(self.internal_state, rk_negative_binomial, size, * on, op) # <<<<<<<<<<<<<< - * + * * def poisson(self, lam=1.0, size=None): */ __pyx_t_3 = __pyx_f_6mtrand_discdd_array(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3608; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -17223,7 +17223,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_87poisson(PyObject *__pyx_v_self /* "mtrand.pyx":3611 * on, op) - * + * * def poisson(self, lam=1.0, size=None): # <<<<<<<<<<<<<< * """ * poisson(lam=1.0, size=None) @@ -17360,7 +17360,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_poisson, size, flam) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_155), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3670; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -17375,7 +17375,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") * return discd_array_sc(self.internal_state, rk_poisson, size, flam) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -17390,16 +17390,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra /* "mtrand.pyx":3673 * return discd_array_sc(self.internal_state, rk_poisson, size, flam) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3675 * PyErr_Clear() - * + * * olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less(olam, 0)): * raise ValueError("lam < 0") @@ -17413,7 +17413,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra __pyx_t_2 = 0; /* "mtrand.pyx":3676 - * + * * olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less(olam, 0)): # 
<<<<<<<<<<<<<< * raise ValueError("lam < 0") @@ -17519,7 +17519,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_poisson, size, olam) - * + * */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_158), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3679; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); @@ -17534,7 +17534,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_86poisson(struct __pyx_obj_6mtra * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") * return discd_array(self.internal_state, rk_poisson, size, olam) # <<<<<<<<<<<<<< - * + * * def zipf(self, a, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -17578,7 +17578,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_89zipf(PyObject *__pyx_v_self, P /* "mtrand.pyx":3682 * return discd_array(self.internal_state, rk_poisson, size, olam) - * + * * def zipf(self, a, size=None): # <<<<<<<<<<<<<< * """ * zipf(a, size=None) @@ -17648,7 +17648,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ /* "mtrand.pyx":3757 * cdef double fa - * + * * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fa <= 1.0: @@ -17656,7 +17656,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ __pyx_v_fa = PyFloat_AsDouble(__pyx_v_a); /* "mtrand.pyx":3758 - * + * * fa = PyFloat_AsDouble(a) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fa <= 1.0: @@ -17680,7 +17680,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ * if fa <= 1.0: * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_zipf, size, fa) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_160), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3760; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -17695,7 +17695,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ * if fa <= 1.0: * raise ValueError("a <= 1.0") * return discd_array_sc(self.internal_state, rk_zipf, size, fa) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -17710,16 +17710,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ /* "mtrand.pyx":3763 * return discd_array_sc(self.internal_state, rk_zipf, size, fa) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3765 * PyErr_Clear() - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(oa, 1.0)): * raise ValueError("a <= 1.0") @@ -17733,7 +17733,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ __pyx_t_3 = 0; /* "mtrand.pyx":3766 - * + * * oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(oa, 1.0)): # <<<<<<<<<<<<<< * raise ValueError("a <= 1.0") @@ -17781,7 +17781,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ * if np.any(np.less_equal(oa, 1.0)): * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_zipf, size, oa) 
- * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_161), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3767; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -17796,7 +17796,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_88zipf(struct __pyx_obj_6mtrand_ * if np.any(np.less_equal(oa, 1.0)): * raise ValueError("a <= 1.0") * return discd_array(self.internal_state, rk_zipf, size, oa) # <<<<<<<<<<<<<< - * + * * def geometric(self, p, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -17840,7 +17840,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_91geometric(PyObject *__pyx_v_se /* "mtrand.pyx":3770 * return discd_array(self.internal_state, rk_zipf, size, oa) - * + * * def geometric(self, p, size=None): # <<<<<<<<<<<<<< * """ * geometric(p, size=None) @@ -17910,7 +17910,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt /* "mtrand.pyx":3818 * cdef double fp - * + * * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fp < 0.0: @@ -17918,7 +17918,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt __pyx_v_fp = PyFloat_AsDouble(__pyx_v_p); /* "mtrand.pyx":3819 - * + * * fp = PyFloat_AsDouble(p) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fp < 0.0: @@ -17968,7 +17968,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt * if fp > 1.0: * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_geometric, size, fp) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_165), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -17983,7 +17983,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt * if fp > 1.0: * raise ValueError("p > 1.0") * return discd_array_sc(self.internal_state, rk_geometric, size, fp) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -17998,16 +17998,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt /* "mtrand.pyx":3826 * return discd_array_sc(self.internal_state, rk_geometric, size, fp) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * - * + * + * */ PyErr_Clear(); /* "mtrand.pyx":3829 - * - * + * + * * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less(op, 0.0)): * raise ValueError("p < 0.0") @@ -18021,7 +18021,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt __pyx_t_3 = 0; /* "mtrand.pyx":3830 - * + * * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less(op, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("p < 0.0") @@ -18129,7 +18129,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_geometric, size, op) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_167), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -18144,7 +18144,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_90geometric(struct __pyx_obj_6mt * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") 
* return discd_array(self.internal_state, rk_geometric, size, op) # <<<<<<<<<<<<<< - * + * * def hypergeometric(self, ngood, nbad, nsample, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -18190,7 +18190,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_93hypergeometric(PyObject *__pyx /* "mtrand.pyx":3836 * return discd_array(self.internal_state, rk_geometric, size, op) - * + * * def hypergeometric(self, ngood, nbad, nsample, size=None): # <<<<<<<<<<<<<< * """ * hypergeometric(ngood, nbad, nsample, size=None) @@ -18281,7 +18281,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob /* "mtrand.pyx":3924 * cdef long lngood, lnbad, lnsample - * + * * lngood = PyInt_AsLong(ngood) # <<<<<<<<<<<<<< * lnbad = PyInt_AsLong(nbad) * lnsample = PyInt_AsLong(nsample) @@ -18289,7 +18289,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob __pyx_v_lngood = PyInt_AsLong(__pyx_v_ngood); /* "mtrand.pyx":3925 - * + * * lngood = PyInt_AsLong(ngood) * lnbad = PyInt_AsLong(nbad) # <<<<<<<<<<<<<< * lnsample = PyInt_AsLong(nsample) @@ -18425,7 +18425,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob * raise ValueError("ngood + nbad < nsample") * return discnmN_array_sc(self.internal_state, rk_hypergeometric, size, # <<<<<<<<<<<<<< * lngood, lnbad, lnsample) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -18433,7 +18433,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob * raise ValueError("ngood + nbad < nsample") * return discnmN_array_sc(self.internal_state, rk_hypergeometric, size, * lngood, lnbad, lnsample) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __pyx_t_2 = __pyx_f_6mtrand_discnmN_array_sc(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_lngood, __pyx_v_lnbad, __pyx_v_lnsample); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3936; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -18447,16 +18447,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob /* "mtrand.pyx":3939 * lngood, lnbad, lnsample) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":3941 * PyErr_Clear() - * + * * ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) * onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) @@ -18470,7 +18470,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob __pyx_t_3 = 0; /* "mtrand.pyx":3942 - * + * * ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) * onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) @@ -18753,7 +18753,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob * raise ValueError("ngood + nbad < nsample") * return discnmN_array(self.internal_state, rk_hypergeometric, size, # <<<<<<<<<<<<<< * ongood, onbad, onsample) - * + * */ __Pyx_XDECREF(__pyx_r); @@ -18761,7 +18761,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_92hypergeometric(struct __pyx_ob * raise ValueError("ngood + nbad < nsample") * return discnmN_array(self.internal_state, rk_hypergeometric, size, * ongood, onbad, onsample) # <<<<<<<<<<<<<< - * + * * def logseries(self, p, size=None): */ __pyx_t_6 = 
__pyx_f_6mtrand_discnmN_array(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_ongood, __pyx_v_onbad, __pyx_v_onsample); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3952; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -18807,7 +18807,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_95logseries(PyObject *__pyx_v_se /* "mtrand.pyx":3955 * ongood, onbad, onsample) - * + * * def logseries(self, p, size=None): # <<<<<<<<<<<<<< * """ * logseries(p, size=None) @@ -18877,7 +18877,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt /* "mtrand.pyx":4032 * cdef double fp - * + * * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * if not PyErr_Occurred(): * if fp <= 0.0: @@ -18885,7 +18885,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt __pyx_v_fp = PyFloat_AsDouble(__pyx_v_p); /* "mtrand.pyx":4033 - * + * * fp = PyFloat_AsDouble(p) * if not PyErr_Occurred(): # <<<<<<<<<<<<<< * if fp <= 0.0: @@ -18935,7 +18935,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt * if fp >= 1.0: * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_logseries, size, fp) - * + * */ __pyx_t_2 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_183), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4037; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -18950,7 +18950,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt * if fp >= 1.0: * raise ValueError("p >= 1.0") * return discd_array_sc(self.internal_state, rk_logseries, size, fp) # <<<<<<<<<<<<<< - * + * * PyErr_Clear() */ __Pyx_XDECREF(__pyx_r); @@ -18965,16 +18965,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt /* "mtrand.pyx":4040 * return discd_array_sc(self.internal_state, rk_logseries, size, fp) - * + * * PyErr_Clear() # <<<<<<<<<<<<<< - * + * * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ PyErr_Clear(); /* "mtrand.pyx":4042 * PyErr_Clear() - * + * * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * if np.any(np.less_equal(op, 0.0)): * raise ValueError("p <= 0.0") @@ -18988,7 +18988,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt __pyx_t_3 = 0; /* "mtrand.pyx":4043 - * + * * op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * if np.any(np.less_equal(op, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("p <= 0.0") @@ -19096,7 +19096,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_logseries, size, op) - * + * */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_185), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); @@ -19111,7 +19111,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_94logseries(struct __pyx_obj_6mt * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") * return discd_array(self.internal_state, rk_logseries, size, op) # <<<<<<<<<<<<<< - * + * * # Multivariate distributions: */ __Pyx_XDECREF(__pyx_r); @@ -19155,7 +19155,7 @@ static PyObject 
*__pyx_pw_6mtrand_11RandomState_97multivariate_normal(PyObject * PyObject* values[3] = {0,0,0}; /* "mtrand.pyx":4050 - * + * * # Multivariate distributions: * def multivariate_normal(self, mean, cov, size=None): # <<<<<<<<<<<<<< * """ @@ -19632,7 +19632,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p /* "mtrand.pyx":4172 * # decomposition of cov is such an A. - * + * * from numpy.dual import svd # <<<<<<<<<<<<<< * # XXX: we really should be doing this by Cholesky decomposition * (u,s,v) = svd(cov) @@ -19681,13 +19681,13 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p } #if CYTHON_COMPILING_IN_CPYTHON if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_9 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 2); + __pyx_t_9 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_1 = PyTuple_GET_ITEM(sequence, 1); + __pyx_t_2 = PyTuple_GET_ITEM(sequence, 2); } else { - __pyx_t_9 = PyList_GET_ITEM(sequence, 0); - __pyx_t_1 = PyList_GET_ITEM(sequence, 1); - __pyx_t_2 = PyList_GET_ITEM(sequence, 2); + __pyx_t_9 = PyList_GET_ITEM(sequence, 0); + __pyx_t_1 = PyList_GET_ITEM(sequence, 1); + __pyx_t_2 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(__pyx_t_1); @@ -19811,7 +19811,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p * np.add(mean,x,x) * x.shape = tuple(final_shape) # <<<<<<<<<<<<<< * return x - * + * */ __pyx_t_2 = ((PyObject *)PyList_AsTuple(__pyx_v_final_shape)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); @@ -19822,7 +19822,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_96multivariate_normal(struct __p * np.add(mean,x,x) * x.shape = tuple(final_shape) * return x # <<<<<<<<<<<<<< - * + * * def multinomial(self, npy_intp n, object pvals, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -19874,7 +19874,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_99multinomial(PyObject *__pyx_v_ /* "mtrand.pyx":4182 * return x - * + * * def multinomial(self, npy_intp n, object pvals, size=None): # <<<<<<<<<<<<<< * """ * multinomial(n, pvals, size=None) @@ -19962,7 +19962,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 /* "mtrand.pyx":4241 * cdef double Sum - * + * * d = len(pvals) # <<<<<<<<<<<<<< * parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) * pix = <double*>PyArray_DATA(parr) @@ -19971,11 +19971,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 __pyx_v_d = __pyx_t_1; /* "mtrand.pyx":4242 - * + * * d = len(pvals) * parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<< * pix = <double*>PyArray_DATA(parr) - * + * */ __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_pvals, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4242; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -19989,26 +19989,26 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 * d = len(pvals) * parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) * pix = <double*>PyArray_DATA(parr) # <<<<<<<<<<<<<< - * + * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): */ __pyx_v_pix = ((double *)PyArray_DATA(arrayObject_parr)); /* "mtrand.pyx":4245 * pix = <double*>PyArray_DATA(parr) - * + * * if 
kahan_sum(pix, d-1) > (1.0 + 1e-12): # <<<<<<<<<<<<<< * raise ValueError("sum(pvals[:-1]) > 1.0") - * + * */ __pyx_t_4 = (__pyx_f_6mtrand_kahan_sum(__pyx_v_pix, (__pyx_v_d - 1)) > (1.0 + 1e-12)); if (__pyx_t_4) { /* "mtrand.pyx":4246 - * + * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): * raise ValueError("sum(pvals[:-1]) > 1.0") # <<<<<<<<<<<<<< - * + * * if size is None: */ __pyx_t_3 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_195), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -20022,7 +20022,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 /* "mtrand.pyx":4248 * raise ValueError("sum(pvals[:-1]) > 1.0") - * + * * if size is None: # <<<<<<<<<<<<<< * shape = (d,) * elif type(size) is int: @@ -20031,7 +20031,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 if (__pyx_t_4) { /* "mtrand.pyx":4249 - * + * * if size is None: * shape = (d,) # <<<<<<<<<<<<<< * elif type(size) is int: @@ -20086,7 +20086,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 * shape = (size, d) * else: * shape = size + (d,) # <<<<<<<<<<<<<< - * + * * multin = np.zeros(shape, int) */ __pyx_t_3 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -20106,7 +20106,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 /* "mtrand.pyx":4255 * shape = size + (d,) - * + * * multin = np.zeros(shape, int) # <<<<<<<<<<<<<< * mnarr = <ndarray>multin * mnix = <long*>PyArray_DATA(mnarr) @@ -20132,7 +20132,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 __pyx_t_5 = 0; /* "mtrand.pyx":4256 - * + * * multin = np.zeros(shape, int) * mnarr = <ndarray>multin # <<<<<<<<<<<<<< * mnix = <long*>PyArray_DATA(mnarr) @@ -20264,7 +20264,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 * Sum = Sum - pix[j] * if dn > 0: # <<<<<<<<<<<<<< * mnix[i+d-1] = dn - * + * */ __pyx_t_4 = (__pyx_v_dn > 0); if (__pyx_t_4) { @@ -20273,7 +20273,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 * Sum = Sum - pix[j] * if dn > 0: * mnix[i+d-1] = dn # <<<<<<<<<<<<<< - * + * * i = i + d */ (__pyx_v_mnix[((__pyx_v_i + __pyx_v_d) - 1)]) = __pyx_v_dn; @@ -20283,9 +20283,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 /* "mtrand.pyx":4271 * mnix[i+d-1] = dn - * + * * i = i + d # <<<<<<<<<<<<<< - * + * * return multin */ __pyx_v_i = (__pyx_v_i + __pyx_v_d); @@ -20293,9 +20293,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_98multinomial(struct __pyx_obj_6 /* "mtrand.pyx":4273 * i = i + d - * + * * return multin # <<<<<<<<<<<<<< - * + * * def dirichlet(self, object alpha, size=None): */ __Pyx_XDECREF(__pyx_r); @@ -20339,7 +20339,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_s /* "mtrand.pyx":4275 * return multin - * + * * def dirichlet(self, object alpha, size=None): # <<<<<<<<<<<<<< * """ * dirichlet(alpha, size=None) @@ -20420,7 +20420,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m /* "mtrand.pyx":4361 * cdef double acc, invacc - * + * * k = len(alpha) # <<<<<<<<<<<<<< * alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) * alpha_data = 
<double*>PyArray_DATA(alpha_arr) @@ -20429,11 +20429,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m __pyx_v_k = __pyx_t_1; /* "mtrand.pyx":4362 - * + * * k = len(alpha) * alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<< * alpha_data = <double*>PyArray_DATA(alpha_arr) - * + * */ __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_alpha, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4362; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -20447,14 +20447,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m * k = len(alpha) * alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) * alpha_data = <double*>PyArray_DATA(alpha_arr) # <<<<<<<<<<<<<< - * + * * if size is None: */ __pyx_v_alpha_data = ((double *)PyArray_DATA(__pyx_v_alpha_arr)); /* "mtrand.pyx":4365 * alpha_data = <double*>PyArray_DATA(alpha_arr) - * + * * if size is None: # <<<<<<<<<<<<<< * shape = (k,) * elif type(size) is int: @@ -20463,7 +20463,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m if (__pyx_t_4) { /* "mtrand.pyx":4366 - * + * * if size is None: * shape = (k,) # <<<<<<<<<<<<<< * elif type(size) is int: @@ -20518,7 +20518,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m * shape = (size, k) * else: * shape = size + (k,) # <<<<<<<<<<<<<< - * + * * diric = np.zeros(shape, np.float64) */ __pyx_t_3 = __Pyx_PyInt_to_py_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -20538,7 +20538,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m /* "mtrand.pyx":4372 * shape = size + (k,) - * + * * diric = np.zeros(shape, np.float64) # <<<<<<<<<<<<<< * val_arr = <ndarray>diric * val_data= <double*>PyArray_DATA(val_arr) @@ -20569,11 +20569,11 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m __pyx_t_5 = 0; /* "mtrand.pyx":4373 - * + * * diric = np.zeros(shape, np.float64) * val_arr = <ndarray>diric # <<<<<<<<<<<<<< * val_data= <double*>PyArray_DATA(val_arr) - * + * */ __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_diric))); __pyx_v_val_arr = ((PyArrayObject *)__pyx_v_diric); @@ -20582,14 +20582,14 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m * diric = np.zeros(shape, np.float64) * val_arr = <ndarray>diric * val_data= <double*>PyArray_DATA(val_arr) # <<<<<<<<<<<<<< - * + * * i = 0 */ __pyx_v_val_data = ((double *)PyArray_DATA(__pyx_v_val_arr)); /* "mtrand.pyx":4376 * val_data= <double*>PyArray_DATA(val_arr) - * + * * i = 0 # <<<<<<<<<<<<<< * totsize = PyArray_SIZE(val_arr) * while i < totsize: @@ -20597,7 +20597,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m __pyx_v_i = 0; /* "mtrand.pyx":4377 - * + * * i = 0 * totsize = PyArray_SIZE(val_arr) # <<<<<<<<<<<<<< * while i < totsize: @@ -20688,7 +20688,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m * for j from 0 <= j < k: * val_data[i+j] = val_data[i+j] * invacc # <<<<<<<<<<<<<< * i = i + k - * + * */ (__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) = ((__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) * __pyx_v_invacc); } @@ -20697,7 +20697,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m * for j from 0 <= j < 
k: * val_data[i+j] = val_data[i+j] * invacc * i = i + k # <<<<<<<<<<<<<< - * + * * return diric */ __pyx_v_i = (__pyx_v_i + __pyx_v_k); @@ -20705,9 +20705,9 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_100dirichlet(struct __pyx_obj_6m /* "mtrand.pyx":4388 * i = i + k - * + * * return diric # <<<<<<<<<<<<<< - * + * * # Shuffling and permutations: */ __Pyx_XDECREF(__pyx_r); @@ -20746,7 +20746,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_103shuffle(PyObject *__pyx_v_sel } /* "mtrand.pyx":4391 - * + * * # Shuffling and permutations: * def shuffle(self, object x): # <<<<<<<<<<<<<< * """ @@ -20774,16 +20774,16 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr /* "mtrand.pyx":4426 * cdef npy_intp i, j - * + * * i = len(x) - 1 # <<<<<<<<<<<<<< - * + * * # Logic adapted from random.shuffle() */ __pyx_t_1 = PyObject_Length(__pyx_v_x); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4426; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_i = (__pyx_t_1 - 1); /* "mtrand.pyx":4429 - * + * * # Logic adapted from random.shuffle() * if isinstance(x, np.ndarray) and x.ndim > 1: # <<<<<<<<<<<<<< * # For a multi-dimensional ndarray, indexing returns a view onto @@ -20936,7 +20936,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr * j = rk_interval(i, self.internal_state) * x[i], x[j] = x[j], x[i] # <<<<<<<<<<<<<< * i = i - 1 - * + * */ __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, sizeof(npy_intp), __Pyx_PyInt_to_py_npy_intp, 0, 1, 1); if (!__pyx_t_8) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4446; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); @@ -20951,7 +20951,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_102shuffle(struct __pyx_obj_6mtr * j = rk_interval(i, self.internal_state) * x[i], x[j] = x[j], x[i] * i = i - 1 # <<<<<<<<<<<<<< - * + * * def permutation(self, object x): */ __pyx_v_i = (__pyx_v_i - 1); @@ -20989,7 +20989,7 @@ static PyObject *__pyx_pw_6mtrand_11RandomState_105permutation(PyObject *__pyx_v /* "mtrand.pyx":4449 * i = i - 1 - * + * * def permutation(self, object x): # <<<<<<<<<<<<<< * """ * permutation(x) @@ -21009,7 +21009,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_ __Pyx_RefNannySetupContext("permutation", 0); /* "mtrand.pyx":4485 - * + * * """ * if isinstance(x, (int, long, np.integer)): # <<<<<<<<<<<<<< * arr = np.arange(x) @@ -21093,7 +21093,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_ * arr = np.array(x) * self.shuffle(arr) # <<<<<<<<<<<<<< * return arr - * + * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s__shuffle); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4489; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); @@ -21112,7 +21112,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_104permutation(struct __pyx_obj_ * arr = np.array(x) * self.shuffle(arr) * return arr # <<<<<<<<<<<<<< - * + * * _rand = RandomState() */ __Pyx_XDECREF(__pyx_r); @@ -21799,10 +21799,10 @@ static int __Pyx_InitCachedConstants(void) { __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14)); /* "mtrand.pyx":877 - * + * * if lo >= hi : * raise ValueError("low >= high") # <<<<<<<<<<<<<< - * + * * diff = <unsigned long>hi - <unsigned long>lo - 1UL */ __pyx_k_tuple_16 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_15)); if (unlikely(!__pyx_k_tuple_16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 877; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -21846,7 +21846,7 @@ static int __Pyx_InitCachedConstants(void) { * pop_size = a.shape[0] * if pop_size is 0: * raise ValueError("a must be non-empty") # <<<<<<<<<<<<<< - * + * * if None != p: */ __pyx_k_tuple_25 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_24)); if (unlikely(!__pyx_k_tuple_25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1012; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -21890,7 +21890,7 @@ static int __Pyx_InitCachedConstants(void) { * raise ValueError("probabilities are not non-negative") * if not np.allclose(p.sum(), 1): * raise ValueError("probabilities do not sum to 1") # <<<<<<<<<<<<<< - * + * * shape = size */ __pyx_k_tuple_33 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_32)); if (unlikely(!__pyx_k_tuple_33)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1023; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -21902,7 +21902,7 @@ static int __Pyx_InitCachedConstants(void) { * if size > pop_size: * raise ValueError("Cannot take a larger sample than " # <<<<<<<<<<<<<< * "population when 'replace=False'") - * + * */ __pyx_k_tuple_35 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_34)); if (unlikely(!__pyx_k_tuple_35)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1043; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_35); @@ -21923,7 +21923,7 @@ static int __Pyx_InitCachedConstants(void) { * if shape is None and isinstance(idx, np.ndarray): * # In most cases a scalar will have been made an array * idx = idx.item(0) # <<<<<<<<<<<<<< - * + * * #Use samples as indices for a if a is array-like */ __pyx_k_tuple_38 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_k_tuple_38)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1073; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -21946,7 +21946,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_normal, size, floc, fscale) - * + * */ __pyx_k_tuple_45 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_45)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1479; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_45); @@ -21957,7 +21957,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale) - * + * */ __pyx_k_tuple_46 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_46)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1487; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_46); @@ -21979,7 +21979,7 @@ static int __Pyx_InitCachedConstants(void) { * if fb <= 0: * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb) - * + * */ __pyx_k_tuple_50 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_49)); if (unlikely(!__pyx_k_tuple_50)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1536; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_50); @@ -22001,7 +22001,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_beta, size, oa, ob) - * + * */ __pyx_k_tuple_52 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_49)); if (unlikely(!__pyx_k_tuple_52)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1546; __pyx_clineno = __LINE__; 
goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_52); @@ -22012,7 +22012,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_exponential, size, fscale) - * + * */ __pyx_k_tuple_54 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_54)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1593; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_54); @@ -22023,7 +22023,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_exponential, size, oscale) - * + * */ __pyx_k_tuple_55 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_55)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_55); @@ -22034,7 +22034,7 @@ static int __Pyx_InitCachedConstants(void) { * if fshape <= 0: * raise ValueError("shape <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape) - * + * */ __pyx_k_tuple_57 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_57)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1704; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_57); @@ -22045,7 +22045,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oshape, 0.0)): * raise ValueError("shape <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) - * + * */ __pyx_k_tuple_58 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_56)); if (unlikely(!__pyx_k_tuple_58)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1710; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_58); @@ -22067,7 +22067,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, fscale) - * + * */ __pyx_k_tuple_61 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_61)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_61); @@ -22089,7 +22089,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale) - * + * */ __pyx_k_tuple_63 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_63)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1801; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_63); @@ -22111,7 +22111,7 @@ static int __Pyx_InitCachedConstants(void) { * if fdfden <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, fdfden) - * + * */ __pyx_k_tuple_65 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_65)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1893; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_65); @@ -22133,7 +22133,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden) - * + * */ __pyx_k_tuple_69 = 
PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_68)); if (unlikely(!__pyx_k_tuple_69)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1903; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_69); @@ -22173,7 +22173,7 @@ static int __Pyx_InitCachedConstants(void) { __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_74)); /* "mtrand.pyx":1993 - * + * * if np.any(np.less_equal(odfnum, 1.0)): * raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfden, 0.0)): @@ -22210,7 +22210,7 @@ static int __Pyx_InitCachedConstants(void) { * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf) - * + * */ __pyx_k_tuple_79 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_79)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2069; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_79); @@ -22221,7 +22221,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_chisquare, size, odf) - * + * */ __pyx_k_tuple_80 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_80)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2076; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_80); @@ -22276,7 +22276,7 @@ static int __Pyx_InitCachedConstants(void) { * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_t, size, fdf) - * + * */ __pyx_k_tuple_87 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_87)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2323; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_87); @@ -22287,7 +22287,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_t, size, odf) - * + * */ __pyx_k_tuple_88 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_78)); if (unlikely(!__pyx_k_tuple_88)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2330; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_88); @@ -22298,7 +22298,7 @@ static int __Pyx_InitCachedConstants(void) { * if fkappa < 0: * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, fkappa) - * + * */ __pyx_k_tuple_90 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_89)); if (unlikely(!__pyx_k_tuple_90)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2416; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_90); @@ -22309,7 +22309,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa) - * + * */ __pyx_k_tuple_91 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_89)); if (unlikely(!__pyx_k_tuple_91)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_91); @@ -22320,7 +22320,7 @@ static int __Pyx_InitCachedConstants(void) { * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_pareto, size, fa) - * + * */ __pyx_k_tuple_92 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_92)) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 2513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_92); @@ -22331,7 +22331,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_pareto, size, oa) - * + * */ __pyx_k_tuple_93 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_93)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2520; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_93); @@ -22342,7 +22342,7 @@ static int __Pyx_InitCachedConstants(void) { * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_weibull, size, fa) - * + * */ __pyx_k_tuple_94 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_94)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2613; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_94); @@ -22353,7 +22353,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_weibull, size, oa) - * + * */ __pyx_k_tuple_95 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_95)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2620; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_95); @@ -22364,7 +22364,7 @@ static int __Pyx_InitCachedConstants(void) { * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_power, size, fa) - * + * */ __pyx_k_tuple_96 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_96)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2722; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_96); @@ -22375,7 +22375,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_power, size, oa) - * + * */ __pyx_k_tuple_97 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_47)); if (unlikely(!__pyx_k_tuple_97)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2729; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_97); @@ -22386,7 +22386,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, fscale) - * + * */ __pyx_k_tuple_100 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_100)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2812; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_100); @@ -22397,7 +22397,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) - * + * */ __pyx_k_tuple_101 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_101)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_101); @@ -22408,7 +22408,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, fscale) - * + * */ __pyx_k_tuple_104 = PyTuple_Pack(1, ((PyObject 
*)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_104)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2943; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_104); @@ -22419,7 +22419,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) - * + * */ __pyx_k_tuple_105 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_105)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 2950; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_105); @@ -22430,7 +22430,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, fscale) - * + * */ __pyx_k_tuple_108 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_108)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3031; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_108); @@ -22441,7 +22441,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) - * + * */ __pyx_k_tuple_109 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_109)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3038; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_109); @@ -22452,7 +22452,7 @@ static int __Pyx_InitCachedConstants(void) { * if fsigma <= 0: * raise ValueError("sigma <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_lognormal, size, fmean, fsigma) - * + * */ __pyx_k_tuple_113 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_112)); if (unlikely(!__pyx_k_tuple_113)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_113); @@ -22463,7 +22463,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(osigma, 0.0)): * raise ValueError("sigma <= 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) - * + * */ __pyx_k_tuple_115 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_114)); if (unlikely(!__pyx_k_tuple_115)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3159; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_115); @@ -22474,7 +22474,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_rayleigh, size, fscale) - * + * */ __pyx_k_tuple_117 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_117)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_117); @@ -22485,7 +22485,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oscale, 0.0)): * raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_rayleigh, size, oscale) - * + * */ __pyx_k_tuple_119 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_118)); if (unlikely(!__pyx_k_tuple_119)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3231; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_119); @@ -22507,7 +22507,7 @@ static int __Pyx_InitCachedConstants(void) { * if fscale <= 0: 
* raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, fscale) - * + * */ __pyx_k_tuple_122 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_44)); if (unlikely(!__pyx_k_tuple_122)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3306; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_122); @@ -22529,7 +22529,7 @@ static int __Pyx_InitCachedConstants(void) { * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_wald, size, omean, oscale) - * + * */ __pyx_k_tuple_125 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_118)); if (unlikely(!__pyx_k_tuple_125)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3315; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_125); @@ -22569,7 +22569,7 @@ static int __Pyx_InitCachedConstants(void) { __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_131)); /* "mtrand.pyx":3399 - * + * * if np.any(np.greater(oleft, omode)): * raise ValueError("left > mode") # <<<<<<<<<<<<<< * if np.any(np.greater(omode, oright)): @@ -22628,7 +22628,7 @@ static int __Pyx_InitCachedConstants(void) { * elif fp > 1: * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, fp) - * + * */ __pyx_k_tuple_140 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_139)); if (unlikely(!__pyx_k_tuple_140)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3501; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_140); @@ -22661,7 +22661,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discnp_array(self.internal_state, rk_binomial, size, on, op) - * + * */ __pyx_k_tuple_143 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_139)); if (unlikely(!__pyx_k_tuple_143)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3513; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_143); @@ -22749,7 +22749,7 @@ static int __Pyx_InitCachedConstants(void) { * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_poisson, size, flam) - * + * */ __pyx_k_tuple_155 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_154)); if (unlikely(!__pyx_k_tuple_155)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3670; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_155); @@ -22771,7 +22771,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_poisson, size, olam) - * + * */ __pyx_k_tuple_158 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_157)); if (unlikely(!__pyx_k_tuple_158)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3679; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_158); @@ -22782,7 +22782,7 @@ static int __Pyx_InitCachedConstants(void) { * if fa <= 1.0: * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_zipf, size, fa) - * + * */ __pyx_k_tuple_160 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_159)); if (unlikely(!__pyx_k_tuple_160)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3760; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_160); @@ -22793,7 +22793,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.less_equal(oa, 1.0)): * raise 
ValueError("a <= 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_zipf, size, oa) - * + * */ __pyx_k_tuple_161 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_159)); if (unlikely(!__pyx_k_tuple_161)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3767; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_161); @@ -22815,7 +22815,7 @@ static int __Pyx_InitCachedConstants(void) { * if fp > 1.0: * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_geometric, size, fp) - * + * */ __pyx_k_tuple_165 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_164)); if (unlikely(!__pyx_k_tuple_165)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_165); @@ -22837,7 +22837,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_geometric, size, op) - * + * */ __pyx_k_tuple_167 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_164)); if (unlikely(!__pyx_k_tuple_167)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 3833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_167); @@ -22947,7 +22947,7 @@ static int __Pyx_InitCachedConstants(void) { * if fp >= 1.0: * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_logseries, size, fp) - * + * */ __pyx_k_tuple_183 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_182)); if (unlikely(!__pyx_k_tuple_183)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4037; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_183); @@ -22969,7 +22969,7 @@ static int __Pyx_InitCachedConstants(void) { * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_logseries, size, op) - * + * */ __pyx_k_tuple_185 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_182)); if (unlikely(!__pyx_k_tuple_185)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4046; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_k_tuple_185); @@ -23020,10 +23020,10 @@ static int __Pyx_InitCachedConstants(void) { __Pyx_GIVEREF(__pyx_k_slice_192); /* "mtrand.pyx":4246 - * + * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): * raise ValueError("sum(pvals[:-1]) > 1.0") # <<<<<<<<<<<<<< - * + * * if size is None: */ __pyx_k_tuple_195 = PyTuple_Pack(1, ((PyObject *)__pyx_kp_s_194)); if (unlikely(!__pyx_k_tuple_195)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4246; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -23045,7 +23045,7 @@ static int __Pyx_InitCachedConstants(void) { * """ * cdef rk_state *internal_state * poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 # <<<<<<<<<<<<<< - * + * * def __init__(self, seed=None): */ __pyx_k_tuple_197 = PyTuple_Pack(1, ((PyObject *)__pyx_n_s__l)); if (unlikely(!__pyx_k_tuple_197)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -23170,20 +23170,20 @@ PyMODINIT_FUNC PyInit_mtrand(void) /*--- Execution code ---*/ /* "mtrand.pyx":124 - * + * * # Initialize numpy * import_array() # <<<<<<<<<<<<<< - * + * * import numpy as np */ import_array(); /* "mtrand.pyx":126 * import_array() - * + * * import numpy as np # <<<<<<<<<<<<<< * import operator - * + * */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 126; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); @@ -23191,10 +23191,10 @@ PyMODINIT_FUNC PyInit_mtrand(void) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "mtrand.pyx":127 - * + * * import numpy as np * import operator # <<<<<<<<<<<<<< - * + * * cdef object cont0_array(rk_state *state, rk_cont0 func, object size): */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__operator), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -23206,7 +23206,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) * """ * cdef rk_state *internal_state * poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10 # <<<<<<<<<<<<<< - * + * * def __init__(self, seed=None): */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 559; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -23257,8 +23257,8 @@ PyMODINIT_FUNC PyInit_mtrand(void) PyType_Modified(__pyx_ptype_6mtrand_RandomState); /* "mtrand.pyx":920 - * - * + * + * * def choice(self, a, size=None, replace=True, p=None): # <<<<<<<<<<<<<< * """ * choice(a, size=None, replace=True, p=None) @@ -23270,8 +23270,8 @@ PyMODINIT_FUNC PyInit_mtrand(void) __pyx_t_4 = 0; /* "mtrand.pyx":1092 - * - * + * + * * def uniform(self, low=0.0, high=1.0, size=None): # <<<<<<<<<<<<<< * """ * uniform(low=0.0, high=1.0, size=1) @@ -23289,7 +23289,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":1390 * return cont0_array(self.internal_state, rk_gauss, size) - * + * * def normal(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * normal(loc=0.0, scale=1.0, size=None) @@ -23307,7 +23307,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":1549 * return cont2_array(self.internal_state, rk_beta, size, oa, ob) - * + * * def exponential(self, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * exponential(scale=1.0, size=None) @@ -23320,7 +23320,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":1713 * return cont1_array(self.internal_state, rk_standard_gamma, size, oshape) - * + * * def gamma(self, shape, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * gamma(shape, scale=1.0, size=None) @@ -23333,7 +23333,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":2732 * return cont1_array(self.internal_state, rk_power, size, oa) - * + * * def laplace(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * laplace(loc=0.0, scale=1.0, size=None) @@ -23351,7 +23351,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":2822 * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale) - * + * * def gumbel(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * gumbel(loc=0.0, scale=1.0, size=None) @@ -23369,7 +23369,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":2953 * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale) - * + * * def logistic(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * logistic(loc=0.0, scale=1.0, size=None) @@ -23387,7 +23387,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":3041 * return cont2_array(self.internal_state, rk_logistic, size, oloc, oscale) - * + * * def lognormal(self, mean=0.0, sigma=1.0, size=None): # <<<<<<<<<<<<<< * """ * lognormal(mean=0.0, sigma=1.0, size=None) @@ -23405,7 +23405,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":3162 * return cont2_array(self.internal_state, rk_lognormal, size, omean, osigma) - * + * * def rayleigh(self, scale=1.0, size=None): # <<<<<<<<<<<<<< * """ * 
rayleigh(scale=1.0, size=None) @@ -23418,7 +23418,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":3611 * on, op) - * + * * def poisson(self, lam=1.0, size=None): # <<<<<<<<<<<<<< * """ * poisson(lam=1.0, size=None) @@ -23431,7 +23431,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":4492 * return arr - * + * * _rand = RandomState() # <<<<<<<<<<<<<< * seed = _rand.seed * get_state = _rand.get_state @@ -23442,7 +23442,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "mtrand.pyx":4493 - * + * * _rand = RandomState() * seed = _rand.seed # <<<<<<<<<<<<<< * get_state = _rand.get_state @@ -23941,7 +23941,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) * rayleigh = _rand.rayleigh * wald = _rand.wald # <<<<<<<<<<<<<< * triangular = _rand.triangular - * + * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4526; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); @@ -23955,7 +23955,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) * rayleigh = _rand.rayleigh * wald = _rand.wald * triangular = _rand.triangular # <<<<<<<<<<<<<< - * + * * binomial = _rand.binomial */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4527; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -23968,7 +23968,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":4529 * triangular = _rand.triangular - * + * * binomial = _rand.binomial # <<<<<<<<<<<<<< * negative_binomial = _rand.negative_binomial * poisson = _rand.poisson @@ -23982,7 +23982,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "mtrand.pyx":4530 - * + * * binomial = _rand.binomial * negative_binomial = _rand.negative_binomial # <<<<<<<<<<<<<< * poisson = _rand.poisson @@ -24046,7 +24046,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) * geometric = _rand.geometric * hypergeometric = _rand.hypergeometric # <<<<<<<<<<<<<< * logseries = _rand.logseries - * + * */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4534; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); @@ -24060,7 +24060,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) * geometric = _rand.geometric * hypergeometric = _rand.hypergeometric * logseries = _rand.logseries # <<<<<<<<<<<<<< - * + * * multivariate_normal = _rand.multivariate_normal */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4535; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -24073,7 +24073,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":4537 * logseries = _rand.logseries - * + * * multivariate_normal = _rand.multivariate_normal # <<<<<<<<<<<<<< * multinomial = _rand.multinomial * dirichlet = _rand.dirichlet @@ -24087,11 +24087,11 @@ PyMODINIT_FUNC PyInit_mtrand(void) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "mtrand.pyx":4538 - * + * * multivariate_normal = _rand.multivariate_normal * multinomial = _rand.multinomial # <<<<<<<<<<<<<< * dirichlet = _rand.dirichlet - * + * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4538; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); @@ -24105,7 +24105,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) * multivariate_normal = _rand.multivariate_normal * multinomial = _rand.multinomial * 
dirichlet = _rand.dirichlet # <<<<<<<<<<<<<< - * + * * shuffle = _rand.shuffle */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s___rand); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4539; __pyx_clineno = __LINE__; goto __pyx_L1_error;} @@ -24118,7 +24118,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) /* "mtrand.pyx":4541 * dirichlet = _rand.dirichlet - * + * * shuffle = _rand.shuffle # <<<<<<<<<<<<<< * permutation = _rand.permutation */ @@ -24131,7 +24131,7 @@ PyMODINIT_FUNC PyInit_mtrand(void) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "mtrand.pyx":4542 - * + * * shuffle = _rand.shuffle * permutation = _rand.permutation # <<<<<<<<<<<<<< */ diff --git a/numpy/random/setup.py b/numpy/random/setup.py index 917623bba..c7e792f2f 100644 --- a/numpy/random/setup.py +++ b/numpy/random/setup.py @@ -19,7 +19,7 @@ def needs_mingw_ftime_workaround(): def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, get_mathlibs - config = Configuration('random',parent_package,top_path) + config = Configuration('random', parent_package, top_path) def generate_libraries(ext, build_dir): config_cmd = config.get_config_cmd() @@ -41,9 +41,9 @@ def configuration(parent_package='',top_path=None): ['mtrand.c', 'randomkit.c', 'initarray.c', 'distributions.c']]+[generate_libraries], libraries=libs, - depends = [join('mtrand','*.h'), - join('mtrand','*.pyx'), - join('mtrand','*.pxi'), + depends = [join('mtrand', '*.h'), + join('mtrand', '*.pyx'), + join('mtrand', '*.pxi'), ], define_macros = defs, ) diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 6959c45dc..4ae7cfeb1 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -26,8 +26,8 @@ class TestMultinomial(TestCase): random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) def test_int_negative_interval(self): - assert_( -5 <= random.randint(-5,-1) < -1) - x = random.randint(-5,-1,5) + assert_( -5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) assert_(np.all(-5 <= x)) assert_(np.all(x < -1)) @@ -94,20 +94,20 @@ class TestRandomDist(TestCase): actual = np.random.rand(3, 2) desired = np.array([[ 0.61879477158567997, 0.59162362775974664], [ 0.88868358904449662, 0.89165480011560816], - [ 0.4575674820298663 , 0.7781880808593471 ]]) + [ 0.4575674820298663, 0.7781880808593471 ]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): np.random.seed(self.seed) actual = np.random.randn(3, 2) desired = np.array([[ 1.34016345771863121, 1.73759122771936081], - [ 1.498988344300628 , -0.2286433324536169 ], - [ 2.031033998682787 , 2.17032494605655257]]) + [ 1.498988344300628, -0.2286433324536169 ], + [ 2.031033998682787, 2.17032494605655257]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_randint(self): np.random.seed(self.seed) - actual = np.random.randint(-99, 99, size=(3,2)) + actual = np.random.randint(-99, 99, size=(3, 2)) desired = np.array([[ 31, 3], [-52, 41], [-48, -66]]) @@ -115,7 +115,7 @@ class TestRandomDist(TestCase): def test_random_integers(self): np.random.seed(self.seed) - actual = np.random.random_integers(-99, 99, size=(3,2)) + actual = np.random.random_integers(-99, 99, size=(3, 2)) desired = np.array([[ 31, 3], [-52, 41], [-48, -66]]) @@ -126,7 +126,7 @@ class TestRandomDist(TestCase): actual = np.random.random_sample((3, 2)) desired = np.array([[ 0.61879477158567997, 0.59162362775974664], [ 0.88868358904449662, 0.89165480011560816], - [ 
0.4575674820298663 , 0.7781880808593471 ]]) + [ 0.4575674820298663, 0.7781880808593471 ]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): @@ -164,25 +164,25 @@ class TestRandomDist(TestCase): sample = np.random.choice assert_raises(ValueError, sample, -1, 3) assert_raises(ValueError, sample, 3., 3) - assert_raises(ValueError, sample, [[1,2],[3,4]], 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) assert_raises(ValueError, sample, [], 3) - assert_raises(ValueError, sample, [1,2,3,4], 3, - p=[[0.25,0.25],[0.25,0.25]]) - assert_raises(ValueError, sample, [1,2], 3, p=[0.4,0.4,0.2]) - assert_raises(ValueError, sample, [1,2], 3, p=[1.1,-0.1]) - assert_raises(ValueError, sample, [1,2], 3, p=[0.4,0.4]) - assert_raises(ValueError, sample, [1,2,3], 4, replace=False) - assert_raises(ValueError, sample, [1,2,3], 2, replace=False, - p=[1,0,0]) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, + p=[1, 0, 0]) def test_choice_return_shape(self): - p = [0.1,0.9] + p = [0.1, 0.9] # Check scalar assert_(np.isscalar(np.random.choice(2, replace=True))) assert_(np.isscalar(np.random.choice(2, replace=False))) assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) - assert_(np.isscalar(np.random.choice([1,2], replace=True))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) assert_(np.random.choice([None], replace=True) is None) a = np.array([1, 2]) arr = np.empty(1, dtype=object) @@ -195,7 +195,7 @@ class TestRandomDist(TestCase): assert_(not np.isscalar(np.random.choice(2, s, replace=False))) assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) - assert_(not np.isscalar(np.random.choice([1,2], s, replace=True))) + assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) assert_(np.random.choice([None], s, replace=True).ndim == 0) a = np.array([1, 2]) arr = np.empty(1, dtype=object) @@ -203,7 +203,7 @@ class TestRandomDist(TestCase): assert_(np.random.choice(arr, s, replace=True).item() is a) # Check multi dimensional array - s = (2,3) + s = (2, 3) p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] assert_(np.random.choice(6, s, replace=True).shape, s) assert_(np.random.choice(6, s, replace=False).shape, s) @@ -224,7 +224,7 @@ class TestRandomDist(TestCase): lambda x: [(i, i) for i in x], lambda x: np.asarray([(i, i) for i in x])]: np.random.seed(self.seed) - alist = conv([1,2,3,4,5,6,7,8,9,0]) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) np.random.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) @@ -251,7 +251,7 @@ class TestRandomDist(TestCase): actual = np.random.chisquare(50, size=(3, 2)) desired = np.array([[ 63.87858175501090585, 68.68407748911370447], [ 65.77116116901505904, 47.09686762438974483], - [ 72.3828403199695174 , 74.18408615260374006]]) + [ 72.3828403199695174, 74.18408615260374006]]) np.testing.assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): @@ -302,7 +302,7 @@ class TestRandomDist(TestCase): np.random.seed(self.seed) actual = np.random.gumbel(loc = 
.123456789, scale = 2.0, size = (3, 2)) desired = np.array([[ 0.19591898743416816, 0.34405539668096674], - [-1.4492522252274278 , -1.47374816298446865], + [-1.4492522252274278, -1.47374816298446865], [ 1.10651090478803416, -0.69535848626236174]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) @@ -378,7 +378,7 @@ class TestRandomDist(TestCase): def test_multivariate_normal(self): np.random.seed(self.seed) mean= (.123456789, 10) - cov = [[1,0],[1,0]] + cov = [[1, 0], [1, 0]] size = (3, 2) actual = np.random.multivariate_normal(mean, cov, size) desired = np.array([[[ -1.47027513018564449, 10. ], @@ -418,7 +418,7 @@ class TestRandomDist(TestCase): np.random.seed(self.seed) actual = np.random.normal(loc = .123456789, scale = 2.0, size = (3, 2)) desired = np.array([[ 2.80378370443726244, 3.59863924443872163], - [ 3.121433477601256 , -0.33382987590723379], + [ 3.121433477601256, -0.33382987590723379], [ 4.18552478636557357, 4.46410668111310471]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) @@ -463,7 +463,7 @@ class TestRandomDist(TestCase): def test_rayleigh(self): np.random.seed(self.seed) actual = np.random.rayleigh(scale = 10, size = (3, 2)) - desired = np.array([[ 13.8882496494248393 , 13.383318339044731 ], + desired = np.array([[ 13.8882496494248393, 13.383318339044731 ], [ 20.95413364294492098, 21.08285015800712614], [ 11.06066537006854311, 17.35468505778271009]]) np.testing.assert_array_almost_equal(actual, desired, decimal=14) @@ -480,8 +480,8 @@ class TestRandomDist(TestCase): np.random.seed(self.seed) actual = np.random.standard_exponential(size = (3, 2)) desired = np.array([[ 0.96441739162374596, 0.89556604882105506], - [ 2.1953785836319808 , 2.22243285392490542], - [ 0.6116915921431676 , 1.50592546727413201]]) + [ 2.1953785836319808, 2.22243285392490542], + [ 0.6116915921431676, 1.50592546727413201]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): @@ -496,8 +496,8 @@ class TestRandomDist(TestCase): np.random.seed(self.seed) actual = np.random.standard_normal(size = (3, 2)) desired = np.array([[ 1.34016345771863121, 1.73759122771936081], - [ 1.498988344300628 , -0.2286433324536169 ], - [ 2.031033998682787 , 2.17032494605655257]]) + [ 1.498988344300628, -0.2286433324536169 ], + [ 2.031033998682787, 2.17032494605655257]]) np.testing.assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): diff --git a/numpy/setup.py b/numpy/setup.py index a2f089234..13ae7b3f2 100644 --- a/numpy/setup.py +++ b/numpy/setup.py @@ -4,7 +4,7 @@ from __future__ import division, print_function def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('numpy',parent_package,top_path) + config = Configuration('numpy', parent_package, top_path) config.add_subpackage('distutils') config.add_subpackage('testing') config.add_subpackage('f2py') diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py index 91659bb8d..8a4cfb480 100644 --- a/numpy/testing/decorators.py +++ b/numpy/testing/decorators.py @@ -141,14 +141,14 @@ def skipif(skip_condition, msg=None): def skipper_func(*args, **kwargs): """Skipper for normal test functions.""" if skip_val(): - raise nose.SkipTest(get_msg(f,msg)) + raise nose.SkipTest(get_msg(f, msg)) else: return f(*args, **kwargs) def skipper_gen(*args, **kwargs): """Skipper for test generators.""" if skip_val(): - raise nose.SkipTest(get_msg(f,msg)) + raise nose.SkipTest(get_msg(f, msg)) else: 
                for x in f(*args, **kwargs):
                    yield x
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index 5497bd9ce..cb757a13f 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -69,7 +69,7 @@ class NumpyDocTestFinder(doctest.DocTestFinder):
         add them to `tests`.
         """
-        doctest.DocTestFinder._find(self,tests, obj, name, module,
+        doctest.DocTestFinder._find(self, tests, obj, name, module,
                                     source_lines, globs, seen)

         # Below we re-run pieces of the above method with manual modifications,
@@ -122,18 +122,18 @@ class NumpyOutputChecker(doctest.OutputChecker):
         if not ret:
             if "#random" in want:
                 return True
-
+
             # it would be useful to normalize endianness so that
             # bigendian machines don't fail all the tests (and there are
             # actually some bigendian examples in the doctests). Let's try
             # making them all little endian
-            got = got.replace("'>","'<")
-            want= want.replace("'>","'<")
+            got = got.replace("'>", "'<")
+            want= want.replace("'>", "'<")

             # try to normalize out 32 and 64 bit default int sizes
-            for sz in [4,8]:
-                got = got.replace("'<i%d'"%sz,"int")
-                want= want.replace("'<i%d'"%sz,"int")
+            for sz in [4, 8]:
+                got = got.replace("'<i%d'"%sz, "int")
+                want= want.replace("'<i%d'"%sz, "int")

             ret = doctest.OutputChecker.check_output(self, want, got,
                                                      optionflags)
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 79ae79765..77ac61074 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -57,7 +57,7 @@ def import_nose():
     """ Import nose only when needed.
     """
     fine_nose = True
-    minimum_nose_version = (0,10,0)
+    minimum_nose_version = (0, 10, 0)
     try:
         import nose
     except ImportError:
@@ -82,7 +82,7 @@ def run_module_suite(file_to_run = None):
         if file_to_run is None:
             raise AssertionError

-    import_nose().run(argv=['',file_to_run])
+    import_nose().run(argv=['', file_to_run])


 class NoseTester(object):
@@ -214,7 +214,7 @@ class NoseTester(object):
             spdir = os.path.dirname(scipy.__file__)
             print("SciPy is installed in %s" % spdir)

-        pyversion = sys.version.replace('\n','')
+        pyversion = sys.version.replace('\n', '')
         print("Python version %s" % pyversion)
         print("nose version %d.%d.%d" % nose.__versioninfo__)

@@ -457,4 +457,3 @@ class NoseTester(object):
             add_plugins = [Unplugger('doctest')]

         return nose.run(argv=argv, addplugins=add_plugins)
-
diff --git a/numpy/testing/numpytest.py b/numpy/testing/numpytest.py
index 2120975df..d83a21d83 100644
--- a/numpy/testing/numpytest.py
+++ b/numpy/testing/numpytest.py
@@ -15,7 +15,7 @@ def importall(package):
     warnings.warn("`importall is deprecated, and will be remobed in numpy 1.9.0",
                   DeprecationWarning)

-    if isinstance(package,str):
+    if isinstance(package, str):
         package = __import__(package)

     package_name = package.__name__
@@ -24,7 +24,7 @@ def importall(package):
         subdir = os.path.join(package_dir, subpackage_name)
         if not os.path.isdir(subdir):
             continue
-        if not os.path.isfile(os.path.join(subdir,'__init__.py')):
+        if not os.path.isfile(os.path.join(subdir, '__init__.py')):
             continue
         name = package_name+'.'+subpackage_name
         try:
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py
index 06c968cf4..bde82a666 100755
--- a/numpy/testing/print_coercion_tables.py
+++ b/numpy/testing/print_coercion_tables.py
@@ -54,7 +54,7 @@ def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray,
             if use_promote_types:
                 char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
             else:
-                value = np.add(rowvalue,colvalue)
+                value = np.add(rowvalue, colvalue)
                 if isinstance(value, np.ndarray):
                     char = value.dtype.char
                 else:
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 0e3a9627e..595e48925 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -4,7 +4,7 @@ from __future__ import division, print_function

 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('testing',parent_package,top_path)
+    config = Configuration('testing', parent_package, top_path)

     config.add_data_dir('tests')
     return config
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index f3e142d54..36c7cc7bb 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -7,7 +7,7 @@ import nose

 def test_slow():
     @dec.slow
-    def slow_func(x,y,z):
+    def slow_func(x, y, z):
         pass

     assert_(slow_func.slow)
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index cbc8cd23e..94fc4d655 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -194,7 +194,7 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
     def test_inf(self):
         a = np.array([[1., 2.], [3., 4.]])
         b = a.copy()
-        a[0,0] = np.inf
+        a[0, 0] = np.inf
         self.assertRaises(AssertionError,
                           lambda : self._assert_func(a, b))
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 235b9e0ba..8e324a78d 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -17,7 +17,7 @@ if sys.version_info[0] >= 3:
 else:
     from StringIO import StringIO

-__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
+__all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
            'assert_array_equal', 'assert_array_less', 'assert_string_equal',
            'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
            'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
@@ -118,7 +118,7 @@ if sys.platform[:5]=='linux':
         if not _load_time:
             _load_time.append(time.time())
         try:
-            f=open(_proc_pid_stat,'r')
+            f=open(_proc_pid_stat, 'r')
             l = f.readline().split(' ')
             f.close()
             return int(l[13])
@@ -129,7 +129,7 @@ if sys.platform[:5]=='linux':
         """ Return virtual memory size in bytes of the running python.
""" try: - f=open(_proc_pid_stat,'r') + f=open(_proc_pid_stat, 'r') l = f.readline().split(' ') f.close() return int(l[22]) @@ -164,7 +164,7 @@ if os.name=='nt' and sys.version[:3] > '2.3': # the CPU to 100%, but the above makes more sense :) import win32pdh if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) ) + path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter) ) hq = win32pdh.OpenQuery() try: hc = win32pdh.AddCounter(hq, path) @@ -243,16 +243,16 @@ def assert_equal(actual,desired,err_msg='',verbose=True): if isinstance(desired, dict): if not isinstance(actual, dict) : raise AssertionError(repr(type(actual))) - assert_equal(len(actual),len(desired),err_msg,verbose) - for k,i in desired.items(): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): if k not in actual : raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k,err_msg), verbose) + assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) return - if isinstance(desired, (list,tuple)) and isinstance(actual, (list,tuple)): - assert_equal(len(actual),len(desired),err_msg,verbose) + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg), verbose) + assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) return from numpy.core import ndarray, isscalar, signbit from numpy.lib import iscomplexobj, real, imag @@ -316,7 +316,7 @@ def assert_equal(actual,desired,err_msg='',verbose=True): if desired != actual : raise AssertionError(msg) -def print_assert_equal(test_string,actual,desired): +def print_assert_equal(test_string, actual, desired): """ Test if two objects are equal, and print an error message if test fails. 
@@ -350,9 +350,9 @@ def print_assert_equal(test_string,actual,desired):
     msg = StringIO()
     msg.write(test_string)
     msg.write(' failed\nACTUAL: \n')
-    pprint.pprint(actual,msg)
+    pprint.pprint(actual, msg)
     msg.write('DESIRED: \n')
-    pprint.pprint(desired,msg)
+    pprint.pprint(desired, msg)
     raise AssertionError(msg.getvalue())

 def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
@@ -466,7 +466,7 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
             return
     except (NotImplementedError, TypeError):
         pass
-    if round(abs(desired - actual),decimal) != 0 :
+    if round(abs(desired - actual), decimal) != 0 :
         raise AssertionError(msg)

@@ -535,7 +535,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
     # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
     with np.errstate(invalid='ignore'):
         scale = 0.5*(np.abs(desired) + np.abs(actual))
-        scale = np.power(10,np.floor(np.log10(scale)))
+        scale = np.power(10, np.floor(np.log10(scale)))
     try:
         sc_desired = desired/scale
     except ZeroDivisionError:
@@ -562,7 +562,7 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
             return
     except (TypeError, NotImplementedError):
         pass
-    if np.abs(sc_desired - sc_actual) >= np.power(10.,-(significant-1)) :
+    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)) :
         raise AssertionError(msg)

 def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
@@ -624,7 +624,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
             else:
                 val = comparison(x, y)
         else:
-            val = comparison(x,y)
+            val = comparison(x, y)

         if isinstance(val, bool):
             cond = val
@@ -1100,7 +1100,7 @@ def measure(code_str,times=1,label=None):

     """
     frame = sys._getframe(1)
-    locs,globs = frame.f_locals,frame.f_globals
+    locs, globs = frame.f_locals, frame.f_globals

     code = compile(code_str,
                    'Test name: %s ' % label,
@@ -1109,7 +1109,7 @@ def measure(code_str,times=1,label=None):
     elapsed = jiffies()
     while i < times:
         i += 1
-        exec(code, globs,locs)
+        exec(code, globs, locs)
     elapsed = jiffies() - elapsed
     return 0.01*elapsed

@@ -1127,7 +1127,7 @@ def _assert_valid_refcount(op):
     rc = sys.getrefcount(i)
     for j in range(15):
-        d = op(b,c)
+        d = op(b, c)

     assert_(sys.getrefcount(i) >= rc)

@@ -1580,4 +1580,3 @@ def gen_alignment_data(dtype=float32, type='binary', max_size=24):

 class IgnoreException(Exception):
     "Ignoring this exception due to disabled feature"
-
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index ee36e0a9b..8e9c6c0bd 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -81,21 +81,21 @@ class TestNdpointer(TestCase):
         self.assertTrue(p.from_param(np.array([[1]])))

     def test_shape(self):
-        p = ndpointer(shape=(1,2))
-        self.assertTrue(p.from_param(np.array([[1,2]])))
-        self.assertRaises(TypeError, p.from_param, np.array([[1],[2]]))
+        p = ndpointer(shape=(1, 2))
+        self.assertTrue(p.from_param(np.array([[1, 2]])))
+        self.assertRaises(TypeError, p.from_param, np.array([[1], [2]]))
         p = ndpointer(shape=())
         self.assertTrue(p.from_param(np.array(1)))

     def test_flags(self):
-        x = np.array([[1,2],[3,4]], order='F')
+        x = np.array([[1, 2], [3, 4]], order='F')
         p = ndpointer(flags='FORTRAN')
         self.assertTrue(p.from_param(x))
         p = ndpointer(flags='CONTIGUOUS')
         self.assertRaises(TypeError, p.from_param, x)
         p = ndpointer(flags=x.flags.num)
         self.assertTrue(p.from_param(x))
-        self.assertRaises(TypeError, p.from_param, np.array([[1,2],[3,4]]))
+        self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))


 if __name__ == "__main__":
diff --git a/numpy/tests/test_matlib.py b/numpy/tests/test_matlib.py
index 814c24b0c..0bc8548ba 100644
--- a/numpy/tests/test_matlib.py
+++ b/numpy/tests/test_matlib.py
@@ -7,7 +7,7 @@ from numpy.testing import assert_array_equal, assert_, run_module_suite
 def test_empty():
     x = np.matlib.empty((2,))
     assert_(isinstance(x, np.matrix))
-    assert_(x.shape, (1,2))
+    assert_(x.shape, (1, 2))

 def test_ones():
     assert_array_equal(np.matlib.ones((2, 3)),
diff --git a/tools/c_coverage/HOWTO_C_COVERAGE.txt b/tools/c_coverage/HOWTO_C_COVERAGE.txt
index aa4737a3f..320d9b0de 100644
--- a/tools/c_coverage/HOWTO_C_COVERAGE.txt
+++ b/tools/c_coverage/HOWTO_C_COVERAGE.txt
@@ -115,4 +115,3 @@ removing lines because they are tautologically impossible or to combine
 lines together.  Compiling Numpy without optimizations helps, but not
 completely.  Even despite this flaw, this tool is still helpful in
 identifying large missed blocks or functions.
-
diff --git a/tools/commitstats.py b/tools/commitstats.py
index a3de1a6c5..a35d7b724 100644
--- a/tools/commitstats.py
+++ b/tools/commitstats.py
@@ -12,9 +12,9 @@ def get_count(filename, repo):
     mystr = open(filename).read()
     result = names.findall(mystr)
     u = np.unique(result)
-    count = [(x,result.count(x),repo) for x in u]
+    count = [(x, result.count(x), repo) for x in u]
     return count
-
+
 command = 'svn log -l 2300 > output.txt'
 os.chdir('..')
@@ -33,12 +33,9 @@ os.system(command)
 count.extend(get_count('output.txt', 'SciKits'))

 count.sort()
-
+
 print("** SciPy and NumPy **")
 print("=====================")
 for val in count:
     print(val)
-
-
-
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
index b8664877e..0174e0708 100644
--- a/tools/test-installed-numpy.py
+++ b/tools/test-installed-numpy.py
@@ -39,10 +39,10 @@ import numpy
 # Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
 # The same flags check is also used in the tests to switch behavior.
 if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0"):
-    if not numpy.ones((10,1), order='C').flags.f_contiguous:
+    if not numpy.ones((10, 1), order='C').flags.f_contiguous:
         print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
         sys.exit(1)
-elif numpy.ones((10,1), order='C').flags.f_contiguous:
+elif numpy.ones((10, 1), order='C').flags.f_contiguous:
     print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
     sys.exit(1)
diff --git a/tools/win32build/doall.py b/tools/win32build/doall.py
index fbc794db5..0bf77306e 100644
--- a/tools/win32build/doall.py
+++ b/tools/win32build/doall.py
@@ -19,9 +19,9 @@ if __name__ == '__main__':
     subprocess.check_call(['python', 'prepare_bootstrap.py', '-p', pyver])

     # Build binaries
-    subprocess.check_call(['python', 'build.py', '-p', pyver], 
+    subprocess.check_call(['python', 'build.py', '-p', pyver],
                           cwd = 'bootstrap-%s' % pyver)

     # Build installer using nsis
-    subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], 
+    subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'],
                           cwd = 'bootstrap-%s' % pyver)
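
A few illustrative sketches of the APIs exercised by the files in this diff
follow; none of this code is part of the patch. First, the generated C in the
mtrand.c hunks corresponds to the tail of mtrand.pyx, where the module-level
numpy.random functions (seed, binomial, shuffle, ...) are bound methods of one
shared, hidden RandomState instance (_rand). A rough sketch of what that
means in practice::

    import numpy.random as npr

    # an explicit RandomState carries its own independent stream
    rs = npr.RandomState(12345)
    print(rs.binomial(10, 0.5, size=3))

    # the module-level functions share one global RandomState, so seeding
    # it reproduces exactly the same draws
    npr.seed(12345)
    print(npr.binomial(10, 0.5, size=3))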
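
The test_random.py hunks all follow one pattern: seed the global state, draw a
fixed-shape sample, and compare it elementwise against hard-coded values with
assert_array_almost_equal. A minimal sketch of that pattern (the reference
values here are regenerated on the fly rather than taken from the test
suite)::

    import numpy as np
    from numpy.testing import assert_array_almost_equal

    np.random.seed(1234567890)
    first = np.random.standard_normal(size=(3, 2))

    np.random.seed(1234567890)
    second = np.random.standard_normal(size=(3, 2))

    # identical seeds give identical draws, to full double precision
    assert_array_almost_equal(first, second, decimal=15)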
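
numpy/testing/decorators.py and its tests also appear above: dec.slow merely
tags a test function, while dec.skipif(condition, msg) makes the wrapped test
raise nose.SkipTest when the condition holds. A small usage sketch, with test
names invented for the example::

    import sys
    from numpy.testing import dec, assert_

    @dec.slow
    def test_expensive_path():
        assert_(True)

    @dec.skipif(sys.platform == 'win32', "POSIX-only check")
    def test_posix_only():
        assert_(True)

    print(test_expensive_path.slow)   # dec.slow just sets .slow = True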
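
Several of the setup.py files in this diff repeat the same numpy.distutils
idiom. A sketch of that idiom, with a made-up subpackage name and assuming it
runs inside a source tree where numpy.distutils is importable::

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        # 'example_subpackage' is a placeholder name for this sketch
        config = Configuration('example_subpackage', parent_package, top_path)
        config.add_data_dir('tests')
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(configuration=configuration)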
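
numpy/tests/test_ctypeslib.py exercises numpy.ctypeslib.ndpointer, which
builds an argument-validating type for ctypes prototypes: from_param accepts
arrays that match the declared shape/flags and raises TypeError otherwise.
For example::

    import numpy as np
    from numpy.ctypeslib import ndpointer
    from numpy.testing import assert_

    p = ndpointer(shape=(1, 2))
    assert_(p.from_param(np.array([[1, 2]])))    # matching shape is accepted
    try:
        p.from_param(np.array([[1], [2]]))       # (2, 1) does not match (1, 2)
    except TypeError as exc:
        print("rejected:", exc)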
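
Finally, the tools/test-installed-numpy.py hunk probes NPY_RELAXED_STRIDES_CHECKING
by inspecting a C-ordered (10, 1) array, which is reported as Fortran-contiguous
only when relaxed stride checking was compiled in. The same probe, restated as a
standalone snippet::

    import os
    import numpy

    requested = os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0"
    active = numpy.ones((10, 1), order='C').flags.f_contiguous
    print("relaxed strides requested:", requested, "- active:", active)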