author     Hernan Grecco <hernan.grecco@gmail.com>   2021-01-07 02:00:12 -0300
committer  GitHub <noreply@github.com>               2021-01-07 02:00:12 -0300
commit     a4dbdb1f769bffcca5aaccb342d07e267b3eb8ff (patch)
tree       cf385814e9f911b06fc407090a4c9ab4850c8541
parent     8a93b6f0a58868a72b666359976c45f29e6cc809 (diff)
parent     22c07bd73ac8fdfffe4805adf8716273b5d2a124 (diff)
download   pint-a4dbdb1f769bffcca5aaccb342d07e267b3eb8ff.tar.gz
Merge branch 'master' into comparisons
-rw-r--r--   .gitattributes                         1
-rw-r--r--   .gitignore                             4
-rw-r--r--   .travis.yml                            2
-rw-r--r--   CHANGES                                1
-rw-r--r--   asv.conf.json                        160
-rw-r--r--   bench/bench.py                       146
-rw-r--r--   bench/bench_base.yaml                 42
-rw-r--r--   bench/bench_numpy.yaml                23
-rw-r--r--   benchmarks/00_common.py                2
-rw-r--r--   benchmarks/01_registry_creation.py    10
-rw-r--r--   benchmarks/10_registry.py            101
-rw-r--r--   benchmarks/20_quantity.py             56
-rw-r--r--   benchmarks/30_numpy.py                97
-rw-r--r--   benchmarks/__init__.py                 0
-rw-r--r--   benchmarks/util.py                    38
-rw-r--r--   docs/numpy.ipynb                       4
-rw-r--r--   pint/numpy_func.py                     2
-rw-r--r--   pint/testsuite/test_issues.py          6
18 files changed, 480 insertions, 215 deletions
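Note: the patch replaces the ad hoc bench/ scripts with an airspeed velocity (asv) suite configured by asv.conf.json. A minimal sketch of how the new suite might be run locally, assuming asv is installed and the commands are issued from the repository root (standard asv CLI usage, not commands added by this patch):

    pip install asv
    asv run        # build the configured environment(s) and time the benchmarks/ suite
    asv publish    # render the collected results to .asv/html
    asv preview    # serve the generated report for inspection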
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..8383fff
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+CHANGES merge=union
diff --git a/.gitignore b/.gitignore
index ce943ca..2d9bf37 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,7 @@ notebooks/pandas_test.csv
# dask stuff
dask-worker-space
+
+# airspeed velocity benchmark
+.asv/
+benchmarks/hashes.txt
diff --git a/.travis.yml b/.travis.yml
index 9c1a669..e180fd3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,7 +17,7 @@ env:
- PKGS="python=3.7 flake8 black==19.10b0 isort" # Have linters fail first and quickly
# Pinned packages to match readthedocs CI https://readthedocs.org/projects/pint/
- - PKGS="python=3.7 ipython matplotlib nbsphinx numpy pandas jupyter_client ipykernel python-graphviz graphviz xarray sparse dask[complete] sphinx Pygments==2.3.1 docutils==0.14 alabaster commonmark==0.8.1 recommonmark==0.5.0"
+ - PKGS="python=3.7 ipython matplotlib nbsphinx numpy pandas==1.1.3 jupyter_client ipykernel python-graphviz graphviz xarray sparse dask[complete] sphinx Pygments==2.3.1 docutils==0.14 alabaster commonmark==0.8.1 recommonmark==0.5.0 babel==2.8"
- PKGS="python=3.6"
- PKGS="python=3.7"
- PKGS="python=3.8"
diff --git a/CHANGES b/CHANGES
index 83498df..066a285 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,7 @@ Pint Changelog
- Fix comparisons between Quantities and Measurements.
(Issue #1134)
+- Implemented benchmarks based on airspeed velocity.
0.16.1 (2020-09-22)
diff --git a/asv.conf.json b/asv.conf.json
new file mode 100644
index 0000000..dcc4857
--- /dev/null
+++ b/asv.conf.json
@@ -0,0 +1,160 @@
+{
+ // The version of the config file format. Do not change, unless
+ // you know what you are doing.
+ "version": 1,
+
+ // The name of the project being benchmarked
+ "project": "pint",
+
+ // The project's homepage
+ "project_url": "https://github.com/hgrecco/pint",
+
+ // The URL or local path of the source code repository for the
+ // project being benchmarked
+ "repo": ".",
+
+ // The Python project's subdirectory in your repo. If missing or
+ // the empty string, the project is assumed to be located at the root
+ // of the repository.
+ // "repo_subdir": "",
+
+ // Customizable commands for building, installing, and
+ // uninstalling the project. See asv.conf.json documentation.
+ //
+ // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
+ // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
+ // "build_command": [
+ // "python setup.py build",
+ // "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}"
+ // ],
+
+ // List of branches to benchmark. If not provided, defaults to "master"
+ // (for git) or "default" (for mercurial).
+ // "branches": ["master"], // for git
+ // "branches": ["default"], // for mercurial
+
+ // The DVCS being used. If not set, it will be automatically
+ // determined from "repo" by looking at the protocol in the URL
+ // (if remote), or by looking for special directories, such as
+ // ".git" (if local).
+ // "dvcs": "git",
+
+ // The tool to use to create environments. May be "conda",
+ // "virtualenv" or other value depending on the plugins in use.
+ // If missing or the empty string, the tool will be automatically
+ // determined by looking for tools on the PATH environment
+ // variable.
+ "environment_type": "conda",
+
+ // timeout in seconds for installing any dependencies in environment
+ // defaults to 10 min
+ //"install_timeout": 600,
+
+ // the base URL to show a commit for the project.
+ "show_commit_url": "http://github.com/hgrecco/pint/commit/",
+
+ // The Pythons you'd like to test against. If not provided, defaults
+ // to the current version of Python used to run `asv`.
+ "pythons": ["3.9"],
+
+ // The list of conda channel names to be searched for benchmark
+ // dependency packages in the specified order
+ // "conda_channels": ["conda-forge", "defaults"],
+
+ // The matrix of dependencies to test. Each key is the name of a
+ // package (in PyPI) and the values are version numbers. An empty
+ // list or empty string indicates to just test against the default
+ // (latest) version. null indicates that the package is to not be
+ // installed. If the package to be tested is only available from
+ // PyPi, and the 'environment_type' is conda, then you can preface
+ // the package name by 'pip+', and the package will be installed via
+ // pip (with all the conda available packages installed first,
+ // followed by the pip installed packages).
+
+ "matrix": {
+ "numpy": ["1.19"],
+ // "six": ["", null], // test with and without six installed
+ // "pip+emcee": [""], // emcee is only available for install with pip.
+ },
+
+ // Combinations of libraries/python versions can be excluded/included
+ // from the set to test. Each entry is a dictionary containing additional
+ // key-value pairs to include/exclude.
+ //
+ // An exclude entry excludes entries where all values match. The
+ // values are regexps that should match the whole string.
+ //
+ // An include entry adds an environment. Only the packages listed
+ // are installed. The 'python' key is required. The exclude rules
+ // do not apply to includes.
+ //
+ // In addition to package names, the following keys are available:
+ //
+ // - python
+ // Python version, as in the *pythons* variable above.
+ // - environment_type
+ // Environment type, as above.
+ // - sys_platform
+ // Platform, as in sys.platform. Possible values for the common
+ // cases: 'linux2', 'win32', 'cygwin', 'darwin'.
+ //
+ // "exclude": [
+ // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
+ // {"environment_type": "conda", "six": null}, // don't run without six on conda
+ // ],
+ //
+ // "include": [
+ // // additional env for python2.7
+ // {"python": "2.7", "numpy": "1.8"},
+ // // additional env if run on windows+conda
+ // {"platform": "win32", "environment_type": "conda", "python": "2.7", "libpython": ""},
+ // ],
+
+ // The directory (relative to the current directory) that benchmarks are
+ // stored in. If not provided, defaults to "benchmarks"
+ // "benchmark_dir": "benchmarks",
+
+ // The directory (relative to the current directory) to cache the Python
+ // environments in. If not provided, defaults to "env"
+ "env_dir": ".asv/env",
+
+ // The directory (relative to the current directory) that raw benchmark
+ // results are stored in. If not provided, defaults to "results".
+ "results_dir": ".asv/results",
+
+ // The directory (relative to the current directory) that the html tree
+ // should be written to. If not provided, defaults to "html".
+ "html_dir": ".asv/html",
+
+ // The number of characters to retain in the commit hashes.
+ // "hash_length": 8,
+
+ // `asv` will cache results of the recent builds in each
+ // environment, making them faster to install next time. This is
+ // the number of builds to keep, per environment.
+ // "build_cache_size": 2,
+
+ // The commits after which the regression search in `asv publish`
+ // should start looking for regressions. Dictionary whose keys are
+ // regexps matching to benchmark names, and values corresponding to
+ // the commit (exclusive) after which to start looking for
+ // regressions. The default is to start from the first commit
+ // with results. If the commit is `null`, regression detection is
+ // skipped for the matching benchmark.
+ //
+ // "regressions_first_commits": {
+ // "some_benchmark": "352cdf", // Consider regressions only after this commit
+ // "another_benchmark": null, // Skip regression detection altogether
+ // },
+
+ // The thresholds for relative change in results, after which `asv
+ // publish` starts reporting regressions. Dictionary of the same
+ // form as in ``regressions_first_commits``, with values
+ // indicating the thresholds. If multiple entries match, the
+ // maximum is taken. If no entry matches, the default is 5%.
+ //
+ // "regressions_thresholds": {
+ // "some_benchmark": 0.01, // Threshold of 1%
+ // "another_benchmark": 0.5, // Threshold of 50%
+ // },
+}
diff --git a/bench/bench.py b/bench/bench.py
deleted file mode 100644
index c3c9323..0000000
--- a/bench/bench.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import copy
-import fnmatch
-import os
-from timeit import Timer
-
-import yaml
-
-
-def time_stmt(stmt="pass", setup="pass", number=0, repeat=3):
- """Timer function with the same behaviour as running `python -m timeit `
- in the command line.
-
- Parameters
- ----------
- stmt : str
- (Default value = "pass")
- setup : str
- (Default value = "pass")
- number : int
- (Default value = 0)
- repeat : int
- (Default value = 3)
-
- Returns
- -------
- float
- elapsed time in seconds or NaN if the command failed.
-
- """
-
- t = Timer(stmt, setup)
-
- if not number:
- # determine number so that 0.2 <= total time < 2.0
- for i in range(1, 10):
- number = 10 ** i
-
- try:
- x = t.timeit(number)
- except Exception:
- print(t.print_exc())
- return float("NaN")
-
- if x >= 0.2:
- break
-
- try:
- r = t.repeat(repeat, number)
- except Exception:
- print(t.print_exc())
- return float("NaN")
-
- best = min(r)
-
- return best / number
-
-
-def build_task(task, name="", setup="", number=0, repeat=3):
- nt = copy.copy(task)
-
- nt["name"] = (name + " " + task.get("name", "")).strip()
- nt["setup"] = (setup + "\n" + task.get("setup", "")).strip("\n")
- nt["stmt"] = task.get("stmt", "")
- nt["number"] = task.get("number", number)
- nt["repeat"] = task.get("repeat", repeat)
-
- return nt
-
-
-def time_task(name, stmt="pass", setup="pass", number=0, repeat=3, stmts="", base=""):
-
- if base:
- nvalue = time_stmt(stmt=base, setup=setup, number=number, repeat=repeat)
- yield name + " (base)", nvalue
- suffix = " (normalized)"
- else:
- nvalue = 1.0
- suffix = ""
-
- if stmt:
- value = time_stmt(stmt=stmt, setup=setup, number=number, repeat=repeat)
- yield name, value / nvalue
-
- for task in stmts:
- new_task = build_task(task, name, setup, number, repeat)
- for task_name, value in time_task(**new_task):
- yield task_name + suffix, value / nvalue
-
-
-def time_file(filename, name="", setup="", number=0, repeat=3):
- """Open a yaml benchmark file an time each statement,
-
- yields a tuple with filename, task name, time in seconds.
-
- Parameters
- ----------
- filename :
-
- name :
- (Default value = "")
- setup :
- (Default value = "")
- number :
- (Default value = 0)
- repeat :
- (Default value = 3)
-
- Returns
- -------
-
- """
- with open(filename, "r") as fp:
- tasks = yaml.load(fp)
-
- for task in tasks:
- new_task = build_task(task, name, setup, number, repeat)
- for task_name, value in time_task(**new_task):
- yield task_name, value
-
-
-def recursive_glob(rootdir=".", pattern="*"):
- return [
- os.path.join(looproot, filename)
- for looproot, _, filenames in os.walk(rootdir)
- for filename in filenames
- if fnmatch.fnmatch(filename, pattern)
- ]
-
-
-def main(filenames=None):
- if not filenames:
- filenames = recursive_glob(".", "bench_*.yaml")
- elif isinstance(filenames, str):
- filenames = [filenames]
-
- for filename in filenames:
- print(filename)
- print("-" * len(filename))
- print()
- for task_name, value in time_file(filename):
- print(f"{value:.2e} {task_name}")
- print()
-
-
-if __name__ == "__main__":
- main()
diff --git a/bench/bench_base.yaml b/bench/bench_base.yaml
deleted file mode 100644
index f767a04..0000000
--- a/bench/bench_base.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-- name: importing
- stmt: import pint
-
-- name: empty registry
- setup: import pint
- stmt: ureg = pint.UnitRegistry(None)
-
-- name: default registry
- setup: import pint
- stmt: ureg = pint.UnitRegistry()
-
-- name: finding meter
- setup: |
- import pint
- ureg = pint.UnitRegistry()
- stmts:
- - name: (attr)
- stmt: q = ureg.meter
- - name: (item)
- stmt: q = ureg['meter']
-
-- name: base units
- setup: |
- import pint
- ureg = pint.UnitRegistry()
- stmts:
- - name: meter
- stmt: ureg.get_base_units('meter')
- - name: yard
- stmt: ureg.get_base_units('yard')
- - name: meter / second
- stmt: ureg.get_base_units('meter / second')
- - name: yard / minute
- stmt: ureg.get_base_units('yard / minute')
-
-- name: build cache
- setup: |
- import pint
- ureg = pint.UnitRegistry()
- stmt: ureg._build_cache()
diff --git a/bench/bench_numpy.yaml b/bench/bench_numpy.yaml
deleted file mode 100644
index 8609bc0..0000000
--- a/bench/bench_numpy.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-- name: NumPy
- setup: |
- import numpy as np
- import pint
- ureg = pint.UnitRegistry()
- stmts:
- - name: cosine
- setup: |
- d = np.arange(0, 90, 10)
- r = np.deg2rad(d)
- base: np.cos(r)
- stmts:
- - name: radian
- setup: x = r * ureg.radian
- stmt: np.cos(x)
- - name: dimensionless
- setup: x = r * ureg.dimensionless
- stmt: np.cos(x)
- - name: degree
- setup: x = d * ureg.degree
- stmt: np.cos(x)
diff --git a/benchmarks/00_common.py b/benchmarks/00_common.py
new file mode 100644
index 0000000..261cea9
--- /dev/null
+++ b/benchmarks/00_common.py
@@ -0,0 +1,2 @@
+def time_import():
+    import pint  # noqa: F401
diff --git a/benchmarks/01_registry_creation.py b/benchmarks/01_registry_creation.py
new file mode 100644
index 0000000..e555585
--- /dev/null
+++ b/benchmarks/01_registry_creation.py
@@ -0,0 +1,10 @@
+import pint
+
+from . import util
+
+
+def time_create_registry(args):
+    pint.UnitRegistry(*args)
+
+
+time_create_registry.params = [[(None,), tuple(), (util.get_tiny_def(),)]]
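In asv's convention, module-level functions whose names start with time_ are collected as timing benchmarks, and attaching a params sequence to a function makes asv call it once per parameter (a list of lists yields the cross product of several parameter sets, as in the registry and quantity benchmarks below). A tiny hypothetical illustration, not part of this patch:

    def time_hypothetical(n):
        sum(range(n))

    time_hypothetical.params = [10, 1_000]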
diff --git a/benchmarks/10_registry.py b/benchmarks/10_registry.py
new file mode 100644
index 0000000..d95a53f
--- /dev/null
+++ b/benchmarks/10_registry.py
@@ -0,0 +1,101 @@
+import pint
+
+from . import util
+
+units = ("meter", "kilometer", "second", "minute", "angstrom")
+
+other_units = ("meter", "angstrom", "kilometer/second", "angstrom/minute")
+
+all_values = ("int", "float", "complex")
+
+ureg = None
+data = {}
+
+
+def setup(*args):
+
+    global ureg, data
+
+    data["int"] = 1
+    data["float"] = 1.0
+    data["complex"] = complex(1, 2)
+
+    ureg = pint.UnitRegistry(util.get_tiny_def())
+
+
+def my_setup(*args):
+    global data
+    setup(*args)
+    for unit in units + other_units:
+        data["uc_%s" % unit] = pint.registry.to_units_container(unit, ureg)
+
+
+def time_build_cache():
+    ureg._build_cache()
+
+
+def time_getattr(key):
+    getattr(ureg, key)
+
+
+time_getattr.params = units
+
+
+def time_getitem(key):
+    ureg[key]
+
+
+time_getitem.params = units
+
+
+def time_parse_unit_name(key):
+    ureg.parse_unit_name(key)
+
+
+time_parse_unit_name.params = units
+
+
+def time_parse_units(key):
+    ureg.parse_units(key)
+
+
+time_parse_units.params = units
+
+
+def time_parse_expression(key):
+    ureg.parse_expression("1.0 " + key)
+
+
+time_parse_expression.params = units
+
+
+def time_base_units(unit):
+    ureg.get_base_units(unit)
+
+
+time_base_units.params = other_units
+
+
+def time_to_units_container_registry(unit):
+    pint.registry.to_units_container(unit, ureg)
+
+
+time_to_units_container_registry.params = other_units
+
+
+def time_to_units_container_detached(unit):
+    pint.registry.to_units_container(unit, ureg)
+
+
+time_to_units_container_detached.params = other_units
+
+
+def time_convert_from_uc(key):
+    src, dst = key
+    ureg._convert(1.0, data[src], data[dst])
+
+
+time_convert_from_uc.setup = my_setup
+time_convert_from_uc.params = [
+    (("uc_meter", "uc_kilometer"), ("uc_kilometer/second", "uc_angstrom/minute"))
+]
diff --git a/benchmarks/20_quantity.py b/benchmarks/20_quantity.py
new file mode 100644
index 0000000..5f6dd41
--- /dev/null
+++ b/benchmarks/20_quantity.py
@@ -0,0 +1,56 @@
+import itertools as it
+import operator
+
+import pint
+
+from . import util
+
+units = ("meter", "kilometer", "second", "minute", "angstrom")
+all_values = ("int", "float", "complex")
+all_values_q = tuple(
+    "%s_%s" % (a, b) for a, b in it.product(all_values, ("meter", "kilometer"))
+)
+
+op1 = (operator.neg, operator.truth)
+op2_cmp = (operator.eq,) # operator.lt)
+op2_math = (operator.add, operator.sub, operator.mul, operator.truediv)
+
+ureg = None
+data = {}
+
+
+def setup(*args):
+
+    global ureg, data
+
+    data["int"] = 1
+    data["float"] = 1.0
+    data["complex"] = complex(1, 2)
+
+    ureg = pint.UnitRegistry(util.get_tiny_def())
+
+    for key in all_values:
+        data[key + "_meter"] = data[key] * ureg.meter
+        data[key + "_kilometer"] = data[key] * ureg.kilometer
+
+
+def time_build_by_mul(key):
+    data[key] * ureg.meter
+
+
+time_build_by_mul.params = all_values
+
+
+def time_op1(key, op):
+    op(data[key])
+
+
+time_op1.params = [all_values_q, op1]
+
+
+def time_op2(keys, op):
+    key1, key2 = keys
+    op(data[key1], data[key2])
+
+
+time_op2.params = [tuple(it.product(all_values_q, all_values_q)), op2_math + op2_cmp]
diff --git a/benchmarks/30_numpy.py b/benchmarks/30_numpy.py
new file mode 100644
index 0000000..ec83833
--- /dev/null
+++ b/benchmarks/30_numpy.py
@@ -0,0 +1,97 @@
+import itertools as it
+import operator
+
+import numpy as np
+
+import pint
+
+from . import util
+
+lengths = ("short", "mid")
+all_values = tuple(
+    "%s_%s" % (a, b) for a, b in it.product(lengths, ("list", "tuple", "array"))
+)
+all_arrays = ("short_array", "mid_array")
+units = ("meter", "kilometer")
+all_arrays_q = tuple("%s_%s" % (a, b) for a, b in it.product(all_arrays, units))
+
+ureg = None
+data = {}
+op1 = (operator.neg,) # operator.truth,
+op2_cmp = (operator.eq, operator.lt)
+op2_math = (operator.add, operator.sub, operator.mul, operator.truediv)
+numpy_op2_cmp = (np.equal, np.less)
+numpy_op2_math = (np.add, np.subtract, np.multiply, np.true_divide)
+
+
+def float_range(n):
+    return (float(x) for x in range(1, n + 1))
+
+
+def setup(*args):
+
+    global ureg, data
+    short = list(float_range(3))
+    mid = list(float_range(1_000))
+
+    data["short_list"] = short
+    data["short_tuple"] = tuple(short)
+    data["short_array"] = np.asarray(short)
+    data["mid_list"] = mid
+    data["mid_tuple"] = tuple(mid)
+    data["mid_array"] = np.asarray(mid)
+
+    ureg = pint.UnitRegistry(util.get_tiny_def())
+
+    for key in all_arrays:
+        data[key + "_meter"] = data[key] * ureg.meter
+        data[key + "_kilometer"] = data[key] * ureg.kilometer
+
+
+def time_finding_meter_getattr():
+    ureg.meter
+
+
+def time_finding_meter_getitem():
+    ureg["meter"]
+
+
+def time_base_units(unit):
+    ureg.get_base_units(unit)
+
+
+time_base_units.params = ["meter", "angstrom", "meter/second", "angstrom/minute"]
+
+
+def time_build_by_mul(key):
+    data[key] * ureg.meter
+
+
+time_build_by_mul.params = all_arrays
+
+
+def time_op1(key, op):
+    op(data[key])
+
+
+time_op1.params = [all_arrays_q, op1 + (np.sqrt, np.square)]
+
+
+def time_op2(keys, op):
+    key1, key2 = keys
+    op(data[key1], data[key2])
+
+
+time_op2.params = [
+    (
+        ("short_array_meter", "short_array_meter"),
+        ("short_array_meter", "short_array_kilometer"),
+        ("short_array_kilometer", "short_array_meter"),
+        ("short_array_kilometer", "short_array_kilometer"),
+        ("mid_array_meter", "mid_array_meter"),
+        ("mid_array_meter", "mid_array_kilometer"),
+        ("mid_array_kilometer", "mid_array_meter"),
+        ("mid_array_kilometer", "mid_array_kilometer"),
+    ),
+    op2_math + op2_cmp + numpy_op2_math + numpy_op2_cmp,
+]
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/benchmarks/__init__.py
diff --git a/benchmarks/util.py b/benchmarks/util.py
new file mode 100644
index 0000000..4e72048
--- /dev/null
+++ b/benchmarks/util.py
@@ -0,0 +1,38 @@
+import io
+
+SMALL_VEC_LEN = 3
+MID_VEC_LEN = 1_000
+LARGE_VEC_LEN = 1_000_000
+
+TINY_DEF = """
+yocto- = 1e-24 = y-
+zepto- = 1e-21 = z-
+atto- = 1e-18 = a-
+femto- = 1e-15 = f-
+pico- = 1e-12 = p-
+nano- = 1e-9 = n-
+micro- = 1e-6 = µ- = u-
+milli- = 1e-3 = m-
+centi- = 1e-2 = c-
+deci- = 1e-1 = d-
+deca- = 1e+1 = da- = deka-
+hecto- = 1e2 = h-
+kilo- = 1e3 = k-
+mega- = 1e6 = M-
+giga- = 1e9 = G-
+tera- = 1e12 = T-
+peta- = 1e15 = P-
+exa- = 1e18 = E-
+zetta- = 1e21 = Z-
+yotta- = 1e24 = Y-
+
+meter = [length] = m = metre
+second = [time] = s = sec
+
+angstrom = 1e-10 * meter = Å = ångström = Å
+minute = 60 * second = min
+"""
+
+
+def get_tiny_def():
+    return io.StringIO(TINY_DEF)
diff --git a/docs/numpy.ipynb b/docs/numpy.ipynb
index fa987b2..5edf125 100644
--- a/docs/numpy.ipynb
+++ b/docs/numpy.ipynb
@@ -203,7 +203,7 @@
"- **Math operations**: `add`, `subtract`, `multiply`, `divide`, `logaddexp`, `logaddexp2`, `true_divide`, `floor_divide`, `negative`, `remainder`, `mod`, `fmod`, `absolute`, `rint`, `sign`, `conj`, `exp`, `exp2`, `log`, `log2`, `log10`, `expm1`, `log1p`, `sqrt`, `square`, `cbrt`, `reciprocal`\n",
"- **Trigonometric functions**: `sin`, `cos`, `tan`, `arcsin`, `arccos`, `arctan`, `arctan2`, `hypot`, `sinh`, `cosh`, `tanh`, `arcsinh`, `arccosh`, `arctanh`\n",
"- **Comparison functions**: `greater`, `greater_equal`, `less`, `less_equal`, `not_equal`, `equal`\n",
- "- **Floating functions**: `isreal`, `iscomplex`, `isfinite`, `isinf`, `isnan`, `signbit`, `copysign`, `nextafter`, `modf`, `ldexp`, `frexp`, `fmod`, `floor`, `ceil`, `trunc`\n",
+ "- **Floating functions**: `isreal`, `iscomplex`, `isfinite`, `isinf`, `isnan`, `signbit`, `sign`, `copysign`, `nextafter`, `modf`, `ldexp`, `frexp`, `fmod`, `floor`, `ceil`, `trunc`\n",
"\n",
"And the following NumPy functions:"
]
@@ -500,4 +500,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
-}
+}
\ No newline at end of file
diff --git a/pint/numpy_func.py b/pint/numpy_func.py
index 0f220b0..bcf406a 100644
--- a/pint/numpy_func.py
+++ b/pint/numpy_func.py
@@ -339,7 +339,7 @@ Define ufunc behavior collections.
- `op_units_output_ufuncs`: determine output unit from input unit as determined by
operation (see `get_op_output_unit`)
"""
-strip_unit_input_output_ufuncs = ["isnan", "isinf", "isfinite", "signbit"]
+strip_unit_input_output_ufuncs = ["isnan", "isinf", "isfinite", "signbit", "sign"]
matching_input_bare_output_ufuncs = [
"equal",
"greater",
diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py
index e39b01a..0bb7cae 100644
--- a/pint/testsuite/test_issues.py
+++ b/pint/testsuite/test_issues.py
@@ -783,6 +783,12 @@ class TestIssues(QuantityTestCase):
self.assertIsInstance(foo1, foo2.__class__)
self.assertIsInstance(foo2, foo1.__class__)
+    @helpers.requires_numpy()
+    def test_issue1174(self):
+        q = [1.0, -2.0, 3.0, -4.0] * self.ureg.meter
+        self.assertTrue(np.sign(q[0].magnitude))
+        self.assertTrue(np.sign(q[1].magnitude))
+
if np is not None: