summaryrefslogtreecommitdiff
path: root/numpy/lib/format.py
diff options
context:
space:
mode:
authorBartosz Telenczuk <muchatel@poczta.fm>2013-01-31 15:21:14 +0100
committerBartosz Telenczuk <muchatel@poczta.fm>2013-06-12 15:08:51 +0200
commit7c4e9e14c473060595271a856b307bbc04f1c7bb (patch)
tree53bf3a37778166b1c3f6b4b16f62a04469e0e85f /numpy/lib/format.py
parentb69c48d34d6b6d9be01f37bd5117e946e2556df8 (diff)
downloadnumpy-7c4e9e14c473060595271a856b307bbc04f1c7bb.tar.gz
adjust the optimal IO buffer size for npz files
Diffstat (limited to 'numpy/lib/format.py')
-rw-r--r--numpy/lib/format.py19
1 file changed, 11 insertions, 8 deletions
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index de84d2820..ff3b95d6e 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -148,6 +148,7 @@ else:
MAGIC_PREFIX = asbytes('\x93NUMPY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
+BUFFER_SIZE = 2 ** 18 #size of buffer for reading npz files in bytes
def magic(major, minor):
""" Return the magic string for the given file format version.
@@ -457,20 +458,22 @@ def read_array(fp):
else:
# This is not a real file. We have to read it the memory-intensive
# way.
- # crc32 module fails on reads greater than 2 ** 32 bytes, breaking large reads from gzip streams
- # Chunk reads to 256mb to avoid issue and reduce memory overhead of the read.
- # In non-chunked case count < max_read_count, so only one read is performed.
+ # crc32 module fails on reads greater than 2 ** 32 bytes, breaking
+ # large reads from gzip streams. Chunk reads to BUFFER_SIZE bytes to
+ # avoid issue and reduce memory overhead of the read. In
+ # non-chunked case count < max_read_count, so only one read is
+ # performed.
- max_buffer_size = 2 ** 28
- max_read_count = max_buffer_size / dtype.itemsize
+ max_read_count = BUFFER_SIZE // dtype.itemsize
array = numpy.empty(count, dtype=dtype)
- for i in xrange(0, count, max_read_count):
- read_count = max_read_count if i + max_read_count < count else count - i
+ for i in range(0, count, max_read_count):
+ read_count = min(max_read_count, count - i)
data = fp.read(int(read_count * dtype.itemsize))
- array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, count=read_count)
+ array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+ count=read_count)
if fortran_order:
array.shape = shape[::-1]