author     Sebastian Thiel <byronimo@gmail.com>    2010-06-03 19:04:18 +0200
committer  Sebastian Thiel <byronimo@gmail.com>    2010-06-03 20:40:43 +0200
commit     4b4a514e51fbc7dc6ddcb27c188159d57b5d1fa9 (patch)
tree       1401628227fda3f1ab5c81c1ad9ae6213e6ccacb
parent     26e138cb47dccc859ff219f108ce9b7d96cbcbcd (diff)
download   gitpython-4b4a514e51fbc7dc6ddcb27c188159d57b5d1fa9.tar.gz
Added performance comparison to cgit ... and yes, git-python is faster :)
-rw-r--r--  lib/git/odb/utils.py                  31
-rw-r--r--  test/git/performance/test_streams.py  67
2 files changed, 87 insertions(+), 11 deletions(-)
diff --git a/lib/git/odb/utils.py b/lib/git/odb/utils.py
index 1e4a8e9d..94d1cea8 100644
--- a/lib/git/odb/utils.py
+++ b/lib/git/odb/utils.py
@@ -103,10 +103,12 @@ class DecompressMemMapReader(object):
times we actually allocate. An own zlib implementation would be good here
to better support streamed reading - it would only need to keep the mmap
and decompress it into chunks, thats all ... """
- __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_cs', '_close')
+ __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close')
- def __init__(self, m, close_on_deletion, cs = 128*1024):
- """Initialize with mmap and chunk_size for stream reading"""
+ max_read_size = 512*1024
+
+ def __init__(self, m, close_on_deletion):
+ """Initialize with mmap for stream reading"""
self._m = m
self._zip = zlib.decompressobj()
self._buf = None # buffer of decompressed bytes
@@ -115,7 +117,6 @@ class DecompressMemMapReader(object):
self._br = 0 # num uncompressed bytes read
self._cws = 0 # start byte of compression window
self._cwe = 0 # end byte of compression window
- self._cs = cs # chunk size (when reading from zip)
self._close = close_on_deletion # close the memmap on deletion ?
def __del__(self):
@@ -163,6 +164,28 @@ class DecompressMemMapReader(object):
return str()
# END handle depletion
+ # protect from memory peaks
+ # If the caller tries to read large chunks, our memory patterns get really bad,
+ # as we end up copying a possibly huge chunk from our memory map right into
+ # memory. This might not even be possible. Nonetheless, try to dampen the
+ # effect a bit by reading in chunks and returning one large string in the end.
+ # Our performance now depends on StringIO. This way we don't need two large
+ # buffers at peak times, but only one large one in the end, which is
+ # the return buffer.
+ if size > self.max_read_size:
+ sio = StringIO()
+ while size:
+ read_size = min(self.max_read_size, size)
+ data = self.read(read_size)
+ sio.write(data)
+ size -= len(data)
+ if len(data) < read_size:
+ break
+ # END data loop
+ sio.seek(0)
+ return sio.getvalue()
+ # END handle maxread
+
# deplete the buffer, then just continue using the decompress object
# which has an own buffer. We just need this to transparently parse the
# header from the zlib stream
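
(Illustration only, not part of the patch: a minimal standalone sketch of the chunked-read guard introduced above, assuming a reader object that exposes read(size) and the same 512 KiB cap; read_capped and MAX_READ_SIZE are hypothetical names used only for this sketch.)

    from cStringIO import StringIO

    MAX_READ_SIZE = 512 * 1024   # mirrors DecompressMemMapReader.max_read_size

    def read_capped(reader, size):
        # Serve one large request as several bounded reads so that two huge
        # buffers (decompression window plus result) never coexist in memory.
        if size <= MAX_READ_SIZE:
            return reader.read(size)
        sio = StringIO()
        while size:
            data = reader.read(min(MAX_READ_SIZE, size))
            if not data:
                break            # stream depleted before 'size' bytes arrived
            sio.write(data)
            size -= len(data)
        return sio.getvalue()
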
diff --git a/test/git/performance/test_streams.py b/test/git/performance/test_streams.py
index 15924c08..6c2834b3 100644
--- a/test/git/performance/test_streams.py
+++ b/test/git/performance/test_streams.py
@@ -10,6 +10,7 @@ import os
import sys
import stat
import random
+import subprocess
from lib import (
@@ -51,23 +52,24 @@ class TestObjDBPerformance(TestBigRepoReadOnly):
# writing - due to the compression it will seem faster than it is
st = time()
sha = ldb.to_object('blob', size, stream)
- elapsed = time() - st
+ elapsed_add = time() - st
assert ldb.has_object(sha)
- fsize_kib = os.path.getsize(ldb.readable_db_object_path(sha)) / 1000
+ db_file = ldb.readable_db_object_path(sha)
+ fsize_kib = os.path.getsize(db_file) / 1000
size_kib = size / 1000
- print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed, size_kib / elapsed)
+ print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
# reading all at once
st = time()
type, size, shastream = ldb.object(sha)
shadata = shastream.read()
- elapsed = time() - st
+ elapsed_readall = time() - st
stream.seek(0)
assert shadata == stream.getvalue()
- print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed, size_kib / elapsed)
+ print >> sys.stderr, "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
# reading in chunks of 1 MiB
@@ -81,11 +83,62 @@ class TestObjDBPerformance(TestBigRepoReadOnly):
if len(data) < cs:
break
# END read in chunks
- elapsed = time() - st
+ elapsed_readchunks = time() - st
stream.seek(0)
assert ''.join(chunks) == stream.getvalue()
cs_kib = cs / 1000
- print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed, size_kib / elapsed)
+ print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, elapsed_readchunks, size_kib / elapsed_readchunks)
+
+ # del db file so git has something to do
+ os.remove(db_file)
+
+ # VS. CGIT
+ ##########
+ # CGIT! Can using the C git programs directly be faster?
+ proc = rwrepo.git.hash_object('-w', '--stdin', as_process=True, istream=subprocess.PIPE)
+
+ # write file - pump everything in at once to be as fast as possible
+ data = stream.getvalue() # cache it
+ st = time()
+ proc.stdin.write(data)
+ proc.stdin.close()
+ gitsha = proc.stdout.read().strip()
+ proc.wait()
+ gelapsed_add = time() - st
+ del(data)
+ assert gitsha == sha # we do it the same way, right ?
+
+ # as it's the same sha, we reuse our path
+ fsize_kib = os.path.getsize(db_file) / 1000
+ print >> sys.stderr, "Added %i KiB (filesize = %i KiB) of %s data to using git-hash-object in %f s ( %f Write KiB / s)" % (size_kib, fsize_kib, desc, gelapsed_add, size_kib / gelapsed_add)
+
+ # compare ...
+ print >> sys.stderr, "Git-Python is %f %% faster than git when adding big %s files" % (100.0 - (elapsed_add / gelapsed_add) * 100, desc)
+
+
+ # read all
+ st = time()
+ s, t, size, data = rwrepo.git.get_object_data(gitsha)
+ gelapsed_readall = time() - st
+ print >> sys.stderr, "Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)" % (size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall)
+
+ # compare
+ print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %sfiles" % (100.0 - (elapsed_readall / gelapsed_readall) * 100, desc)
+
+
+ # read chunks
+ st = time()
+ s, t, size, stream = rwrepo.git.stream_object_data(gitsha)
+ while True:
+ data = stream.read(cs)
+ if len(data) < cs:
+ break
+ # END read stream
+ gelapsed_readchunks = time() - st
+ print >> sys.stderr, "Read %i KiB of %s data in %i KiB chunks from git-cat-file in %f s ( %f Read KiB / s)" % (size_kib, desc, cs_kib, gelapsed_readchunks, size_kib / gelapsed_readchunks)
+
+ # compare
+ print >> sys.stderr, "Git-Python is %f %% faster than git when reading big %s files in chunks" % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc)
# END for each randomization factor
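
(Illustration only, not part of the patch: a rough sketch of the git-hash-object timing performed above, using subprocess directly instead of the rwrepo.git wrapper; the payload size and data are hypothetical, and the snippet must be run inside a git repository.)

    import os
    import sys
    import subprocess
    from time import time

    size = 50 * 1000 * 1000          # hypothetical payload, roughly 50 MB
    data = "x" * size                # dummy, highly compressible content

    st = time()
    proc = subprocess.Popen(['git', 'hash-object', '-w', '--stdin'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    gitsha, _ = proc.communicate(data)          # pump everything in at once
    gelapsed = time() - st

    size_kib = size / 1000
    print >> sys.stderr, "git-hash-object added %i KiB in %f s ( %f Write KiB / s)" \
        % (size_kib, gelapsed, size_kib / gelapsed)
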