path: root/bfd/cache.c
author     Joel Brobecker <brobecker@gnat.com>   2008-05-01 15:45:43 +0000
committer  Joel Brobecker <brobecker@gnat.com>   2008-05-01 15:45:43 +0000
commit     52d0c6ccdecee7ef5cada3f2069dc686581fbd7d (patch)
tree       03deff0ca91e65fcb128cad529caf10a940a9864 /bfd/cache.c
parent     cfef9a208679118d2b29663cccb677234fe535c7 (diff)
download   binutils-redhat-52d0c6ccdecee7ef5cada3f2069dc686581fbd7d.tar.gz
* cache.c (cache_bread_1): Renames cache_bread.
(cache_bread): New function.
Diffstat (limited to 'bfd/cache.c')
-rw-r--r--  bfd/cache.c  39
1 file changed, 38 insertions(+), 1 deletion(-)
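
For context, here is a minimal standalone sketch (not part of the commit) of the chunked-read pattern that the new cache_bread introduces, written against plain stdio rather than the BFD cache machinery. The helper name read_in_chunks and the use of fread are illustrative assumptions, not BFD APIs.

#include <stdio.h>

/* Read NBYTES from F into BUF, never issuing a single fread call larger
   than 8MB, and stopping early on a short read (end of file or error).
   This mirrors the loop added to cache_bread below; read_in_chunks is a
   hypothetical name chosen for this sketch only.  */
size_t
read_in_chunks (FILE *f, void *buf, size_t nbytes)
{
  const size_t max_chunk_size = 0x800000;   /* 8MB, as in the patch.  */
  size_t nread = 0;

  while (nread < nbytes)
    {
      size_t chunk_size = nbytes - nread;
      size_t chunk_nread;

      if (chunk_size > max_chunk_size)
        chunk_size = max_chunk_size;

      chunk_nread = fread ((char *) buf + nread, 1, chunk_size, f);
      nread += chunk_nread;

      if (chunk_nread < chunk_size)
        break;    /* Short read: end of file or error.  */
    }

  return nread;
}

Unlike cache_bread below, this sketch never sees a negative count from fread, so it does not need the sign handling that the patch's comment describes for cache_bread_1's return value.
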
diff --git a/bfd/cache.c b/bfd/cache.c
index eb6120dfbe..3906335041 100644
--- a/bfd/cache.c
+++ b/bfd/cache.c
@@ -250,7 +250,7 @@ cache_bseek (struct bfd *abfd, file_ptr offset, int whence)
    first octet in the file, NOT the beginning of the archive header.  */
 
 static file_ptr
-cache_bread (struct bfd *abfd, void *buf, file_ptr nbytes)
+cache_bread_1 (struct bfd *abfd, void *buf, file_ptr nbytes)
 {
   FILE *f;
   file_ptr nread;
@@ -301,6 +301,43 @@ cache_bread (struct bfd *abfd, void *buf, file_ptr nbytes)
 }
 
 static file_ptr
+cache_bread (struct bfd *abfd, void *buf, file_ptr nbytes)
+{
+  file_ptr nread = 0;
+
+  /* Some filesystems are unable to handle reads that are too large
+     (for instance, NetApp shares with oplocks turned off).  To avoid
+     hitting this limitation, we read the buffer in chunks of 8MB max.  */
+  while (nread < nbytes)
+    {
+      const file_ptr max_chunk_size = 0x800000;
+      file_ptr chunk_size = nbytes - nread;
+      file_ptr chunk_nread;
+
+      if (chunk_size > max_chunk_size)
+        chunk_size = max_chunk_size;
+
+      chunk_nread = cache_bread_1 (abfd, buf + nread, chunk_size);
+
+      /* Update the nread count.
+
+         We just have to be careful of the case when cache_bread_1 returns
+         a negative count: If this is our first read, then set nread to
+         that negative count in order to return that negative value to the
+         caller.  Otherwise, don't add it to our total count, or we would
+         end up returning a smaller number of bytes read than we actually
+         did.  */
+      if (nread == 0 || chunk_nread > 0)
+        nread += chunk_nread;
+
+      if (chunk_nread < chunk_size)
+        break;
+    }
+
+  return nread;
+}
+
+static file_ptr
 cache_bwrite (struct bfd *abfd, const void *where, file_ptr nbytes)
 {
   file_ptr nwrite;