-rwxr-xr-x | t/t0021-conversion.sh | 14
-rw-r--r-- | wrapper.c             | 12
2 files changed, 26 insertions, 0 deletions
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index e50f0f742f..b92e6cb046 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -190,4 +190,18 @@ test_expect_success 'required filter clean failure' '
 	test_must_fail git add test.fc
 '
 
+test -n "$GIT_TEST_LONG" && test_set_prereq EXPENSIVE
+
+test_expect_success EXPENSIVE 'filter large file' '
+	git config filter.largefile.smudge cat &&
+	git config filter.largefile.clean cat &&
+	for i in $(test_seq 1 2048); do printf "%1048576d" 1; done >2GB &&
+	echo "2GB filter=largefile" >.gitattributes &&
+	git add 2GB 2>err &&
+	! test -s err &&
+	rm -f 2GB &&
+	git checkout -- 2GB 2>err &&
+	! test -s err
+'
+
 test_done
diff --git a/wrapper.c b/wrapper.c
--- a/wrapper.c
+++ b/wrapper.c
@@ -131,6 +131,14 @@ void *xcalloc(size_t nmemb, size_t size)
 }
 
 /*
+ * Limit size of IO chunks, because huge chunks only cause pain. OS X
+ * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
+ * the absence of bugs, large chunks can result in bad latencies when
+ * you decide to kill the process.
+ */
+#define MAX_IO_SIZE (8*1024*1024)
+
+/*
  * xread() is the same a read(), but it automatically restarts read()
  * operations with a recoverable error (EAGAIN and EINTR). xread()
  * DOES NOT GUARANTEE that "len" bytes is read even if the data is available.
@@ -138,6 +146,8 @@ void *xcalloc(size_t nmemb, size_t size)
 ssize_t xread(int fd, void *buf, size_t len)
 {
 	ssize_t nr;
+	if (len > MAX_IO_SIZE)
+		len = MAX_IO_SIZE;
 	while (1) {
 		nr = read(fd, buf, len);
 		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
@@ -154,6 +164,8 @@ ssize_t xread(int fd, void *buf, size_t len)
 ssize_t xwrite(int fd, const void *buf, size_t len)
 {
 	ssize_t nr;
+	if (len > MAX_IO_SIZE)
+		len = MAX_IO_SIZE;
 	while (1) {
 		nr = write(fd, buf, len);
 		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
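The new test in t0021 is guarded by the EXPENSIVE prerequisite, which is only set when GIT_TEST_LONG is non-empty in the environment; it builds a 2 GiB file (2048 chunks of 1048576 bytes each) and checks that adding and checking out the file through a clean/smudge filter writes nothing to stderr.

On the wrapper.c side, the cap only limits how much a single xread() or xwrite() call may transfer; callers are still expected to loop until the requested amount has moved, in the spirit of git's read_in_full()/write_in_full() helpers. Below is a minimal caller-side sketch, not part of the patch: it assumes only the xwrite() prototype shown above, and write_all() is a hypothetical name used for illustration.

#include <stddef.h>
#include <sys/types.h>

/* Prototype as in wrapper.c above; with this patch, a single xwrite()
 * call transfers at most MAX_IO_SIZE (8 MB) bytes. */
ssize_t xwrite(int fd, const void *buf, size_t len);

/*
 * Hypothetical helper: keep calling xwrite() until the whole buffer is
 * written or a hard error occurs.  A short write caused by the 8 MB cap
 * is simply retried with the remaining bytes.
 */
static ssize_t write_all(int fd, const void *buf, size_t count)
{
	const char *p = buf;
	size_t remaining = count;

	while (remaining > 0) {
		ssize_t written = xwrite(fd, p, remaining);
		if (written < 0)
			return -1;	/* EAGAIN/EINTR were already retried inside xwrite() */
		if (written == 0)
			break;		/* avoid spinning forever on a zero-byte write */
		p += written;
		remaining -= written;
	}
	return count - remaining;
}

Capping len inside xread()/xwrite() rather than in every caller keeps the change local: any caller that already handles short reads and writes, like the loop above, works unchanged, while no single read(2) or write(2) is ever issued with a length large enough to hit the OS X EINVAL bug or the kill-latency problem described in the new comment.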