author    Tim Kientzle <kientzle@acm.org>  2015-01-10 12:24:58 -0800
committer Tim Kientzle <kientzle@acm.org>  2015-01-10 12:24:58 -0800
commit    6e06b1c89dd0d16f74894eac4cfc1327a06ee4a0 (patch)
tree      96e98d0fba702bdca504274db804fd153ab1448a /libarchive/archive_read.c
parent    48b288a03347e49f2f9501f040f626f916195de6 (diff)
download  libarchive-6e06b1c89dd0d16f74894eac4cfc1327a06ee4a0.tar.gz
Fix a potential crash issue discovered by Alexander Cherepanov:
It seems bsdtar automatically handles stacked compression. This is a nice feature but it could be problematic when it's completely unlimited. Most clearly it's illustrated with quines:

$ curl -sRO http://www.maximumcompression.com/selfgz.gz
$ (ulimit -v 10000000 && bsdtar -tvf selfgz.gz)
bsdtar: Error opening archive: Can't allocate data for gzip decompression

Without ulimit, bsdtar will eat all available memory. This could also be a problem for other applications using libarchive.
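For context, here is a minimal, illustrative C consumer (not part of this commit; the file name and block size are arbitrary) that lists an archive roughly the way bsdtar -t does. Opening the input is what drives the filter-chain bidding in archive_read_open1()/choose_filters(), so with an unbounded loop a gzip quine such as selfgz.gz keeps stacking gzip filters and allocating decompressor state inside the open call, while with this change the open fails cleanly with ARCHIVE_FATAL:

#include <stdio.h>
#include <archive.h>
#include <archive_entry.h>

int
main(int argc, char **argv)
{
	struct archive *a;
	struct archive_entry *entry;

	if (argc < 2)
		return (1);

	a = archive_read_new();
	archive_read_support_filter_all(a);	/* enables automatic stacked decompression */
	archive_read_support_format_all(a);

	/* choose_filters() runs during open; with the unbounded loop a
	 * filter quine exhausts memory here, with this commit it now
	 * fails with "Input requires too many filters for decoding". */
	if (archive_read_open_filename(a, argv[1], 10240) != ARCHIVE_OK) {
		fprintf(stderr, "Error opening archive: %s\n",
		    archive_error_string(a));
		archive_read_free(a);
		return (1);
	}

	while (archive_read_next_header(a, &entry) == ARCHIVE_OK)
		printf("%s\n", archive_entry_pathname(entry));

	archive_read_free(a);
	return (0);
}

Built against libarchive (cc list.c -larchive) and run on selfgz.gz under a ulimit, this reproduces the report quoted above.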
Diffstat (limited to 'libarchive/archive_read.c')
-rw-r--r--  libarchive/archive_read.c | 7
1 file changed, 5 insertions, 2 deletions
diff --git a/libarchive/archive_read.c b/libarchive/archive_read.c
index 02bf8d3a..8f71a8b9 100644
--- a/libarchive/archive_read.c
+++ b/libarchive/archive_read.c
@@ -548,13 +548,13 @@ archive_read_open1(struct archive *_a)
 static int
 choose_filters(struct archive_read *a)
 {
-	int number_bidders, i, bid, best_bid;
+	int number_bidders, i, bid, best_bid, n;
 	struct archive_read_filter_bidder *bidder, *best_bidder;
 	struct archive_read_filter *filter;
 	ssize_t avail;
 	int r;

-	for (;;) {
+	for (n = 0; n < 25; ++n) {
 		number_bidders = sizeof(a->bidders) / sizeof(a->bidders[0]);

 		best_bid = 0;
@@ -600,6 +600,9 @@ choose_filters(struct archive_read *a)
 			return (ARCHIVE_FATAL);
 		}
 	}
+	archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+	    "Input requires too many filters for decoding");
+	return (ARCHIVE_FATAL);
 }
/*