author    Nicolas Pitre <nico@fluxnic.net>       2010-02-17 14:05:54 -0500
committer Junio C Hamano <gitster@pobox.com>     2010-02-17 11:08:43 -0800
commit    89e0a3a131d251b5345845529d5258ab91105c9b (patch)
tree      fdd226790bd50130a49182556d487e03a4699669
parent    427cb22c40484f9b8c2881bc9e99636591a22655 (diff)
fast-import: make default pack size unlimited
Now that fast-import is creating packs with index version 2, there is
no point limiting the pack size by default.  A pack split will still
happen if off_t is not sufficiently large to hold large offsets.

While updating the doc, let's remove the "packfiles fit on CDs"
suggestion.  Pack files created by fast-import are still suboptimal
and a 'git repack -a -f -d' or even 'git gc --aggressive' would be a
pretty good idea before considering storage on CDs.

Signed-off-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
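The behavioural change is confined to the auto-checkpoint test in the hunks
below: a max_packsize of zero now means "no limit", while the wrap-around
comparison still forces a pack split when the projected size overflows off_t.
A minimal C sketch of that test follows; it is a simplified illustration, not
the actual fast-import code, and the 60-byte slop constant is taken verbatim
from the hunks.

#include <sys/types.h>

static off_t max_packsize;   /* 0 = unlimited (the new default) */
static off_t pack_size;      /* bytes written to the current pack so far */

/* Sketch of the condition used in store_object()/stream_blob() below. */
static int want_checkpoint(off_t object_len)
{
        off_t projected = pack_size + 60 + object_len;

        if (max_packsize && projected > max_packsize)
                return 1;    /* user-imposed limit reached */
        if (projected < pack_size)
                return 1;    /* off_t wrapped around: split anyway */
        return 0;
}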
-rw-r--r--  Documentation/git-fast-import.txt |  5
-rw-r--r--  fast-import.c                     | 12
2 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/Documentation/git-fast-import.txt b/Documentation/git-fast-import.txt
index 6764ff1886..19082b04eb 100644
--- a/Documentation/git-fast-import.txt
+++ b/Documentation/git-fast-import.txt
@@ -45,10 +45,7 @@ OPTIONS
--max-pack-size=<n>::
Maximum size of each output packfile.
- The default is 4 GiB as that is the maximum allowed
- packfile size (due to file format limitations). Some
- importers may wish to lower this, such as to ensure the
- resulting packfiles fit on CDs.
+ The default is unlimited.
--big-file-threshold=<n>::
Maximum size of a blob that fast-import will attempt to
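For reference, the 4 GiB figure in the removed documentation text (and in the
old initializer changed in the hunk below) is just the largest value a 32-bit
pack offset can hold, which is what the version-1 pack index was limited to.
A throwaway C check of that arithmetic, not part of the patch:

#include <stdio.h>

int main(void)
{
        long long old_default = (1LL << 32) - 1;  /* old max_packsize initializer */

        /* 4294967295 bytes, i.e. 4 GiB minus one byte */
        printf("old default: %lld bytes\n", old_default);
        return 0;
}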
diff --git a/fast-import.c b/fast-import.c
index 9d7ab09620..d2f45b18d9 100644
--- a/fast-import.c
+++ b/fast-import.c
@@ -191,7 +191,7 @@ struct mark_set
struct last_object
{
struct strbuf data;
- uint32_t offset;
+ off_t offset;
unsigned int depth;
unsigned no_swap : 1;
};
@@ -279,7 +279,7 @@ struct recent_command
/* Configured limits on output */
static unsigned long max_depth = 10;
-static off_t max_packsize = (1LL << 32) - 1;
+static off_t max_packsize;
static uintmax_t big_file_threshold = 512 * 1024 * 1024;
static int force_update;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
@@ -315,7 +315,7 @@ static unsigned int pack_id;
static struct sha1file *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
-static unsigned long pack_size;
+static off_t pack_size;
/* Table of objects we've written. */
static unsigned int object_entry_alloc = 5000;
@@ -1068,7 +1068,7 @@ static int store_object(
deflateEnd(&s);
/* Determine if we should auto-checkpoint. */
- if ((pack_size + 60 + s.total_out) > max_packsize
+ if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
|| (pack_size + 60 + s.total_out) < pack_size) {
/* This new object needs to *not* have the current pack_id. */
@@ -1101,7 +1101,7 @@ static int store_object(
crc32_begin(pack_file);
if (delta) {
- unsigned long ofs = e->idx.offset - last->offset;
+ off_t ofs = e->idx.offset - last->offset;
unsigned pos = sizeof(hdr) - 1;
delta_count_by_type[type]++;
@@ -1170,7 +1170,7 @@ static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
- if ((pack_size + 60 + len) > max_packsize
+ if ((max_packsize && (pack_size + 60 + len) > max_packsize)
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
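The remaining hunks widen the offset-related fields (last_object.offset, the
delta base distance ofs, and pack_size) from 32-bit types to off_t: with no
default size cap, a single pack can now cross the 4 GiB mark, where a 32-bit
field would silently wrap.  A standalone demonstration of that truncation,
using a hypothetical offset and assuming a 64-bit off_t:

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

int main(void)
{
        /* An object 5 GiB into a pack -- possible once no size cap applies. */
        off_t offset = (off_t)5 * 1024 * 1024 * 1024;
        uint32_t old_field = (uint32_t)offset;   /* old 32-bit field type */

        printf("off_t    : %lld\n", (long long)offset);  /* 5368709120 */
        printf("uint32_t : %u\n", old_field);            /* wraps to 1073741824 */
        return 0;
}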