-rw-r--r--  cache.h       |  1
-rw-r--r--  read-cache.c  | 20
-rw-r--r--  tree.c        | 69
3 files changed, 85 insertions(+), 5 deletions(-)
diff --git a/cache.h b/cache.h
index e97af18eea..e5276e6add 100644
--- a/cache.h
+++ b/cache.h
@@ -258,6 +258,7 @@ extern int index_name_pos(struct index_state *, const char *name, int namelen);
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
+#define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */
extern int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
extern struct cache_entry *refresh_cache_entry(struct cache_entry *ce, int really);
extern int remove_index_entry_at(struct index_state *, int pos);
diff --git a/read-cache.c b/read-cache.c
index e060392d1d..865369df0e 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -665,7 +665,7 @@ static int check_file_directory_conflict(struct index_state *istate,
return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}
-int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
+static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
int pos;
int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
@@ -707,6 +707,22 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti
pos = index_name_pos(istate, ce->name, ntohs(ce->ce_flags));
pos = -pos-1;
}
+ return pos + 1;
+}
+
+int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
+{
+ int pos;
+
+ if (option & ADD_CACHE_JUST_APPEND)
+ pos = istate->cache_nr;
+ else {
+ int ret;
+ ret = add_index_entry_with_check(istate, ce, option);
+ if (ret <= 0)
+ return ret;
+ pos = ret - 1;
+ }
/* Make sure the array is big enough .. */
if (istate->cache_nr == istate->cache_alloc) {
@@ -717,7 +733,7 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti
/* Add it in.. */
istate->cache_nr++;
- if (istate->cache_nr > pos)
+ if (istate->cache_nr > pos + 1)
memmove(istate->cache + pos + 1,
istate->cache + pos,
(istate->cache_nr - pos - 1) * sizeof(ce));
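The read-cache.c change above splits the old add_index_entry() in two: add_index_entry_with_check() keeps the position lookup and conflict checks and returns pos + 1 (or a value <= 0 on refusal), while the new add_index_entry() either takes that checked path or, when ADD_CACHE_JUST_APPEND is set, simply appends at cache_nr. Below is a minimal standalone sketch of the same two-path insertion using a toy sorted string array; every name in it (toy_index, toy_add_entry, TOY_JUST_APPEND) is hypothetical and only mirrors the shape of the real code, it is not the index_state API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for the index: a growable array of path names. */
struct toy_index {
	const char **entries;
	int nr, alloc;
};

#define TOY_JUST_APPEND 8	/* mirrors ADD_CACHE_JUST_APPEND */

/*
 * The "checked" path: search for the insertion point and refuse
 * duplicates.  Returns pos + 1 on success so values <= 0 can signal
 * an error, matching add_index_entry_with_check() above.
 */
static int toy_insert_pos_with_check(struct toy_index *idx, const char *name)
{
	int pos = 0;

	while (pos < idx->nr && strcmp(idx->entries[pos], name) < 0)
		pos++;
	if (pos < idx->nr && !strcmp(idx->entries[pos], name))
		return 0;	/* already present: refuse */
	return pos + 1;
}

static int toy_add_entry(struct toy_index *idx, const char *name, int option)
{
	int pos;

	if (option & TOY_JUST_APPEND) {
		/* Caller guarantees no conflict: append at the end. */
		pos = idx->nr;
	} else {
		int ret = toy_insert_pos_with_check(idx, name);
		if (ret <= 0)
			return ret;
		pos = ret - 1;
	}

	/* Make sure the array is big enough (error handling omitted). */
	if (idx->nr == idx->alloc) {
		idx->alloc = idx->alloc ? idx->alloc * 2 : 16;
		idx->entries = realloc(idx->entries,
				       idx->alloc * sizeof(*idx->entries));
	}

	/*
	 * Shift the tail only when inserting in the middle; a pure append
	 * (pos == old nr) moves nothing, which is why the patch compares
	 * against pos + 1 after cache_nr has been incremented.
	 */
	idx->nr++;
	if (idx->nr > pos + 1)
		memmove(idx->entries + pos + 1, idx->entries + pos,
			(idx->nr - pos - 1) * sizeof(*idx->entries));
	idx->entries[pos] = name;
	return 0;
}

int main(void)
{
	struct toy_index idx = { NULL, 0, 0 };

	toy_add_entry(&idx, "cache.h", 0);
	toy_add_entry(&idx, "tree.c", 0);
	toy_add_entry(&idx, "read-cache.c", TOY_JUST_APPEND); /* trusted append */
	printf("%d entries, first is %s\n", idx.nr, idx.entries[0]);
	free(idx.entries);
	return 0;
}

Note that a trusted append can leave the array out of order, which is exactly why the tree.c change below re-sorts the index after a bulk read.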
diff --git a/tree.c b/tree.c
index 04fe653a8e..8c0819fa72 100644
--- a/tree.c
+++ b/tree.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
@@ -7,7 +8,7 @@
const char *tree_type = "tree";
-static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
+static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
int len;
unsigned int size;
@@ -25,7 +26,23 @@ static int read_one_entry(const unsigned char *sha1, const char *base, int basel
memcpy(ce->name, base, baselen);
memcpy(ce->name + baselen, pathname, len+1);
hashcpy(ce->sha1, sha1);
- return add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+ return add_cache_entry(ce, opt);
+}
+
+static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
+{
+ return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
+ ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+}
+
+/*
+ * This is used when the caller knows there are no existing entries at
+ * the stage that will conflict with the entry being added.
+ */
+static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
+{
+ return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
+ ADD_CACHE_JUST_APPEND);
}
static int match_tree_entry(const char *base, int baselen, const char *path, unsigned int mode, const char **paths)
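The three functions above take this shape because read_tree_recursive() invokes its callback through the fixed read_tree_fn_t signature, so the option word cannot be passed as an extra argument; instead read_one_entry_opt() accepts it as a parameter and read_one_entry()/read_one_entry_quick() are thin wrappers that bake in the two option sets. The following small standalone sketch of that wrapper pattern uses hypothetical names (entry_fn_t, handle_entry*) standing in for read_tree_fn_t and the functions above.

#include <stdio.h>

/* Fixed callback signature, as with read_tree_fn_t. */
typedef int (*entry_fn_t)(const char *path, int stage);

/* One worker that takes the extra option word. */
static int handle_entry_opt(const char *path, int stage, int opt)
{
	printf("add %s at stage %d, options %#x\n", path, stage, (unsigned)opt);
	return 0;
}

/* Checked path, like ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK. */
static int handle_entry(const char *path, int stage)
{
	return handle_entry_opt(path, stage, 1 | 4);
}

/* Fast path, like ADD_CACHE_JUST_APPEND. */
static int handle_entry_quick(const char *path, int stage)
{
	return handle_entry_opt(path, stage, 8);
}

int main(void)
{
	/* The caller picks the callback once, up front. */
	entry_fn_t fn = handle_entry_quick;

	fn("cache.h", 1);
	fn = handle_entry;
	return fn("tree.c", 1);
}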
@@ -119,9 +136,55 @@ int read_tree_recursive(struct tree *tree,
return 0;
}
+static int cmp_cache_name_compare(const void *a_, const void *b_)
+{
+ const struct cache_entry *ce1, *ce2;
+
+ ce1 = *((const struct cache_entry **)a_);
+ ce2 = *((const struct cache_entry **)b_);
+ return cache_name_compare(ce1->name, ntohs(ce1->ce_flags),
+ ce2->name, ntohs(ce2->ce_flags));
+}
+
int read_tree(struct tree *tree, int stage, const char **match)
{
- return read_tree_recursive(tree, "", 0, stage, match, read_one_entry);
+ read_tree_fn_t fn = NULL;
+ int i, err;
+
+ /*
+ * Currently the only existing callers of this function all
+ * call it with stage=1 and after making sure there is nothing
+ * at that stage; we could always use read_one_entry_quick().
+ *
+ * But when we decide to straighten out git-read-tree not to
+ * use unpack_trees() in some cases, this will probably start
+ * to matter.
+ */
+
+ /*
+ * See if we have a cache entry at the stage. If so,
+ * do it the original slow way; otherwise, append and then
+ * sort at the end.
+ */
+ for (i = 0; !fn && i < active_nr; i++) {
+ struct cache_entry *ce = active_cache[i];
+ if (ce_stage(ce) == stage)
+ fn = read_one_entry;
+ }
+
+ if (!fn)
+ fn = read_one_entry_quick;
+ err = read_tree_recursive(tree, "", 0, stage, match, fn);
+ if (fn == read_one_entry || err)
+ return err;
+
+ /*
+ * Sort the cache entries -- we need to nuke the cache tree, though.
+ */
+ cache_tree_free(&active_cache_tree);
+ qsort(active_cache, active_nr, sizeof(active_cache[0]),
+ cmp_cache_name_compare);
+ return 0;
}
struct tree *lookup_tree(const unsigned char *sha1)
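The rewritten read_tree() scans the index once to see whether any entry already sits at the requested stage; only if one exists does it fall back to the checked read_one_entry() path. Otherwise it appends everything via read_one_entry_quick() and restores index order with a single qsort() over cmp_cache_name_compare(), freeing the cache tree first since entry positions change. Below is a self-contained sketch of that append-then-sort strategy on a plain string array; it is illustrative only, with cmp_names standing in for cache_name_compare().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Comparison callback for qsort(), like cmp_cache_name_compare() above. */
static int cmp_names(const void *a_, const void *b_)
{
	const char *a = *(const char *const *)a_;
	const char *b = *(const char *const *)b_;
	return strcmp(a, b);
}

int main(void)
{
	/* Entries arrive in tree-traversal order, not index order. */
	const char *incoming[] = { "tree.c", "cache.h", "read-cache.c", "Makefile" };
	const char *entries[16];
	int nr = 0;
	size_t i;

	/* "Just append": no per-entry search or memmove. */
	for (i = 0; i < sizeof(incoming) / sizeof(*incoming); i++)
		entries[nr++] = incoming[i];

	/* One sort at the end restores order, as read_tree() now does. */
	qsort(entries, nr, sizeof(*entries), cmp_names);

	for (i = 0; i < (size_t)nr; i++)
		printf("%s\n", entries[i]);
	return 0;
}

Appending n entries and sorting once costs O(n log n), whereas inserting each entry at its sorted position with the checked path costs a search plus a memmove per entry, O(n^2) in the worst case; that difference is the point of the ADD_CACHE_JUST_APPEND fast path.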