author		Junio C Hamano <gitster@pobox.com>	2014-06-25 11:49:48 -0700
committer	Junio C Hamano <gitster@pobox.com>	2014-06-25 11:49:48 -0700
commit		1881d2b88c4b889dcb95782ad4bc5395808438e9 (patch)
tree		cab7b82ccfc98e89b355c3dc209b7ec6656e1b93 /read-cache.c
parent		85785df6d6db50ff37d0ff1878c16ad69a063f6a (diff)
parent		426ddeead6112955dfb50ccf9bb4af05d1ca9082 (diff)
Merge branch 'ym/fix-opportunistic-index-update-race' into maint
"git status", even though it is a read-only operation, tries to update the index with refreshed lstat(2) info to optimize future accesses to the working tree opportunistically, but this could race with a "read-write" operation that modify the index while it is running. Detect such a race and avoid overwriting the index. * ym/fix-opportunistic-index-update-race: read-cache.c: verify index file before we opportunistically update it wrapper.c: add xpread() similar to xread()
Diffstat (limited to 'read-cache.c')
-rw-r--r--	read-cache.c	47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/read-cache.c b/read-cache.c
index ba13353b37..7f5645e745 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1477,6 +1477,7 @@ int read_index_from(struct index_state *istate, const char *path)
 	if (verify_hdr(hdr, mmap_size) < 0)
 		goto unmap;
 
+	hashcpy(istate->sha1, (unsigned char *)hdr + mmap_size - 20);
 	istate->version = ntohl(hdr->hdr_version);
 	istate->cache_nr = ntohl(hdr->hdr_entries);
 	istate->cache_alloc = alloc_nr(istate->cache_nr);
@@ -1760,6 +1761,50 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
 	return result;
 }
 
+/*
+ * This function verifies if index_state has the correct sha1 of the
+ * index file.  Don't die if we have any other failure, just return 0.
+ */
+static int verify_index_from(const struct index_state *istate, const char *path)
+{
+	int fd;
+	ssize_t n;
+	struct stat st;
+	unsigned char sha1[20];
+
+	if (!istate->initialized)
+		return 0;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return 0;
+
+	if (fstat(fd, &st))
+		goto out;
+
+	if (st.st_size < sizeof(struct cache_header) + 20)
+		goto out;
+
+	n = pread_in_full(fd, sha1, 20, st.st_size - 20);
+	if (n != 20)
+		goto out;
+
+	if (hashcmp(istate->sha1, sha1))
+		goto out;
+
+	close(fd);
+	return 1;
+
+out:
+	close(fd);
+	return 0;
+}
+
+static int verify_index(const struct index_state *istate)
+{
+	return verify_index_from(istate, get_index_file());
+}
+
 static int has_racy_timestamp(struct index_state *istate)
 {
 	int entries = istate->cache_nr;
@@ -1779,7 +1824,7 @@ static int has_racy_timestamp(struct index_state *istate)
 void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
 {
 	if ((istate->cache_changed || has_racy_timestamp(istate)) &&
-	    !write_index(istate, lockfile->fd))
+	    verify_index(istate) && !write_index(istate, lockfile->fd))
 		commit_locked_index(lockfile);
 	else
 		rollback_lock_file(lockfile);
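The series' other patch gives wrapper.c an xpread() so that reads like the one above have an interruption-safe primitive. Sketches of xpread() and of a pread_in_full()-style loop over it, assuming they follow the existing xread()/read_in_full() pattern (the in-tree xpread() also clamps len to MAX_IO_SIZE):

#define _XOPEN_SOURCE 700	/* for pread(2) */
#include <errno.h>
#include <unistd.h>

/* pread(2) that restarts on EINTR/EAGAIN instead of failing. */
static ssize_t xpread(int fd, void *buf, size_t len, off_t offset)
{
	while (1) {
		ssize_t nr = pread(fd, buf, len, offset);
		if (nr < 0 && (errno == EAGAIN || errno == EINTR))
			continue;
		return nr;
	}
}

/*
 * Keep calling xpread() until len bytes arrive (or EOF/error), which
 * is why verify_index_from() can simply test n != 20.
 */
static ssize_t pread_in_full(int fd, void *buf, size_t len, off_t offset)
{
	char *p = buf;
	ssize_t total = 0;

	while (len > 0) {
		ssize_t nr = xpread(fd, p, len, offset);
		if (nr < 0)
			return -1;
		if (nr == 0)
			break;	/* premature EOF */
		p += nr;
		offset += nr;
		len -= nr;
		total += nr;
	}
	return total;
}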